metadata (dict) | text (string, 60 to 3.49M chars)
---|---
{
"source": "JesseLivezey/multicolinearity",
"score": 3
}
|
#### File: JesseLivezey/multicolinearity/analysis.py
```python
import numpy as np
from sklearn.linear_model import LinearRegression as LinR, Lasso as LasR
def sample_data(X_cov, M, noise_std, n_data, rng):
X = rng.multivariate_normal(np.zeros_like(M), X_cov, n_data)
X /= X.std(axis=0, keepdims=True)
Y = X.dot(M) + rng.randn(n_data) * noise_std
return X, Y
def fit_linear(X, Y):
return LinR(fit_intercept=False).fit(X, Y).coef_
def fit_lasso(X, Y, alpha):
return LasR(alpha=alpha, fit_intercept=False).fit(X, Y).coef_
def fit_lasso_linear(X, Y, alpha):
las = fit_lasso(X, Y, alpha)
if np.count_nonzero(las) < X.shape[1]:
result = np.zeros_like(las)
if np.count_nonzero(las) == 0:
return result
nz = np.nonzero(las)[0]
Xp = X[:, nz]
lin = fit_linear(Xp, Y)
result[nz] = lin
return result
else:
return fit_linear(X, Y)
def lin_cost(X, Y, M):
if M.ndim > 1:
n = M.shape[0]
cost = np.zeros(n)
for ii, Mi in enumerate(M):
cost[ii] = np.mean((Y - X.dot(Mi))**2) / 2.
return cost
else:
return np.mean((Y - X.dot(M))**2) / 2.
def abs_cost(M, alpha):
if M.ndim > 1:
n = M.shape[0]
cost = np.zeros(n)
for ii, Mi in enumerate(M):
cost[ii] = alpha * np.sum(abs(Mi))
return cost
else:
return alpha * np.sum(abs(M))
def las_cost(X, Y, M, alpha):
return lin_cost(X, Y, M) + abs_cost(M, alpha)
```
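A minimal usage sketch for the helpers above; all inputs are made-up illustration values, and `rng` is assumed to be a `numpy.random.RandomState` because `sample_data` calls `rng.randn`.
```python
import numpy as np

# Illustration only: two highly correlated predictors, one true nonzero coefficient.
rng = np.random.RandomState(0)
X_cov = np.array([[1.0, 0.95],
                  [0.95, 1.0]])
M_true = np.array([1.0, 0.0])

X, Y = sample_data(X_cov, M_true, noise_std=0.1, n_data=500, rng=rng)
print(fit_linear(X, Y))              # ordinary least squares on both predictors
print(fit_lasso(X, Y, alpha=0.1))    # lasso typically zeros out one of the correlated pair
print(fit_lasso_linear(X, Y, 0.1))   # OLS refit restricted to the lasso support
print(las_cost(X, Y, M_true, 0.1))   # lasso objective evaluated at the true coefficients
```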
|
{
"source": "JesseLivezey/pynwb",
"score": 2
}
|
#### File: src/pynwb/misc.py
```python
import numpy as np
from collections.abc import Iterable
from .form.utils import docval, getargs, popargs, call_docval_func
from . import register_class, CORE_NAMESPACE
from .base import TimeSeries, _default_conversion, _default_resolution
from .core import NWBContainer, NWBDataInterface, ElementIdentifiers, VectorData, VectorIndex, IndexedVector
@register_class('AnnotationSeries', CORE_NAMESPACE)
class AnnotationSeries(TimeSeries):
"""
Stores text-based records about the experiment. Records can be
added individually through add_annotation(), or, if all
annotations are already stored in a list, passed directly to the
constructor via the 'data' and 'timestamps' arguments.
"""
__nwbfields__ = ()
_ancestry = "TimeSeries,AnnotationSeries"
_help = "Time-stamped annotations about an experiment."
@docval({'name': 'name', 'type': str, 'doc': 'The name of this TimeSeries dataset'},
{'name': 'source', 'type': str,
'doc': ('Name of TimeSeries or Modules that serve as the source for the data '
'contained here. It can also be the name of a device, for stimulus or '
'acquisition data')},
{'name': 'data', 'type': ('array_data', 'data', TimeSeries),
'doc': 'The data this TimeSeries dataset stores. Can also store binary data e.g. image frames',
'default': list()},
{'name': 'timestamps', 'type': ('array_data', 'data', TimeSeries),
'doc': 'Timestamps for samples stored in data', 'default': None},
{'name': 'comments', 'type': str,
'doc': 'Human-readable comments about this TimeSeries dataset', 'default': 'no comments'},
{'name': 'description', 'type': str, 'doc':
'Description of this TimeSeries dataset', 'default': 'no description'},
{'name': 'parent', 'type': NWBContainer,
'doc': 'The parent NWBContainer for this NWBContainer', 'default': None})
def __init__(self, **kwargs):
name, source, data, timestamps = popargs('name', 'source', 'data', 'timestamps', kwargs)
super(AnnotationSeries, self).__init__(name, source, data, 'n/a',
resolution=np.nan, conversion=np.nan,
timestamps=timestamps, **kwargs)
@docval({'name': 'time', 'type': float, 'doc': 'The time for the annotation'},
{'name': 'annotation', 'type': str, 'doc': 'the annotation'})
def add_annotation(self, **kwargs):
'''
Add an annotation
'''
time, annotation = getargs('time', 'annotation', kwargs)
self.fields['timestamps'].append(time)
self.fields['data'].append(annotation)
@register_class('AbstractFeatureSeries', CORE_NAMESPACE)
class AbstractFeatureSeries(TimeSeries):
"""
Represents the salient features of a data stream. Typically this
will be used for things like a visual grating stimulus, where
the bulk of data (each frame sent to the graphics card) is bulky
and not of high value, while the salient characteristics (eg,
orientation, spatial frequency, contrast, etc.) are what is important
and what is used for analysis
"""
__nwbfields__ = ('feature_units',
'features')
_ancestry = "TimeSeries,AbstractFeatureSeries"
_help = "Features of an applied stimulus. This is useful when storing the raw stimulus is impractical."
@docval({'name': 'name', 'type': str, 'doc': 'The name of this TimeSeries dataset'},
{'name': 'source', 'type': str,
'doc': ('Name of TimeSeries or Modules that serve as the source for the data '
'contained here. It can also be the name of a device, for stimulus or '
'acquisition data')},
{'name': 'feature_units', 'type': (str, Iterable), 'doc': 'The unit of each feature'},
{'name': 'features', 'type': (str, Iterable), 'doc': 'Description of each feature'},
{'name': 'data', 'type': ('array_data', 'data', TimeSeries),
'doc': 'The data this TimeSeries dataset stores. Can also store binary data e.g. image frames',
'default': list()},
{'name': 'resolution', 'type': float,
'doc': 'The smallest meaningful difference (in specified unit) between values in data',
'default': _default_resolution},
{'name': 'conversion', 'type': float,
'doc': 'Scalar to multiply each element in data to convert it to the specified unit',
'default': _default_conversion},
{'name': 'timestamps', 'type': ('array_data', 'data', TimeSeries),
'doc': 'Timestamps for samples stored in data', 'default': None},
{'name': 'starting_time', 'type': float, 'doc': 'The timestamp of the first sample', 'default': None},
{'name': 'rate', 'type': float, 'doc': 'Sampling rate in Hz', 'default': None},
{'name': 'comments', 'type': str, 'doc': 'Human-readable comments about this TimeSeries dataset',
'default': 'no comments'},
{'name': 'description', 'type': str,
'doc': 'Description of this TimeSeries dataset', 'default': 'no description'},
{'name': 'control', 'type': Iterable,
'doc': 'Numerical labels that apply to each element in data', 'default': None},
{'name': 'control_description', 'type': Iterable,
'doc': 'Description of each control value', 'default': None},
{'name': 'parent', 'type': NWBContainer,
'doc': 'The parent NWBContainer for this NWBContainer', 'default': None})
def __init__(self, **kwargs):
name, source, data, features, feature_units = popargs('name', 'source', 'data',
'features', 'feature_units', kwargs)
super(AbstractFeatureSeries, self).__init__(name, source, data, "see 'feature_units'", **kwargs)
self.features = features
self.feature_units = feature_units
@docval({'name': 'time', 'type': float, 'doc': 'the time point of this feature'},
{'name': 'features', 'type': (list, np.ndarray), 'doc': 'the feature values for this time point'})
def add_features(self, **kwargs):
time, features = getargs('time', 'features', kwargs)
self.timestamps.append(time)
self.data.append(features)
@register_class('IntervalSeries', CORE_NAMESPACE)
class IntervalSeries(TimeSeries):
"""
Stores intervals of data. The timestamps field stores the beginning and end of intervals. The
data field stores whether the interval just started (>0 value) or ended (<0 value). Different interval
types can be represented in the same series by using multiple key values (eg, 1 for feature A, 2
for feature B, 3 for feature C, etc.). The data field stores an 8-bit integer. This is largely an alias
of a standard TimeSeries, but one that is identifiable as representing time intervals in a machine-readable
way.
"""
__nwbfields__ = ()
_ancestry = "TimeSeries,IntervalSeries"
_help = "Stores the start and stop times for events."
@docval({'name': 'name', 'type': str, 'doc': 'The name of this TimeSeries dataset'},
{'name': 'source', 'type': str,
'doc': ('Name of TimeSeries or Modules that serve as the source for the data '
'contained here. It can also be the name of a device, for stimulus or '
'acquisition data')},
{'name': 'data', 'type': ('array_data', 'data', TimeSeries),
'doc': '>0 if interval started, <0 if interval ended.', 'default': list()},
{'name': 'timestamps', 'type': ('array_data', 'data', TimeSeries),
'doc': 'Timestamps for samples stored in data', 'default': list()},
{'name': 'comments', 'type': str,
'doc': 'Human-readable comments about this TimeSeries dataset', 'default': 'no comments'},
{'name': 'description', 'type': str,
'doc': 'Description of this TimeSeries dataset', 'default': 'no description'},
{'name': 'control', 'type': Iterable,
'doc': 'Numerical labels that apply to each element in data', 'default': None},
{'name': 'control_description', 'type': Iterable,
'doc': 'Description of each control value', 'default': None},
{'name': 'parent', 'type': NWBContainer,
'doc': 'The parent NWBContainer for this NWBContainer', 'default': None})
def __init__(self, **kwargs):
name, source, data, timestamps = popargs('name', 'source', 'data', 'timestamps', kwargs)
unit = 'n/a'
self.__interval_timestamps = timestamps
self.__interval_data = data
super(IntervalSeries, self).__init__(name, source, data, unit,
timestamps=timestamps,
resolution=_default_resolution,
conversion=_default_conversion,
**kwargs)
@docval({'name': 'start', 'type': float, 'doc': 'The start time of the interval'},
{'name': 'stop', 'type': float, 'doc': 'The stop time of the interval'})
def add_interval(self, **kwargs):
start, stop = getargs('start', 'stop', kwargs)
self.__interval_timestamps.append(start)
self.__interval_timestamps.append(stop)
self.__interval_data.append(1)
self.__interval_data.append(-1)
@property
def data(self):
return self.__interval_data
@property
def timestamps(self):
return self.__interval_timestamps
@register_class('UnitTimes', CORE_NAMESPACE)
class UnitTimes(NWBDataInterface):
"""
Event times of observed units (e.g. cell, synapse, etc.). The UnitTimes group contains a group
for each unit. The name of the group should match the value in the source module, if that is
possible/relevant (e.g., name of ROIs from Segmentation module).
"""
__nwbfields__ = (
{'name': 'unit_ids', 'child': True},
{'name': 'spike_times_index', 'child': True},
{'name': 'spike_times', 'child': True},
)
@docval({'name': 'source', 'type': str,
'doc': 'Name, path or description of where unit times originated.'},
{'name': 'unit_ids', 'type': ('array_data', 'data', ElementIdentifiers),
'doc': 'the identifiers for the units stored in this interface', 'default': list()},
{'name': 'spike_times', 'type': ('array_data', 'data', VectorData),
'doc': 'a concatenated list of spike times for the units stored in this interface',
'default': list()},
{'name': 'spike_times_index', 'type': ('array_data', 'data', VectorIndex),
'doc': 'the indices in spike_times that correspond to each unit in unit_ids',
'default': list()},
{'name': 'name', 'type': str, 'doc': 'Name of this UnitTimes interface', 'default': 'UnitTimes'})
def __init__(self, **kwargs):
unit_ids, spike_times, spike_times_index = popargs('unit_ids', 'spike_times', 'spike_times_index', kwargs)
call_docval_func(super(UnitTimes, self).__init__, kwargs)
if not isinstance(unit_ids, ElementIdentifiers):
unit_ids = ElementIdentifiers('unit_ids', unit_ids)
if not isinstance(spike_times, VectorData):
spike_times = VectorData('spike_times', spike_times)
if not isinstance(spike_times_index, VectorIndex):
spike_times_index = VectorIndex('spike_times_index', spike_times_index)
self.unit_ids = unit_ids
self.spike_times = spike_times
self.spike_times_index = spike_times_index
self.__iv = IndexedVector(self.spike_times, self.spike_times_index)
@docval({'name': 'index', 'type': int,
'doc': 'the index of the unit in unit_ids to retrieve spike times for'})
def get_unit_spike_times(self, **kwargs):
index = getargs('index', kwargs)
return np.array(self.__iv.get_vector(index))
@docval({'name': 'unit_id', 'type': int, 'doc': 'the unit to add spike times for'},
{'name': 'spike_times', 'type': ('array_data',), 'doc': 'the spike times for the unit'},
rtype=int, returns="the index of the added unit in this UnitTimes")
def add_spike_times(self, **kwargs):
unit_id, spike_times = getargs('unit_id', 'spike_times', kwargs)
self.unit_ids.append(unit_id)
return self.__iv.add_vector(spike_times)
```
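A minimal sketch of exercising the `UnitTimes` container defined above, based only on the docval signatures shown; it assumes the legacy pynwb API this file belongs to.
```python
from pynwb.misc import UnitTimes

# Sketch only; assumes the legacy pynwb version that ships the misc.py above.
ut = UnitTimes(source='spike sorter output')
idx_a = ut.add_spike_times(1, [0.01, 0.23, 0.45])  # returns the index of the added unit
idx_b = ut.add_spike_times(2, [0.10, 0.20])
print(ut.get_unit_spike_times(idx_a))              # -> array([0.01, 0.23, 0.45])
print(ut.get_unit_spike_times(idx_b))              # -> array([0.1, 0.2])
```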
#### File: integration/ui_write/test_core.py
```python
from pynwb.form.build import GroupBuilder, DatasetBuilder
from pynwb.core import DynamicTable
from . import base
class TestDynamicTableIO(base.TestMapRoundTrip):
def setUpContainer(self):
return DynamicTable('trials', 'DynamicTable integration test', 'a test table')
def setUpBuilder(self):
id_builder = DatasetBuilder('id', data=[],
attributes={
'help': 'unique identifiers for a list of elements',
'namespace': 'core',
'neurodata_type': 'ElementIdentifiers',
})
return GroupBuilder('trials',
attributes={
'help': 'A column-centric table',
'description': 'a test table',
'namespace': 'core',
'neurodata_type': 'DynamicTable',
'source': 'DynamicTable integration test',
'colnames': tuple(),
},
datasets={'id': id_builder})
def addContainer(self, nwbfile):
nwbfile.trials = self.container
def getContainer(self, nwbfile):
return nwbfile.trials
class TestTrials(base.TestMapRoundTrip):
def setUpContainer(self):
# this will get ignored
return DynamicTable('trials', 'DynamicTable integration test', 'a placeholder table')
def addContainer(self, nwbfile):
nwbfile.add_trial_column('foo', 'an int column')
nwbfile.add_trial_column('bar', 'a float column')
nwbfile.add_trial_column('baz', 'a string column')
nwbfile.add_trial_column('qux', 'a boolean column')
nwbfile.add_trial({'start': 0., 'end': 1., 'foo': 27, 'bar': 28.0, 'baz': "29", 'qux': True})
nwbfile.add_trial({'start': 2., 'end': 3., 'foo': 37, 'bar': 38.0, 'baz': "39", 'qux': False})
# reset the thing
self.container = nwbfile.trials
def getContainer(self, nwbfile):
return nwbfile.trials
```
#### File: unit/pynwb_tests/test_ophys.py
```python
import unittest
from pynwb.ophys import TwoPhotonSeries, RoiResponseSeries, DfOverF, Fluorescence, PlaneSegmentation, \
ImageSegmentation, OpticalChannel, ImagingPlane, MotionCorrection, CorrectedImageStack
from pynwb.image import ImageSeries
from pynwb.base import TimeSeries
from pynwb.device import Device
import numpy as np
def CreatePlaneSegmentation():
w, h = 5, 5
img_mask = [[[1.0 for x in range(w)] for y in range(h)], [[2.0 for x in range(w)] for y in range(h)]]
pix_mask = [[1, 2, 1.0], [3, 4, 1.0], [5, 6, 1.0],
[7, 8, 2.0], [9, 10, 2.0]]
iSS = ImageSeries(name='test_iS', source='a hypothetical source', data=list(), unit='unit',
external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=list())
oc = OpticalChannel('test_optical_channel', 'test_source', 'description', 500.)
device = Device(name='device_name', source='device_source')
ip = ImagingPlane('test_imaging_plane', 'test_source', oc, 'description', device, 600.,
'imaging_rate', 'indicator', 'location', (1, 2, 1, 2, 3), 4.0, 'unit', 'reference_frame')
pS = PlaneSegmentation('test source', 'description', ip, 'test_name', iSS)
pS.add_roi("1234", pix_mask[0:3], img_mask[0])
pS.add_roi("5678", pix_mask[3:5], img_mask[1])
return pS
class TwoPhotonSeriesConstructor(unittest.TestCase):
def test_init(self):
oc = OpticalChannel('test_name', 'test_source', 'description', 500.)
self.assertEqual(oc.description, 'description')
self.assertEqual(oc.emission_lambda, 500.)
device = Device(name='device_name', source='device_source')
ip = ImagingPlane('test_imaging_plane', 'test source', oc, 'description', device, 600.,
'imaging_rate', 'indicator', 'location', (50, 100, 3), 4.0, 'unit', 'reference_frame')
self.assertEqual(ip.optical_channel[0], oc)
self.assertEqual(ip.device, device)
self.assertEqual(ip.excitation_lambda, 600.)
self.assertEqual(ip.imaging_rate, 'imaging_rate')
self.assertEqual(ip.indicator, 'indicator')
self.assertEqual(ip.location, 'location')
self.assertEqual(ip.manifold, (50, 100, 3))
self.assertEqual(ip.conversion, 4.0)
self.assertEqual(ip.unit, 'unit')
self.assertEqual(ip.reference_frame, 'reference_frame')
tPS = TwoPhotonSeries('test_tPS', 'a hypothetical source', unit='unit', field_of_view=list(),
imaging_plane=ip, pmt_gain=1.0, scan_line_rate=2.0, external_file=['external_file'],
starting_frame=[1, 2, 3], format='tiff', timestamps=list())
self.assertEqual(tPS.name, 'test_tPS')
self.assertEqual(tPS.source, 'a hypothetical source')
self.assertEqual(tPS.unit, 'unit')
self.assertEqual(tPS.field_of_view, list())
self.assertEqual(tPS.imaging_plane, ip)
self.assertEqual(tPS.pmt_gain, 1.0)
self.assertEqual(tPS.scan_line_rate, 2.0)
self.assertEqual(tPS.external_file, ['external_file'])
self.assertEqual(tPS.starting_frame, [1, 2, 3])
self.assertEqual(tPS.format, 'tiff')
self.assertEqual(tPS.dimension, [np.nan])
def test_args(self):
oc = OpticalChannel('test_name', 'test_source', 'description', 500.)
device = Device(name='device_name', source='device_source')
ip = ImagingPlane('test_imaging_plane', 'test source', oc, 'description', device, 600.,
'imaging_rate', 'indicator', 'location', (50, 100, 3), 4.0, 'unit', 'reference_frame')
with self.assertRaises(ValueError): # no data or external file
TwoPhotonSeries('test_tPS', 'a hypothetical source', unit='unit', field_of_view=list(),
imaging_plane=ip, pmt_gain=1.0, scan_line_rate=2.0,
starting_frame=[1, 2, 3], format='tiff', timestamps=list())
class MotionCorrectionConstructor(unittest.TestCase):
def test_init(self):
mc = MotionCorrection('test_mc', list())
self.assertEqual(mc.source, 'test_mc')
class CorrectedImageStackConstructor(unittest.TestCase):
def test_init(self):
is1 = ImageSeries(name='is1', source='a hypothetical source', data=list(), unit='unit',
external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=list())
is2 = ImageSeries(name='is2', source='a hypothetical source', data=list(), unit='unit',
external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=list())
tstamps = np.arange(1.0, 100.0, 0.1, dtype=float)
ts = TimeSeries("test_ts", "a hypothetical source", list(range(len(tstamps))), 'unit', timestamps=tstamps)
cis = CorrectedImageStack("CorrectedImageStackConstructor", is1, is2, ts)
self.assertEqual(cis.source, "CorrectedImageStackConstructor")
self.assertEqual(cis.corrected, is1)
self.assertEqual(cis.original, is2)
self.assertEqual(cis.xy_translation, ts)
class RoiResponseSeriesConstructor(unittest.TestCase):
def test_init(self):
ip = CreatePlaneSegmentation()
rt_region = ip.create_roi_table_region('the second ROI', region=[1])
ts = RoiResponseSeries('test_ts', 'a hypothetical source', list(), 'unit', rt_region, timestamps=list())
self.assertEqual(ts.name, 'test_ts')
self.assertEqual(ts.source, 'a hypothetical source')
self.assertEqual(ts.unit, 'unit')
self.assertEqual(ts.rois, rt_region)
class DfOverFConstructor(unittest.TestCase):
def test_init(self):
ip = CreatePlaneSegmentation()
rt_region = ip.create_roi_table_region('the second ROI', region=[1])
rrs = RoiResponseSeries('test_ts', 'a hypothetical source', list(), 'unit', rt_region, timestamps=list())
dof = DfOverF('test_dof', rrs)
self.assertEqual(dof.source, 'test_dof')
self.assertEqual(dof.roi_response_series['test_ts'], rrs)
class FluorescenceConstructor(unittest.TestCase):
def test_init(self):
ip = CreatePlaneSegmentation()
rt_region = ip.create_roi_table_region('the second ROI', region=[1])
ts = RoiResponseSeries('test_ts', 'a hypothetical source', list(), 'unit', rt_region, timestamps=list())
ff = Fluorescence('test_ff', ts)
self.assertEqual(ff.source, 'test_ff')
self.assertEqual(ff.roi_response_series['test_ts'], ts)
self.assertEqual(ff.roi_response_series['test_ts'], ts)
class ImageSegmentationConstructor(unittest.TestCase):
def test_init(self):
ps = CreatePlaneSegmentation()
iS = ImageSegmentation('test_source', ps, name='test_iS')
self.assertEqual(iS.name, 'test_iS')
self.assertEqual(iS.source, 'test_source')
self.assertEqual(iS.plane_segmentations[ps.name], ps)
self.assertEqual(iS[ps.name], iS.plane_segmentations[ps.name])
class PlaneSegmentationConstructor(unittest.TestCase):
def test_init(self):
w, h = 5, 5
img_mask = [[[1.0 for x in range(w)] for y in range(h)], [[2.0 for x in range(w)] for y in range(h)]]
pix_mask = [[1, 2, 1.0], [3, 4, 1.0], [5, 6, 1.0],
[7, 8, 2.0], [9, 10, 2.0]]
iSS = ImageSeries(name='test_iS', source='a hypothetical source', data=list(), unit='unit',
external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=list())
device = Device(name='device_name', source='device_source')
oc = OpticalChannel('test_optical_channel', 'test_source', 'description', 500.)
ip = ImagingPlane('test_imaging_plane', 'test_source', oc, 'description', device, 600.,
'imaging_rate', 'indicator', 'location', (1, 2, 1, 2, 3), 4.0, 'unit', 'reference_frame')
pS = PlaneSegmentation('test source', 'description', ip, 'test_name', iSS)
pS.add_roi("1234", pix_mask[0:3], img_mask[0])
pS.add_roi("5678", pix_mask[3:5], img_mask[1])
self.assertEqual(pS.description, 'description')
self.assertEqual(pS.source, 'test source')
self.assertEqual(pS.imaging_plane, ip)
self.assertEqual(pS.reference_images, iSS)
self.assertEqual(pS.pixel_masks.data, pix_mask)
self.assertEqual(pS.image_masks.data, img_mask)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JesseLivezey/weird_weather",
"score": 3
}
|
#### File: JesseLivezey/weird_weather/plotting.py
```python
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import rcParams
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from importlib import reload
import utils
reload(utils)
def plot_annual_jacket_crossings(df, stations, temp):
"""
Plot the annual jacket-crossing probability for all stations.
Parameters
----------
df : dataframe
Data for all stations.
stations : list
List of stations to include in plot.
temp : float
Threshold temperature used to compute the jacket-crossing probability.
Returns
-------
f : figure
Matplotlib figure.
"""
days = np.linspace(1, 365, num=365)
f, axes = plt.subplots(len(stations), 1,
sharex=True,
figsize=(6, 2*len(stations)))
for ii, (st, ax) in enumerate(zip(stations, axes)):
name = utils.short_name(st)
data = utils.single_station_data(df, st)
cross, years = utils.annual_jacket_crossing(data, temp)
mean = np.nanmean(cross, axis=0)
frac_cross = (mean > .5).sum()/float(mean.shape[0])
ax.fill_between(days, np.zeros_like(mean), mean, facecolor='blue',
alpha=.5)
ax.text(7.5, .65, '{}% of days\nP>.5\n@{} deg.'.format(np.rint(100*frac_cross).astype(int),
temp),
bbox={'facecolor':'white', 'alpha':0.5, 'pad':5})
ax.axhline(.5, c='black')
ax.set_ylim([0, 1])
ax.set_xlim([0, 366])
ax.set_title(name)
ax.set_yticks(np.linspace(0, 1, 5))
ax.set_xticks([79, 172, 265, 344])
ax.set_xticklabels(['March 20', 'June 21', 'Sept. 22', 'Dec. 21'])
ax.set_ylabel('P(jacket crossing)')
ax.grid()
return f
def plot_daily_fluctuations(df, stations):
"""
Plot daily fluctuations for TMAX and TMIN.
Parameters
----------
df : dataframe
Data for all stations.
stations : list
List of stations to include in plot.
Returns
-------
f : figure
Matplotlib figure.
"""
f, axes = plt.subplots(len(stations), 1,
sharex=True,
figsize=(5, 2*len(stations)))
for ii, (st, ax) in enumerate(zip(stations, axes)):
name = utils.short_name(st)
data = utils.single_station_data(df, st)
max_data, years = utils.annual_data(data, 'TMAX')
max_data -= max_data.mean(axis=0, keepdims=True)
hist, bins = np.histogram(max_data.flatten(), bins=60,
range=[-30, 30], density=True)
ax.step(bins[:-1], hist, 'r', where='mid', label='Daily max')
min_data, years = utils.annual_data(data, 'TMIN')
min_data -= min_data.mean(axis=0, keepdims=True)
hist, bins = np.histogram(min_data.flatten(), bins=60,
range=[-30, 30], density=True)
ax.step(bins[:-1], hist, 'b', where='mid', label='Daily min')
ax.set_title(name)
ax.set_ylabel('prob. density')
ax.set_ylim([0, .15])
ax.set_yticks(np.arange(0, .16, .05))
ax.grid()
axes[0].legend(loc='best', ncol=2)
ax.set_xlabel('Deviation from mean daily temperature')
return f
def plot_annual_power_spectrum(df, stations):
"""
Plot the annual temperature power spectrum.
Parameters
----------
df : dataframe
Data for all stations.
stations : list
List of stations to include in plot.
Returns
-------
f : figure
Matplotlib figure.
"""
f, axes = plt.subplots(len(stations), 1,
sharex=True,
figsize=(6, 2*len(stations)))
for ii, (st, ax) in enumerate(zip(stations, axes)):
name = utils.short_name(st)
data = utils.single_station_data(df, st)
freqs, tmin_power = utils.mean_annual_powerspectrum(data, 'TMIN')
freqs, tmax_power = utils.mean_annual_powerspectrum(data, 'TMAX')
ax.loglog(freqs, tmin_power, c='blue')
ax.loglog(freqs, tmin_power, '.', c='blue', alpha=.5)
ax.loglog(freqs, tmax_power, c='red')
ax.loglog(freqs, tmax_power, '.', c='red', alpha=.5)
ax.set_title(name)
ax.set_ylabel('Temp.')
ax.axvline(12, c='black', linestyle='--', label='Monthly fluctuations')
ax.axvline(52, c='black', label='Weekly fluctuations')
ax.set_ylim([1e1, 1e4])
ax.set_xlim([1e0, 2e2])
ax.grid()
axes[0].plot(0, 0, 'r-', label='Daily max')
axes[0].plot(0, 0, 'b-', label='Daily min')
leg = axes[0].legend(loc='best', ncol=2)
axes[-1].set_xlabel('Cycles/year')
return f
def plot_annual_daily_comparison(df, stations):
"""
Plot annual vs. daily temperature variations for all stations.
Parameters
----------
df : dataframe
Data for all stations.
stations : list
List of stations to include in plot.
Returns
-------
f : figure
Matplotlib figure.
"""
colors = matplotlib.cm.get_cmap('plasma')
colors = [colors(v) for v in np.linspace(0, 1, len(stations))]
f, ax = plt.subplots(1)
x_max = 0.
y_max = 0.
x_min = np.inf
y_min = np.inf
for ii, st in enumerate(stations):
name = utils.short_name(st)
data = utils.single_station_data(df, st)
daily_delta = data['TMAX'] - data['TMIN']
years = sorted(set(data.index.year))
days = data.index.dayofyear
if days[0] > 1:
years = years[1:]
if days[-1] < 365:
years = years[:-1]
annual_delta = np.zeros(len(years))
for jj, year in enumerate(years):
min_t = data['TMIN'].loc[data.index.year == year].min()
min_t = min(min_t,
data['TMAX'].loc[data.index.year == year].min())
max_t = data['TMIN'].loc[data.index.year == year].max()
max_t = max(max_t,
data['TMAX'].loc[data.index.year == year].max())
annual_delta[jj] = max_t - min_t
e = Ellipse(xy=[annual_delta.mean(), np.nanmean(daily_delta)],
height=2*np.nanstd(daily_delta),
width=2*annual_delta.std())
ax.plot(annual_delta.mean(), np.nanmean(daily_delta), 'o', c=colors[ii])
x_max = max(x_max, annual_delta.mean() + 1.5*annual_delta.std())
y_max = max(y_max, np.nanmean(daily_delta) + 1.5*np.nanstd(daily_delta))
x_min = min(x_min, annual_delta.mean() - 1.5*annual_delta.std())
y_min = min(y_min, np.nanmean(daily_delta) - 1.5*np.nanstd(daily_delta))
ax.add_artist(e)
e.set_facecolor(colors[ii])
e.set_alpha(.5)
e.set_clip_box(ax.bbox)
ax.plot(0, 0, c=colors[ii], label=name)
ax.set_xlim([x_min, x_max])
ax.set_ylim([y_min, y_max])
leg = ax.legend(loc='best', prop={'size': 12}, ncol=2)
leg.get_frame().set_alpha(0.5)
ax.set_xlabel('Annual temp. swing')
ax.set_ylabel('Daily temp. swing')
f.set_size_inches(8., 8.*(y_max-y_min)/(x_max-x_min))
plt.grid()
return f
def plot_annual_temperature(df, stations, t_range=None):
"""
Plot mean annual temperature variations for all stations.
Parameters
----------
df : dataframe
Data for all stations.
stations : list
List of stations to include in plot.
t_range : list, optional
Values to clip temperatures to.
Returns
-------
f : figure
Matplotlib figure.
"""
if t_range is None:
t_range = [0, 100]
time = np.linspace(1, 365, num=365)
f, axes = plt.subplots(len(stations), 1,
sharex=True,
figsize=(6, 2*len(stations)))
for ii, (st, ax) in enumerate(zip(stations, axes)):
name = utils.short_name(st)
data = utils.single_station_data(df, st)
mean = np.zeros(365)
delta = np.zeros(365)
for day in range(365):
temps = data[['TMIN', 'TMAX']].loc[data['day'] == day+1]
mean[day] = np.nanmean(temps.values)
delta[day] = np.nanmean((temps['TMAX']-temps['TMIN']).values)
ax.fill_between(time, mean+delta/2., mean-delta/2., facecolor='red',
alpha=.5)
ax.plot(time, mean, c='black')
ax.set_ylim(t_range)
ax.set_xlim([0, 366])
ax.set_title(name)
#ax.set_xlabel('Day of year')
ax.set_xticks([79, 172, 265, 344])
ax.set_xticklabels(['March 20', 'June 21', 'Sept. 22', 'Dec. 21'])
ax.grid()
ax.set_ylabel('Temp.')
return f
def plot_stations_all_time(df, stations, t_range=None):
"""
Plot all min and max temp data for all stations.
Parameters
----------
df : dataframe
Data for all stations.
stations : list
List of stations to include in plot.
t_range : list, optional
Values to clip temperatures to.
Returns
-------
f : figure
Matplotlib figure.
"""
if t_range is None:
t_range = [-20, 120]
f, axes = plt.subplots(len(stations), 1,
figsize=(12, 2*len(stations)))
for ii, (st, ax) in enumerate(zip(stations, axes)):
name = utils.short_name(st)
data = utils.single_station_data(df, st)
if ii == 0:
legend = True
else:
legend = False
time = matplotlib.dates.date2num(data.index.date)
for p, c in zip(['TMIN', 'TMAX'], ['blue', 'red']):
ax.plot_date(time, data[p], c=c, fmt='-', alpha=.5)
mean = data[p].mean()
rolling = data[p].rolling(window=30, min_periods=10,
center=True).median()
y = mean*np.ones_like(time)
ax.plot_date(time, y, c=c, zorder=10, fmt='-')
ax.plot_date(time, rolling, c=c, zorder=10, fmt='-')
ax.set_ylim(t_range)
ax.set_title(name)
ax.set_ylabel('Temp.')
ax.set_xlabel('Year')
return f
```
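The ellipse construction used in `plot_annual_daily_comparison` above (centred on the mean annual and daily swings, with width and height of two standard deviations) can be previewed in isolation; the synthetic swings below are invented for illustration and stand in for the station dataframe.
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse

rng = np.random.RandomState(0)
# Invented per-year annual swings and per-day daily swings for one fake station.
annual_delta = rng.normal(60.0, 5.0, size=30)    # max-minus-min temperature per year
daily_delta = rng.normal(20.0, 4.0, size=365)    # TMAX - TMIN per day

fig, ax = plt.subplots(1)
ellipse = Ellipse(xy=[annual_delta.mean(), daily_delta.mean()],
                  width=2 * annual_delta.std(),
                  height=2 * daily_delta.std())
ax.add_artist(ellipse)
ellipse.set_facecolor('C0')
ellipse.set_alpha(.5)
ax.plot(annual_delta.mean(), daily_delta.mean(), 'o', c='black')
ax.set_xlim(annual_delta.mean() - 3 * annual_delta.std(),
            annual_delta.mean() + 3 * annual_delta.std())
ax.set_ylim(daily_delta.mean() - 3 * daily_delta.std(),
            daily_delta.mean() + 3 * daily_delta.std())
ax.set_xlabel('Annual temp. swing')
ax.set_ylabel('Daily temp. swing')
fig.savefig('swing_ellipse_demo.png')
```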
|
{
"source": "JesseLT/qstrader",
"score": 3
}
|
#### File: qstrader/qstrader/event.py
```python
from __future__ import print_function
from enum import Enum
EventType = Enum("EventType", "TICK BAR SIGNAL ORDER FILL SENTIMENT")
class Event(object):
"""
Event is the base class providing an interface for all subsequent
(inherited) events that will trigger further events in the
trading infrastructure.
"""
@property
def typename(self):
return self.type.name
class TickEvent(Event):
"""
Handles the event of receiving a new market update tick,
which is defined as a ticker symbol and associated best
bid and ask from the top of the order book.
"""
def __init__(self, ticker, time, bid, ask):
"""
Initialises the TickEvent.
Parameters:
ticker - The ticker symbol, e.g. 'GOOG'.
time - The timestamp of the tick
bid - The best bid price at the time of the tick.
ask - The best ask price at the time of the tick.
"""
self.type = EventType.TICK
self.ticker = ticker
self.time = time
self.bid = bid
self.ask = ask
def __str__(self):
return "Type: %s, Ticker: %s, Time: %s, Bid: %s, Ask: %s" % (
str(self.type), str(self.ticker),
str(self.time), str(self.bid), str(self.ask)
)
def __repr__(self):
return str(self)
class BarEvent(Event):
"""
Handles the event of receiving a new market
open-high-low-close-volume bar, as would be generated
via common data providers such as Yahoo Finance.
"""
# def __init__(
# self, ticker, time, period,
# open_price, high_price, low_price,
# close_price, volume, adj_close_price=None
# ):
def __init__(self, time, period, ticker, rows):
"""
Initialises the BarEvent.
Parameters:
time - The timestamp of the bar
period - The time period covered by the bar in seconds
ticker - The ticker symbol, e.g. 'GOOG'.
rows - The bar data for this period, carrying the fields of the
earlier (commented out) constructor: the unadjusted open, high,
low and close prices, the traded volume and, where available,
the vendor (back-)adjusted closing price.
Note: It is not advised to use 'open', 'close' instead
of 'open_price', 'close_price' as 'open' shadows a Python
built-in.
"""
self.type = EventType.BAR
self.time = time
self.period = period
self.ticker = ticker
self.rows = rows
# self.__dict__.update(rows.to_dict())
# self.open_price = open_price
# self.high_price = high_price
# self.low_price = low_price
# self.close_price = close_price
# self.volume = volume
# self.adj_close_price = adj_close_price
self.period_readable = self._readable_period()
def _readable_period(self):
"""
Creates a human-readable period from the number
of seconds specified for 'period'.
For instance, converts:
* 1 -> '1sec'
* 5 -> '5sec'
* 60 -> '1min'
* 300 -> '5min'
If no period is found in the lookup table, the human
readable period is simply passed through from period,
in seconds.
"""
lut = {
1: "1sec",
5: "5sec",
10: "10sec",
15: "15sec",
30: "30sec",
60: "1min",
300: "5min",
600: "10min",
900: "15min",
1800: "30min",
3600: "1hr",
86400: "1day",
604800: "1wk"
}
if self.period in lut:
return lut[self.period]
else:
return "%ssec" % str(self.period)
# def __str__(self):
# format_str = "Type: %s, Ticker: %s, Time: %s, Period: %s, " \
# "Open: %s, High: %s, Low: %s, Close: %s, " \
# "Adj Close: %s, Volume: %s" % (
# str(self.type), str(self.ticker), str(self.time),
# str(self.period_readable), str(self.open_price),
# str(self.high_price), str(self.low_price),
# str(self.close_price), str(self.adj_close_price),
# str(self.volume)
# )
# return format_str
# def __repr__(self):
# return str(self)
class SignalEvent(Event):
"""
Handles the event of sending a Signal from a Strategy object.
This is received by a Portfolio object and acted upon.
"""
def __init__(self, ticker, action, suggested_quantity=None):
"""
Initialises the SignalEvent.
Parameters:
ticker - The ticker symbol, e.g. 'GOOG'.
action - 'BOT' (for long) or 'SLD' (for short).
suggested_quantity - Optional positively valued integer
representing a suggested absolute quantity of units
of an asset to transact in, which is used by the
PositionSizer and RiskManager.
"""
self.type = EventType.SIGNAL
self.ticker = ticker
self.action = action
self.suggested_quantity = suggested_quantity
class OrderEvent(Event):
"""
Handles the event of sending an Order to an execution system.
The order contains a ticker (e.g. GOOG), action (BOT or SLD)
and quantity.
"""
def __init__(self, ticker, action, quantity):
"""
Initialises the OrderEvent.
Parameters:
ticker - The ticker symbol, e.g. 'GOOG'.
action - 'BOT' (for long) or 'SLD' (for short).
quantity - The quantity of shares to transact.
"""
self.type = EventType.ORDER
self.ticker = ticker
self.action = action
self.quantity = quantity
def print_order(self):
"""
Outputs the values within the OrderEvent.
"""
print(
"Order: Ticker=%s, Action=%s, Quantity=%s" % (
self.ticker, self.action, self.quantity
)
)
class FillEvent(Event):
"""
Encapsulates the notion of a filled order, as returned
from a brokerage. Stores the quantity of an instrument
actually filled and at what price. In addition, stores
the commission of the trade from the brokerage.
TODO: Currently does not support filling positions at
different prices. This will be simulated by averaging
the cost.
"""
def __init__(
self, timestamp, ticker,
action, quantity,
exchange, price,
commission
):
"""
Initialises the FillEvent object.
timestamp - The timestamp when the order was filled.
ticker - The ticker symbol, e.g. 'GOOG'.
action - 'BOT' (for long) or 'SLD' (for short).
quantity - The filled quantity.
exchange - The exchange where the order was filled.
price - The price at which the trade was filled
commission - The brokerage commission for carrying out the trade.
"""
self.type = EventType.FILL
self.timestamp = timestamp
self.ticker = ticker
self.action = action
self.quantity = quantity
self.exchange = exchange
self.price = price
self.commission = commission
class SentimentEvent(Event):
"""
Handles the event of streaming a "Sentiment" value associated
with a ticker. Can be used for a generic "date-ticker-sentiment"
service, often provided by many data vendors.
"""
def __init__(self, timestamp, ticker, sentiment):
"""
Initialises the SentimentEvent.
Parameters:
timestamp - The timestamp when the sentiment was generated.
ticker - The ticker symbol, e.g. 'GOOG'.
sentiment - A string, float or integer value of "sentiment",
e.g. "bullish", -1, 5.4, etc.
"""
self.type = EventType.SENTIMENT
self.timestamp = timestamp
self.ticker = ticker
self.sentiment = sentiment
```
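A short sketch exercising the event classes defined above; the ticker, prices, and timestamp are arbitrary example values.
```python
# Arbitrary example values; the classes come from the module shown above.
from qstrader.event import SignalEvent, OrderEvent, TickEvent

signal = SignalEvent('GOOG', 'BOT', suggested_quantity=100)
order = OrderEvent(signal.ticker, signal.action, 100)
order.print_order()    # Order: Ticker=GOOG, Action=BOT, Quantity=100
print(order.typename)  # ORDER

tick = TickEvent('GOOG', '2017-01-03 09:30:00', 786.10, 786.40)
print(tick)            # Type: EventType.TICK, Ticker: GOOG, Time: ..., Bid: 786.1, Ask: 786.4
```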
|
{
"source": "jessemapel/SugarSpice",
"score": 3
}
|
#### File: python/tests/test_utils.py
```python
from pyspiceql import toUpper, toLower
def test_toUpper():
assert isinstance(toUpper('Some Test String'), str)
def test_toLower():
assert isinstance(toLower('Some Test String'), str)
```
|
{
"source": "jessemapel/swigcsm",
"score": 2
}
|
#### File: python/tests/test_api.py
```python
import csmapi
import pytest
def test_loadlib(loadlib):
assert loadlib is not None
def test_streamisd():
print(dir(csmapi))
stream = "This is a stream."
isd = csmapi.BytestreamIsd(stream)
assert stream == isd.data()
```
|
{
"source": "jessemcardle/Raspberry-Pi-Plant-Growth-Project",
"score": 3
}
|
#### File: Raspberry-Pi-Plant-Growth-Project/Calibration and Testing/upload_temp_humidty.py
```python
import sys
from urllib.request import urlopen
from time import sleep
import Adafruit_DHT as dht
# Enter Your API key here
myAPI = '<KEY>'
# URL where we will send the data; don't change it
baseURL = 'https://api.thingspeak.com/update?api_key=%s' % myAPI
def DHT22_data():
# Reading from DHT22 and storing the temperature and humidity
humi, temp = dht.read_retry(dht.DHT22, 4)
return humi, temp
while True:
try:
humi, temp = DHT22_data()
# If Reading is valid
if isinstance(humi, float) and isinstance(temp, float):
# Formatting to two decimal places
humi = '%.2f' % humi
temp = '%.2f' % temp
print(temp, humi)
# Sending the data to thingspeak
#conn = urllib2.urlopen(baseURL + '&temperature=%s&humidity=%s' % (temp, humi))
conn = urlopen(baseURL + '&field1=%s&field2=%s' % (temp, humi))
print (conn.read())
# Closing the connection
conn.close()
else:
print ('Error')
# The DHT22 needs at least 2 seconds between readings, so keep the delay well above that.
sleep(60)
except Exception as err:
# Report the error and stop the loop instead of raising a bare Exception with no context.
print('Error:', err)
break
```
|
{
"source": "jessemcbride/canvasapi",
"score": 2
}
|
#### File: canvasapi/canvasapi/assignment.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from six import python_2_unicode_compatible
from canvasapi.canvas_object import CanvasObject
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.paginated_list import PaginatedList
from canvasapi.submission import Submission
from canvasapi.user import UserDisplay
from canvasapi.util import combine_kwargs, obj_or_id
@python_2_unicode_compatible
class Assignment(CanvasObject):
def __str__(self):
return "{} ({})".format(self.name, self.id)
def delete(self, **kwargs):
"""
Delete this assignment.
:calls: `DELETE /api/v1/courses/:course_id/assignments/:id \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignments.destroy>`_
:rtype: :class:`canvasapi.assignment.Assignment`
"""
response = self._requester.request(
'DELETE',
'courses/{}/assignments/{}'.format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs)
)
return Assignment(self._requester, response.json())
def edit(self, **kwargs):
"""
Modify this assignment.
:calls: `PUT /api/v1/courses/:course_id/assignments/:id \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.update>`_
:rtype: :class:`canvasapi.assignment.Assignment`
"""
response = self._requester.request(
'PUT',
'courses/{}/assignments/{}'.format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs)
)
if 'name' in response.json():
super(Assignment, self).set_attributes(response.json())
return Assignment(self._requester, response.json())
def get_gradeable_students(self, **kwargs):
"""
List students eligible to submit the assignment.
:calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/gradeable_students \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.gradeable_students>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.user.UserDisplay`
"""
return PaginatedList(
UserDisplay,
self._requester,
'GET',
'courses/{}/assignments/{}/gradeable_students'.format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs)
)
def get_submission(self, user, **kwargs):
"""
Get a single submission, based on user id.
:calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.show>`_
:param user: The object or ID of the related user
:type user: :class:`canvasapi.user.User` or int
:rtype: :class:`canvasapi.submission.Submission`
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
'GET',
'courses/{}/assignments/{}/submissions/{}'.format(self.course_id, self.id, user_id),
_kwargs=combine_kwargs(**kwargs)
)
response_json = response.json()
response_json.update(course_id=self.course_id)
return Submission(self._requester, response_json)
def get_submissions(self, **kwargs):
"""
Get all existing submissions for this assignment.
:calls: `GET /api/v1/courses/:course_id/assignments/:assignment_id/submissions \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.submission.Submission`
"""
return PaginatedList(
Submission,
self._requester,
'GET',
'courses/{}/assignments/{}/submissions'.format(self.course_id, self.id),
{'course_id': self.course_id},
_kwargs=combine_kwargs(**kwargs)
)
def submit(self, submission, **kwargs):
"""
Makes a submission for an assignment.
:calls: `POST /api/v1/courses/:course_id/assignments/:assignment_id/submissions \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions.create>`_
:param submission: The attributes of the submission.
:type submission: dict
:rtype: :class:`canvasapi.submission.Submission`
"""
if isinstance(submission, dict) and 'submission_type' in submission:
kwargs['submission'] = submission
else:
raise RequiredFieldMissing(
"Dictionary with key 'submission_type' is required."
)
response = self._requester.request(
'POST',
'courses/{}/assignments/{}/submissions'.format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs)
)
response_json = response.json()
response_json.update(course_id=self.course_id)
return Submission(self._requester, response_json)
@python_2_unicode_compatible
class AssignmentGroup(CanvasObject):
def __str__(self):
return "{} ({})".format(self.name, self.id)
def edit(self, **kwargs):
"""
Modify this assignment group.
:calls: `PUT /api/v1/courses/:course_id/assignment_groups/:assignment_group_id \
<https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups_api.update>`_
:rtype: :class:`canvasapi.assignment.AssignmentGroup`
"""
response = self._requester.request(
'PUT',
'courses/{}/assignment_groups/{}'.format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs)
)
if 'name' in response.json():
super(AssignmentGroup, self).set_attributes(response.json())
return AssignmentGroup(self._requester, response.json())
def delete(self, **kwargs):
"""
Delete this assignment group.
:calls: `DELETE /api/v1/courses/:course_id/assignment_groups/:assignment_group_id \
<https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups_api.destroy>`_
:rtype: :class:`canvasapi.assignment.AssignmentGroup`
"""
response = self._requester.request(
'DELETE',
'courses/{}/assignment_groups/{}'.format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs)
)
return AssignmentGroup(self._requester, response.json())
```
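A hedged usage sketch for `Assignment.submit()` above; the base URL, token, and IDs are placeholders, and the submission dict reflects the `submission_type` requirement enforced in the code.
```python
# Placeholders throughout; running this requires a real Canvas instance and API token.
from canvasapi import Canvas

canvas = Canvas('https://canvas.example.edu', 'API_TOKEN')
assignment = canvas.get_course(1).get_assignment(1)

# submit() requires a dict containing 'submission_type'; anything else raises
# RequiredFieldMissing (see the check in Assignment.submit above).
submission = assignment.submit({
    'submission_type': 'online_text_entry',
    'body': 'My answer',
})
print(submission)
```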
#### File: canvasapi/tests/test_course.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import uuid
import warnings
import requests
import requests_mock
from six import text_type
from six.moves.urllib.parse import quote
from canvasapi import Canvas
from canvasapi.assignment import Assignment, AssignmentGroup
from canvasapi.course import Course, CourseNickname, Page
from canvasapi.discussion_topic import DiscussionTopic
from canvasapi.grading_standard import GradingStandard
from canvasapi.enrollment import Enrollment
from canvasapi.exceptions import ResourceDoesNotExist, RequiredFieldMissing
from canvasapi.external_feed import ExternalFeed
from canvasapi.external_tool import ExternalTool
from canvasapi.file import File
from canvasapi.folder import Folder
from canvasapi.group import Group, GroupCategory
from canvasapi.module import Module
from canvasapi.outcome import OutcomeGroup, OutcomeLink
from canvasapi.quiz import Quiz
from canvasapi.rubric import Rubric
from canvasapi.section import Section
from canvasapi.submission import Submission
from canvasapi.tab import Tab
from canvasapi.user import User
from canvasapi.user import UserDisplay
from tests import settings
from tests.util import cleanup_file, register_uris
@requests_mock.Mocker()
class TestCourse(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
requires = {
'course': ['get_assignment_by_id', 'get_by_id', 'get_page'],
'quiz': ['get_by_id'],
'user': ['get_by_id']
}
register_uris(requires, m)
self.course = self.canvas.get_course(1)
self.page = self.course.get_page('my-url')
self.quiz = self.course.get_quiz(1)
self.user = self.canvas.get_user(1)
self.assignment = self.course.get_assignment(1)
# __str__()
def test__str__(self, m):
string = str(self.course)
self.assertIsInstance(string, str)
# conclude()
def test_conclude(self, m):
register_uris({'course': ['conclude']}, m)
success = self.course.conclude()
self.assertTrue(success)
# delete()
def test_delete(self, m):
register_uris({'course': ['delete']}, m)
success = self.course.delete()
self.assertTrue(success)
# update()
def test_update(self, m):
register_uris({'course': ['update']}, m)
new_name = 'New Name'
self.course.update(course={'name': new_name})
self.assertEqual(self.course.name, new_name)
# get_user()
def test_get_user(self, m):
register_uris({'course': ['get_user']}, m)
user_by_id = self.course.get_user(1)
self.assertIsInstance(user_by_id, User)
self.assertTrue(hasattr(user_by_id, 'name'))
user_by_obj = self.course.get_user(user_by_id)
self.assertIsInstance(user_by_obj, User)
self.assertTrue(hasattr(user_by_obj, 'name'))
def test_get_user_id_type(self, m):
register_uris({'course': ['get_user_id_type']}, m)
user = self.course.get_user("LOGINID", "login_id")
self.assertIsInstance(user, User)
self.assertTrue(hasattr(user, 'name'))
# get_users()
def test_get_users(self, m):
register_uris({'course': ['get_users', 'get_users_p2']}, m)
users = self.course.get_users()
user_list = [user for user in users]
self.assertEqual(len(user_list), 4)
self.assertIsInstance(user_list[0], User)
# enroll_user()
def test_enroll_user(self, m):
requires = {
'course': ['enroll_user'],
'user': ['get_by_id']
}
register_uris(requires, m)
enrollment_type = 'TeacherEnrollment'
user_by_id = self.canvas.get_user(1)
enrollment_by_id = self.course.enroll_user(user_by_id, enrollment_type)
self.assertIsInstance(enrollment_by_id, Enrollment)
self.assertTrue(hasattr(enrollment_by_id, 'type'))
self.assertEqual(enrollment_by_id.type, enrollment_type)
user_by_obj = self.canvas.get_user(self.user)
enrollment_by_obj = self.course.enroll_user(user_by_obj, enrollment_type)
self.assertIsInstance(enrollment_by_obj, Enrollment)
self.assertTrue(hasattr(enrollment_by_obj, 'type'))
self.assertEqual(enrollment_by_obj.type, enrollment_type)
# get_recent_students()
def test_get_recent_students(self, m):
recent = {'course': ['get_recent_students', 'get_recent_students_p2']}
register_uris(recent, m)
students = self.course.get_recent_students()
student_list = [student for student in students]
self.assertEqual(len(student_list), 4)
self.assertIsInstance(student_list[0], User)
self.assertTrue(hasattr(student_list[0], 'name'))
# preview_html()
def test_preview_html(self, m):
register_uris({'course': ['preview_html']}, m)
html_str = "<script></script><p>hello</p>"
prev_html = self.course.preview_html(html_str)
self.assertIsInstance(prev_html, text_type)
self.assertEqual(prev_html, "<p>hello</p>")
# get_settings()
def test_get_settings(self, m):
register_uris({'course': ['settings']}, m)
settings = self.course.get_settings()
self.assertIsInstance(settings, dict)
# update_settings()
def test_update_settings(self, m):
register_uris({'course': ['update_settings']}, m)
settings = self.course.update_settings()
self.assertIsInstance(settings, dict)
self.assertTrue(settings['hide_final_grades'])
# upload()
def test_upload(self, m):
register_uris({'course': ['upload', 'upload_final']}, m)
filename = 'testfile_course_{}'.format(uuid.uuid4().hex)
try:
with open(filename, 'w+') as file:
response = self.course.upload(file)
self.assertTrue(response[0])
self.assertIsInstance(response[1], dict)
self.assertIn('url', response[1])
finally:
cleanup_file(filename)
# reset()
def test_reset(self, m):
register_uris({'course': ['reset']}, m)
course = self.course.reset()
self.assertIsInstance(course, Course)
self.assertTrue(hasattr(course, 'name'))
# create_quiz()
def test_create_quiz(self, m):
register_uris({'course': ['create_quiz']}, m)
title = 'Newer Title'
new_quiz = self.course.create_quiz({'title': title})
self.assertIsInstance(new_quiz, Quiz)
self.assertTrue(hasattr(new_quiz, 'title'))
self.assertEqual(new_quiz.title, title)
self.assertTrue(hasattr(new_quiz, 'course_id'))
self.assertEqual(new_quiz.course_id, self.course.id)
def test_create_quiz_fail(self, m):
with self.assertRaises(RequiredFieldMissing):
self.course.create_quiz({})
# get_quiz()
def test_get_quiz(self, m):
register_uris({'course': ['get_quiz']}, m)
target_quiz_by_id = self.course.get_quiz(1)
self.assertIsInstance(target_quiz_by_id, Quiz)
self.assertTrue(hasattr(target_quiz_by_id, 'course_id'))
self.assertEqual(target_quiz_by_id.course_id, self.course.id)
target_quiz_by_obj = self.course.get_quiz(target_quiz_by_id)
self.assertIsInstance(target_quiz_by_obj, Quiz)
self.assertTrue(hasattr(target_quiz_by_obj, 'course_id'))
self.assertEqual(target_quiz_by_obj.course_id, self.course.id)
def test_get_quiz_fail(self, m):
register_uris({'generic': ['not_found']}, m)
with self.assertRaises(ResourceDoesNotExist):
self.course.get_quiz(settings.INVALID_ID)
# get_quizzes()
def test_get_quizzes(self, m):
register_uris({'course': ['list_quizzes', 'list_quizzes2']}, m)
quizzes = self.course.get_quizzes()
quiz_list = [quiz for quiz in quizzes]
self.assertEqual(len(quiz_list), 4)
self.assertIsInstance(quiz_list[0], Quiz)
self.assertTrue(hasattr(quiz_list[0], 'course_id'))
self.assertEqual(quiz_list[0].course_id, self.course.id)
# get_modules()
def test_get_modules(self, m):
register_uris({'course': ['list_modules', 'list_modules2']}, m)
modules = self.course.get_modules()
module_list = [module for module in modules]
self.assertEqual(len(module_list), 4)
self.assertIsInstance(module_list[0], Module)
self.assertTrue(hasattr(module_list[0], 'course_id'))
self.assertEqual(module_list[0].course_id, self.course.id)
# get_module()
def test_get_module(self, m):
register_uris({'course': ['get_module_by_id']}, m)
target_module_by_id = self.course.get_module(1)
self.assertIsInstance(target_module_by_id, Module)
self.assertTrue(hasattr(target_module_by_id, 'course_id'))
self.assertEqual(target_module_by_id.course_id, self.course.id)
target_module_by_obj = self.course.get_module(target_module_by_id)
self.assertIsInstance(target_module_by_obj, Module)
self.assertTrue(hasattr(target_module_by_obj, 'course_id'))
self.assertEqual(target_module_by_obj.course_id, self.course.id)
# create_module()
def test_create_module(self, m):
register_uris({'course': ['create_module']}, m)
name = 'Name'
new_module = self.course.create_module(module={'name': name})
self.assertIsInstance(new_module, Module)
self.assertTrue(hasattr(new_module, 'name'))
self.assertTrue(hasattr(new_module, 'course_id'))
self.assertEqual(new_module.course_id, self.course.id)
def test_create_module_fail(self, m):
with self.assertRaises(RequiredFieldMissing):
self.course.create_module(module={})
# get_enrollments()
def test_get_enrollments(self, m):
register_uris({'course': ['list_enrollments', 'list_enrollments_2']}, m)
enrollments = self.course.get_enrollments()
enrollment_list = [enrollment for enrollment in enrollments]
self.assertEqual(len(enrollment_list), 4)
self.assertIsInstance(enrollment_list[0], Enrollment)
# get_sections()
def test_get_sections(self, m):
register_uris({'course': ['get_sections', 'get_sections_p2']}, m)
sections = self.course.get_sections()
section_list = [section for section in sections]
self.assertEqual(len(section_list), 4)
self.assertIsInstance(section_list[0], Section)
# get_section
def test_get_section(self, m):
register_uris({'course': ['get_section']}, m)
section_by_id = self.course.get_section(1)
self.assertIsInstance(section_by_id, Section)
section_by_obj = self.course.get_section(section_by_id)
self.assertIsInstance(section_by_obj, Section)
# create_assignment()
def test_create_assignment(self, m):
register_uris({'course': ['create_assignment']}, m)
name = '<NAME>'
assignment = self.course.create_assignment(assignment={'name': name})
self.assertIsInstance(assignment, Assignment)
self.assertTrue(hasattr(assignment, 'name'))
self.assertEqual(assignment.name, name)
self.assertEqual(assignment.id, 1)
def test_create_assignment_fail(self, m):
with self.assertRaises(RequiredFieldMissing):
self.course.create_assignment(assignment={})
# get_assignment()
def test_get_assignment(self, m):
register_uris({'course': ['get_assignment_by_id']}, m)
assignment_by_id = self.course.get_assignment(1)
self.assertIsInstance(assignment_by_id, Assignment)
self.assertTrue(hasattr(assignment_by_id, 'name'))
assignment_by_obj = self.course.get_assignment(self.assignment)
self.assertIsInstance(assignment_by_obj, Assignment)
self.assertTrue(hasattr(assignment_by_obj, 'name'))
# get_assignments()
def test_get_assignments(self, m):
requires = {'course': ['get_all_assignments', 'get_all_assignments2']}
register_uris(requires, m)
assignments = self.course.get_assignments()
assignment_list = [assignment for assignment in assignments]
self.assertIsInstance(assignments[0], Assignment)
self.assertEqual(len(assignment_list), 4)
# show_front_page()
def test_show_front_page(self, m):
register_uris({'course': ['show_front_page']}, m)
front_page = self.course.show_front_page()
self.assertIsInstance(front_page, Page)
self.assertTrue(hasattr(front_page, 'url'))
self.assertTrue(hasattr(front_page, 'title'))
# edit_front_page()
def test_edit_front_page(self, m):
register_uris({'course': ['edit_front_page']}, m)
new_front_page = self.course.edit_front_page()
self.assertIsInstance(new_front_page, Page)
self.assertTrue(hasattr(new_front_page, 'url'))
self.assertTrue(hasattr(new_front_page, 'title'))
# get_page()
def test_get_page(self, m):
register_uris({'course': ['get_page']}, m)
url = 'my-url'
page = self.course.get_page(url)
self.assertIsInstance(page, Page)
# get_pages()
def test_get_pages(self, m):
register_uris({'course': ['get_pages', 'get_pages2']}, m)
pages = self.course.get_pages()
page_list = [page for page in pages]
self.assertEqual(len(page_list), 4)
self.assertIsInstance(page_list[0], Page)
self.assertTrue(hasattr(page_list[0], 'course_id'))
self.assertEqual(page_list[0].course_id, self.course.id)
# create_page()
def test_create_page(self, m):
register_uris({'course': ['create_page']}, m)
title = "Newest Page"
new_page = self.course.create_page(wiki_page={'title': title})
self.assertIsInstance(new_page, Page)
self.assertTrue(hasattr(new_page, 'title'))
self.assertEqual(new_page.title, title)
self.assertTrue(hasattr(new_page, 'course_id'))
self.assertEqual(new_page.course_id, self.course.id)
def test_create_page_fail(self, m):
with self.assertRaises(RequiredFieldMissing):
self.course.create_page(settings.INVALID_ID)
# get_external_tool()
def test_get_external_tool(self, m):
register_uris({'external_tool': ['get_by_id_course']}, m)
tool_by_id = self.course.get_external_tool(1)
self.assertIsInstance(tool_by_id, ExternalTool)
self.assertTrue(hasattr(tool_by_id, 'name'))
tool_by_obj = self.course.get_external_tool(tool_by_id)
self.assertIsInstance(tool_by_obj, ExternalTool)
self.assertTrue(hasattr(tool_by_obj, 'name'))
# get_external_tools()
def test_get_external_tools(self, m):
requires = {'course': ['get_external_tools', 'get_external_tools_p2']}
register_uris(requires, m)
tools = self.course.get_external_tools()
tool_list = [tool for tool in tools]
self.assertIsInstance(tool_list[0], ExternalTool)
self.assertEqual(len(tool_list), 4)
def test_list_sections(self, m):
register_uris({'course': ['get_sections', 'get_sections_p2']}, m)
with warnings.catch_warnings(record=True) as warning_list:
sections = self.course.list_sections()
section_list = [sect for sect in sections]
self.assertEqual(len(section_list), 4)
self.assertIsInstance(section_list[0], Section)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
def test_create_course_section(self, m):
register_uris({'course': ['create_section']}, m)
section = self.course.create_course_section()
self.assertIsInstance(section, Section)
def test_list_groups(self, m):
requires = {'course': ['list_groups_context', 'list_groups_context2']}
register_uris(requires, m)
groups = self.course.list_groups()
group_list = [group for group in groups]
self.assertIsInstance(group_list[0], Group)
self.assertEqual(len(group_list), 4)
# create_group_category()
def test_create_group_category(self, m):
register_uris({'course': ['create_group_category']}, m)
name_str = "<NAME>"
response = self.course.create_group_category(name=name_str)
self.assertIsInstance(response, GroupCategory)
# list_group_categories()
def test_list_group_categories(self, m):
register_uris({'course': ['list_group_categories']}, m)
response = self.course.list_group_categories()
category_list = [category for category in response]
self.assertIsInstance(category_list[0], GroupCategory)
# get_discussion_topic()
def test_get_discussion_topic(self, m):
register_uris({'course': ['get_discussion_topic']}, m)
topic_id = 1
discussion_by_id = self.course.get_discussion_topic(topic_id)
self.assertIsInstance(discussion_by_id, DiscussionTopic)
self.assertTrue(hasattr(discussion_by_id, 'course_id'))
self.assertEqual(discussion_by_id.course_id, 1)
discussion_by_obj = self.course.get_discussion_topic(discussion_by_id)
self.assertIsInstance(discussion_by_obj, DiscussionTopic)
self.assertTrue(hasattr(discussion_by_obj, 'course_id'))
self.assertEqual(discussion_by_obj.course_id, 1)
# get_file()
def test_get_file(self, m):
register_uris({'course': ['get_file']}, m)
file_by_id = self.course.get_file(1)
self.assertIsInstance(file_by_id, File)
self.assertEqual(file_by_id.display_name, 'Course_File.docx')
self.assertEqual(file_by_id.size, 2048)
file_by_obj = self.course.get_file(file_by_id)
self.assertIsInstance(file_by_obj, File)
self.assertEqual(file_by_obj.display_name, 'Course_File.docx')
self.assertEqual(file_by_obj.size, 2048)
# get_full_discussion_topic()
def test_get_full_discussion_topic(self, m):
register_uris(
{
'course': [
'get_discussion_topics',
'get_full_discussion_topic'
]
}, m)
topic_id = 1
discussion_by_id = self.course.get_full_discussion_topic(topic_id)
self.assertIsInstance(discussion_by_id, dict)
self.assertIn('view', discussion_by_id)
self.assertIn('participants', discussion_by_id)
self.assertIn('id', discussion_by_id)
self.assertEqual(discussion_by_id['id'], topic_id)
discussion_topics = self.course.get_discussion_topics()
discussion_by_obj = self.course.get_full_discussion_topic(discussion_topics[0])
self.assertIsInstance(discussion_by_obj, dict)
self.assertIn('view', discussion_by_obj)
self.assertIn('participants', discussion_by_obj)
self.assertIn('id', discussion_by_obj)
self.assertEqual(discussion_by_obj['id'], topic_id)
# get_discussion_topics()
def test_get_discussion_topics(self, m):
register_uris({'course': ['get_discussion_topics']}, m)
response = self.course.get_discussion_topics()
discussion_list = [discussion for discussion in response]
self.assertIsInstance(discussion_list[0], DiscussionTopic)
self.assertTrue(hasattr(discussion_list[0], 'course_id'))
self.assertEqual(2, len(discussion_list))
# create_discussion_topic()
def test_create_discussion_topic(self, m):
register_uris({'course': ['create_discussion_topic']}, m)
title = "Topic 1"
discussion = self.course.create_discussion_topic()
self.assertIsInstance(discussion, DiscussionTopic)
self.assertTrue(hasattr(discussion, 'course_id'))
self.assertEqual(title, discussion.title)
self.assertEqual(discussion.course_id, 1)
# reorder_pinned_topics()
def test_reorder_pinned_topics(self, m):
# Custom matcher to test that params are set correctly
def custom_matcher(request):
match_text = '1,2,3'
if request.text == 'order={}'.format(quote(match_text)):
resp = requests.Response()
resp._content = b'{"reorder": true, "order": [1, 2, 3]}'
resp.status_code = 200
return resp
m.add_matcher(custom_matcher)
order = [1, 2, 3]
discussions = self.course.reorder_pinned_topics(order=order)
self.assertTrue(discussions)
def test_reorder_pinned_topics_tuple(self, m):
register_uris({'course': ['reorder_pinned_topics']}, m)
order = (1, 2, 3)
discussions = self.course.reorder_pinned_topics(order=order)
self.assertTrue(discussions)
def test_reorder_pinned_topics_comma_separated_string(self, m):
register_uris({'course': ['reorder_pinned_topics']}, m)
order = "1,2,3"
discussions = self.course.reorder_pinned_topics(order=order)
self.assertTrue(discussions)
def test_reorder_pinned_topics_invalid_input(self, m):
order = "invalid string"
with self.assertRaises(ValueError):
self.course.reorder_pinned_topics(order=order)
# get_assignment_group()
def test_get_assignment_group(self, m):
register_uris({'assignment': ['get_assignment_group']}, m)
assignment_group_by_id = self.course.get_assignment_group(5)
self.assertIsInstance(assignment_group_by_id, AssignmentGroup)
self.assertTrue(hasattr(assignment_group_by_id, 'id'))
self.assertTrue(hasattr(assignment_group_by_id, 'name'))
self.assertTrue(hasattr(assignment_group_by_id, 'course_id'))
self.assertEqual(assignment_group_by_id.course_id, 1)
assignment_group_by_obj = self.course.get_assignment_group(assignment_group_by_id)
self.assertIsInstance(assignment_group_by_obj, AssignmentGroup)
self.assertTrue(hasattr(assignment_group_by_obj, 'id'))
self.assertTrue(hasattr(assignment_group_by_obj, 'name'))
self.assertTrue(hasattr(assignment_group_by_obj, 'course_id'))
self.assertEqual(assignment_group_by_obj.course_id, 1)
    # list_assignment_groups()
def test_list_assignment_groups(self, m):
register_uris({
'assignment': ['list_assignment_groups', 'get_assignment_group']
}, m)
response = self.course.list_assignment_groups()
asnt_group_list = [assignment_group for assignment_group in response]
self.assertIsInstance(asnt_group_list[0], AssignmentGroup)
self.assertTrue(hasattr(asnt_group_list[0], 'id'))
self.assertTrue(hasattr(asnt_group_list[0], 'name'))
self.assertTrue(hasattr(asnt_group_list[0], 'course_id'))
self.assertEqual(asnt_group_list[0].course_id, 1)
# create_assignment_group()
def test_create_assignment_group(self, m):
register_uris({'assignment': ['create_assignment_group']}, m)
response = self.course.create_assignment_group()
self.assertIsInstance(response, AssignmentGroup)
self.assertTrue(hasattr(response, 'id'))
self.assertEqual(response.id, 3)
# create_external_tool()
def test_create_external_tool(self, m):
register_uris({'external_tool': ['create_tool_course']}, m)
response = self.course.create_external_tool(
name="External Tool - Course",
privacy_level="public",
consumer_key="key",
shared_secret="secret"
)
self.assertIsInstance(response, ExternalTool)
self.assertTrue(hasattr(response, 'id'))
self.assertEqual(response.id, 20)
# get_course_level_participation_data()
def test_get_course_level_participation_data(self, m):
register_uris({'course': ['get_course_level_participation_data']}, m)
response = self.course.get_course_level_participation_data()
self.assertIsInstance(response, list)
# get_course_level_assignment_data()
def test_get_course_level_assignment_data(self, m):
register_uris({'course': ['get_course_level_assignment_data']}, m)
response = self.course.get_course_level_assignment_data()
self.assertIsInstance(response, list)
# get_course_level_student_summary_data()
def test_get_course_level_student_summary_data(self, m):
register_uris({'course': ['get_course_level_student_summary_data']}, m)
response = self.course.get_course_level_student_summary_data()
self.assertIsInstance(response, list)
# get_user_in_a_course_level_participation_data()
def test_get_user_in_a_course_level_participation_data(self, m):
register_uris({'course': ['get_user_in_a_course_level_participation_data']}, m)
response = self.course.get_user_in_a_course_level_participation_data(1)
self.assertIsInstance(response, list)
response = self.course.get_user_in_a_course_level_participation_data(self.user)
self.assertIsInstance(response, list)
# get_user_in_a_course_level_assignment_data()
def test_get_user_in_a_course_level_assignment_data(self, m):
register_uris({'course': ['get_user_in_a_course_level_assignment_data']}, m)
response = self.course.get_user_in_a_course_level_assignment_data(1)
self.assertIsInstance(response, list)
response = self.course.get_user_in_a_course_level_assignment_data(self.user)
self.assertIsInstance(response, list)
# get_user_in_a_course_level_messaging_data()
def test_get_user_in_a_course_level_messaging_data(self, m):
register_uris({'course': ['get_user_in_a_course_level_messaging_data']}, m)
response = self.course.get_user_in_a_course_level_messaging_data(1)
self.assertIsInstance(response, list)
response = self.course.get_user_in_a_course_level_messaging_data(self.user)
self.assertIsInstance(response, list)
# submit_assignment()
def test_submit_assignment(self, m):
register_uris({'assignment': ['submit']}, m)
with warnings.catch_warnings(record=True) as warning_list:
assignment_id = 1
sub_type = "online_upload"
sub_dict = {'submission_type': sub_type}
submission_by_id = self.course.submit_assignment(assignment_id, sub_dict)
self.assertIsInstance(submission_by_id, Submission)
self.assertTrue(hasattr(submission_by_id, 'submission_type'))
self.assertEqual(submission_by_id.submission_type, sub_type)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
with warnings.catch_warnings(record=True) as warning_list:
submission_by_obj = self.course.submit_assignment(self.assignment, sub_dict)
self.assertIsInstance(submission_by_obj, Submission)
self.assertTrue(hasattr(submission_by_obj, 'submission_type'))
self.assertEqual(submission_by_obj.submission_type, sub_type)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
def test_submit_assignment_fail(self, m):
with warnings.catch_warnings(record=True) as warning_list:
with self.assertRaises(RequiredFieldMissing):
self.course.submit_assignment(1, {})
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
# list_submissions()
def test_list_submissions(self, m):
register_uris({'submission': ['list_submissions']}, m)
with warnings.catch_warnings(record=True) as warning_list:
assignment_id = 1
submissions_by_id = self.course.list_submissions(assignment_id)
submission_list_by_id = [submission for submission in submissions_by_id]
self.assertEqual(len(submission_list_by_id), 2)
self.assertIsInstance(submission_list_by_id[0], Submission)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
with warnings.catch_warnings(record=True) as warning_list:
submissions_by_obj = self.course.list_submissions(self.assignment)
submission_list_by_obj = [submission for submission in submissions_by_obj]
self.assertEqual(len(submission_list_by_obj), 2)
self.assertIsInstance(submission_list_by_obj[0], Submission)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
    # list_multiple_submissions()
def test_list_multiple_submissions(self, m):
register_uris({'course': ['list_multiple_submissions']}, m)
submissions = self.course.list_multiple_submissions()
submission_list = [submission for submission in submissions]
self.assertEqual(len(submission_list), 2)
self.assertIsInstance(submission_list[0], Submission)
def test_list_multiple_submissions_grouped_param(self, m):
register_uris({'course': ['list_multiple_submissions']}, m)
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter('always')
submissions = self.course.list_multiple_submissions(grouped=True)
submission_list = [submission for submission in submissions]
# Ensure using the `grouped` param raises a warning
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, UserWarning)
self.assertEqual(
text_type(warning_list[-1].message),
'The `grouped` parameter must be empty. Removing kwarg `grouped`.'
)
self.assertEqual(len(submission_list), 2)
self.assertIsInstance(submission_list[0], Submission)
# get_submission()
def test_get_submission(self, m):
register_uris({
'course': ['get_assignment_by_id'],
'submission': ['get_by_id_course']
}, m)
assignment_for_id = 1
user_id = 1
with warnings.catch_warnings(record=True) as warning_list:
submission_by_id = self.course.get_submission(assignment_for_id, user_id)
self.assertIsInstance(submission_by_id, Submission)
self.assertTrue(hasattr(submission_by_id, 'submission_type'))
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
with warnings.catch_warnings(record=True) as warning_list:
assignment_for_obj = self.course.get_assignment(1)
submission_by_obj = self.course.get_submission(assignment_for_obj, self.user)
self.assertIsInstance(submission_by_obj, Submission)
self.assertTrue(hasattr(submission_by_obj, 'submission_type'))
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
# update_submission()
def test_update_submission(self, m):
register_uris({
'course': ['get_assignment_by_id'],
'submission': ['edit', 'get_by_id_course']
}, m)
assignment_for_id = 1
user_id = 1
with warnings.catch_warnings(record=True) as warning_list:
submission = self.course.update_submission(
assignment_for_id,
user_id,
submission={'excuse': True}
)
self.assertIsInstance(submission, Submission)
self.assertTrue(hasattr(submission, 'excused'))
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
assignment_for_obj = self.course.get_assignment(1)
with warnings.catch_warnings(record=True) as warning_list:
submission = self.course.update_submission(
assignment_for_obj,
self.user,
submission={'excuse': True}
)
self.assertIsInstance(submission, Submission)
self.assertTrue(hasattr(submission, 'excused'))
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
# list_gradeable_students()
def test_list_gradeable_students(self, m):
register_uris({'course': ['get_assignment_by_id', 'list_gradeable_students']}, m)
assignment_for_id = 1
with warnings.catch_warnings(record=True) as warning_list:
students_by_id = self.course.list_gradeable_students(assignment_for_id)
student_list_by_id = [student for student in students_by_id]
self.assertEqual(len(student_list_by_id), 2)
self.assertIsInstance(student_list_by_id[0], UserDisplay)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
assignment_for_obj = self.course.get_assignment(1)
with warnings.catch_warnings(record=True) as warning_list:
students_by_id = self.course.list_gradeable_students(assignment_for_obj)
student_list_by_id = [student for student in students_by_id]
self.assertEqual(len(student_list_by_id), 2)
self.assertIsInstance(student_list_by_id[0], UserDisplay)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
    # mark_submission_as_read()
def test_mark_submission_as_read(self, m):
register_uris({'course': ['get_assignment_by_id', 'mark_submission_as_read']}, m)
assignment_for_id = 1
user_for_id = 1
with warnings.catch_warnings(record=True) as warning_list:
submission_by_id = self.course.mark_submission_as_read(assignment_for_id, user_for_id)
self.assertTrue(submission_by_id)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
assignment_for_obj = self.course.get_assignment(1)
with warnings.catch_warnings(record=True) as warning_list:
submission_by_obj = self.course.mark_submission_as_read(assignment_for_obj, self.user)
self.assertTrue(submission_by_obj)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
    # mark_submission_as_unread()
def test_mark_submission_as_unread(self, m):
register_uris({'course': ['get_assignment_by_id', 'mark_submission_as_unread']}, m)
assignment_for_id = 1
user_for_id = 1
with warnings.catch_warnings(record=True) as warning_list:
submission_by_id = self.course.mark_submission_as_unread(
assignment_for_id,
user_for_id
)
self.assertTrue(submission_by_id)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
assignment_for_obj = self.course.get_assignment(1)
with warnings.catch_warnings(record=True) as warning_list:
submission_by_obj = self.course.mark_submission_as_unread(
assignment_for_obj,
self.user
)
self.assertTrue(submission_by_obj)
self.assertEqual(len(warning_list), 1)
self.assertEqual(warning_list[-1].category, DeprecationWarning)
# list_external_feeds()
def test_list_external_feeds(self, m):
register_uris({'course': ['list_external_feeds']}, m)
feeds = self.course.list_external_feeds()
feed_list = [feed for feed in feeds]
self.assertEqual(len(feed_list), 2)
self.assertTrue(hasattr(feed_list[0], 'url'))
self.assertIsInstance(feed_list[0], ExternalFeed)
# create_external_feed()
def test_create_external_feed(self, m):
register_uris({'course': ['create_external_feed']}, m)
url_str = "http://example.com/myblog.rss"
response = self.course.create_external_feed(url=url_str)
self.assertIsInstance(response, ExternalFeed)
# delete_external_feed()
def test_delete_external_feed(self, m):
register_uris({'course': ['delete_external_feed']}, m)
ef_id = 1
deleted_ef_by_id = self.course.delete_external_feed(ef_id)
self.assertIsInstance(deleted_ef_by_id, ExternalFeed)
self.assertTrue(hasattr(deleted_ef_by_id, 'url'))
self.assertEqual(deleted_ef_by_id.display_name, "My Blog")
deleted_ef_by_obj = self.course.delete_external_feed(deleted_ef_by_id)
self.assertIsInstance(deleted_ef_by_obj, ExternalFeed)
self.assertTrue(hasattr(deleted_ef_by_obj, 'url'))
self.assertEqual(deleted_ef_by_obj.display_name, "My Blog")
# list_files()
def test_course_files(self, m):
register_uris({'course': ['list_course_files', 'list_course_files2']}, m)
files = self.course.list_files()
file_list = [file for file in files]
self.assertEqual(len(file_list), 4)
self.assertIsInstance(file_list[0], File)
# get_folder()
def test_get_folder(self, m):
register_uris({'course': ['get_folder']}, m)
folder_by_id = self.course.get_folder(1)
self.assertEqual(folder_by_id.name, "Folder 1")
self.assertIsInstance(folder_by_id, Folder)
folder_by_obj = self.course.get_folder(folder_by_id)
self.assertEqual(folder_by_obj.name, "Folder 1")
self.assertIsInstance(folder_by_obj, Folder)
# list_folders()
def test_list_folders(self, m):
register_uris({'course': ['list_folders']}, m)
folders = self.course.list_folders()
folder_list = [folder for folder in folders]
self.assertEqual(len(folder_list), 2)
self.assertIsInstance(folder_list[0], Folder)
# create_folder()
def test_create_folder(self, m):
register_uris({'course': ['create_folder']}, m)
name_str = "Test String"
response = self.course.create_folder(name=name_str)
self.assertIsInstance(response, Folder)
# list_tabs()
def test_list_tabs(self, m):
register_uris({'course': ['list_tabs']}, m)
tabs = self.course.list_tabs()
tab_list = [tab for tab in tabs]
self.assertEqual(len(tab_list), 2)
self.assertIsInstance(tab_list[0], Tab)
# update_tab()
def test_update_tab(self, m):
register_uris({'course': ['update_tab']}, m)
tab_id = "pages"
new_position = 3
tab = self.course.update_tab(tab_id, position=new_position)
self.assertIsInstance(tab, Tab)
self.assertEqual(tab.position, 3)
    # get_rubric()
def test_get_rubric(self, m):
register_uris({'course': ['get_rubric_single']}, m)
rubric_id = 1
rubric = self.course.get_rubric(rubric_id)
self.assertIsInstance(rubric, Rubric)
self.assertEqual(rubric.id, rubric_id)
self.assertEqual(rubric.title, "Course Rubric 1")
    # list_rubrics()
def test_list_rubrics(self, m):
register_uris({'course': ['get_rubric_multiple']}, m)
rubrics = self.course.list_rubrics()
self.assertEqual(len(list(rubrics)), 2)
self.assertIsInstance(rubrics[0], Rubric)
self.assertEqual(rubrics[0].id, 1)
self.assertEqual(rubrics[0].title, "Course Rubric 1")
self.assertIsInstance(rubrics[1], Rubric)
self.assertEqual(rubrics[1].id, 2)
self.assertEqual(rubrics[1].title, "Course Rubric 2")
# get_root_outcome_group()
def test_get_root_outcome_group(self, m):
register_uris({'outcome': ['course_root_outcome_group']}, m)
outcome_group = self.course.get_root_outcome_group()
self.assertIsInstance(outcome_group, OutcomeGroup)
self.assertEqual(outcome_group.id, 1)
self.assertEqual(outcome_group.title, "ROOT")
# get_outcome_group()
def test_get_outcome_group(self, m):
register_uris({'outcome': ['course_get_outcome_group']}, m)
outcome_group_by_id = self.course.get_outcome_group(1)
self.assertIsInstance(outcome_group_by_id, OutcomeGroup)
self.assertEqual(outcome_group_by_id.id, 1)
self.assertEqual(outcome_group_by_id.title, "Course outcome group title")
outcome_group_by_obj = self.course.get_outcome_group(outcome_group_by_id)
self.assertIsInstance(outcome_group_by_obj, OutcomeGroup)
self.assertEqual(outcome_group_by_obj.id, 1)
self.assertEqual(outcome_group_by_obj.title, "Course outcome group title")
# get_outcome_groups_in_context()
def test_get_outcome_groups_in_context(self, m):
register_uris({'outcome': ['course_outcome_groups_in_context']}, m)
outcome_group_list = self.course.get_outcome_groups_in_context()
self.assertIsInstance(outcome_group_list[0], OutcomeGroup)
self.assertEqual(outcome_group_list[0].id, 1)
self.assertEqual(outcome_group_list[0].title, "ROOT")
# get_all_outcome_links_in_context()
def test_get_outcome_links_in_context(self, m):
register_uris({'outcome': ['course_outcome_links_in_context']}, m)
outcome_link_list = self.course.get_all_outcome_links_in_context()
self.assertIsInstance(outcome_link_list[0], OutcomeLink)
self.assertEqual(outcome_link_list[0].outcome_group['id'], 2)
self.assertEqual(outcome_link_list[0].outcome_group['title'], "test outcome")
# get_outcome_results()
def test_get_outcome_results(self, m):
register_uris({'outcome': ['course_get_outcome_results']}, m)
result = self.course.get_outcome_results()
self.assertIsInstance(result, dict)
self.assertIsInstance(result['outcome_results'], list)
# get_outcome_result_rollups()
def test_get_outcome_result_rollups(self, m):
register_uris({'outcome': ['course_get_outcome_result_rollups']}, m)
result = self.course.get_outcome_result_rollups()
self.assertIsInstance(result, dict)
self.assertIsInstance(result['rollups'], list)
# add_grading_standards()
def test_add_grading_standards(self, m):
register_uris({'course': ['add_grading_standards']}, m)
title = "Grading Standard 1"
grading_scheme = []
grading_scheme.append({"name": "A", "value": 90})
grading_scheme.append({"name": "B", "value": 80})
grading_scheme.append({"name": "C", "value": 70})
response = self.course.add_grading_standards(title, grading_scheme)
self.assertIsInstance(response, GradingStandard)
self.assertTrue(hasattr(response, 'title'))
self.assertEqual(title, response.title)
self.assertTrue(hasattr(response, "grading_scheme"))
self.assertEqual(response.grading_scheme[0].get('name'), "A")
self.assertEqual(response.grading_scheme[0].get('value'), 0.9)
# add_grading_standards()
def test_add_grading_standards_empty_list(self, m):
register_uris({'course': ['add_grading_standards']}, m)
with self.assertRaises(ValueError):
self.course.add_grading_standards("title", [])
def test_add_grading_standards_non_dict_list(self, m):
register_uris({'course': ['add_grading_standards']}, m)
with self.assertRaises(ValueError):
self.course.add_grading_standards("title", [1, 2, 3])
def test_add_grading_standards_missing_value_key(self, m):
register_uris({'course': ['add_grading_standards']}, m)
with self.assertRaises(ValueError):
self.course.add_grading_standards("title", [{'name': "test"}])
def test_add_grading_standards_missing_name_key(self, m):
register_uris({'course': ['add_grading_standards']}, m)
with self.assertRaises(ValueError):
self.course.add_grading_standards("title", [{'value': 2}])
# get_grading_standards()
def test_get_grading_standards(self, m):
register_uris({'course': ['get_grading_standards']}, m)
standards = self.course.get_grading_standards()
standard_list = [standard for standard in standards]
self.assertEqual(len(standard_list), 2)
self.assertIsInstance(standard_list[0], GradingStandard)
self.assertIsInstance(standard_list[1], GradingStandard)
    # get_single_grading_standard()
def test_get_single_grading_standard(self, m):
register_uris({'course': ['get_single_grading_standard']}, m)
response = self.course.get_single_grading_standard(1)
self.assertIsInstance(response, GradingStandard)
self.assertTrue(hasattr(response, 'id'))
self.assertEqual(1, response.id)
self.assertTrue(hasattr(response, 'title'))
self.assertEqual("Grading Standard 1", response.title)
self.assertTrue(hasattr(response, "grading_scheme"))
self.assertEqual(response.grading_scheme[0].get('name'), "A")
self.assertEqual(response.grading_scheme[0].get('value'), 0.9)
@requests_mock.Mocker()
class TestCourseNickname(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
with requests_mock.Mocker() as m:
register_uris({'user': ['course_nickname']}, m)
self.nickname = self.canvas.get_course_nickname(1)
# __str__()
def test__str__(self, m):
string = str(self.nickname)
self.assertIsInstance(string, str)
# remove()
def test_remove(self, m):
register_uris({'user': ['remove_nickname']}, m)
deleted_nick = self.nickname.remove()
self.assertIsInstance(deleted_nick, CourseNickname)
self.assertTrue(hasattr(deleted_nick, 'nickname'))
```
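The tests above exercise canvasapi's `Course` methods against mocked endpoints. For orientation, a minimal live-usage sketch of the same calls might look like the following; the base URL, API key, and IDs are placeholders, not values from the test fixtures.

```python
from canvasapi import Canvas

canvas = Canvas("https://canvas.example.edu", "API_KEY")  # placeholder credentials
course = canvas.get_course(1)

assignment = course.get_assignment(1)
print(assignment.name)

for page in course.get_pages():
    print(page)
```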
|
{
"source": "jessemcdowell/MediaMonkeyToSpotifyMigrator",
"score": 3
}
|
#### File: jessemcdowell/MediaMonkeyToSpotifyMigrator/Matching.py
```python
def disable_log():
global log, format_candidate
log = lambda text: 0
format_candidate = lambda candidate: 'candidate'
disable_log()
def enable_log(candidate_formatter):
global log, format_candidate
log = lambda text: print(f' # Matching: {text}')
format_candidate = candidate_formatter
def best_match(candidates, matchers, minimum_score):
best_candidate = None
best_candidate_score = -1
for candidate in candidates:
score = sum(matcher(candidate) for matcher in matchers)
log(f'- candidate: {format_candidate(candidate)}, score: {score}')
if score > best_candidate_score and score >= minimum_score:
best_candidate = candidate
best_candidate_score = score
log(f'winner: {format_candidate(best_candidate) if best_candidate else "None"}')
return best_candidate
def boolean_matcher(predicate, score):
return lambda input: score if predicate(input) else 0
def _strings_are_equal(source, compare_to, total_possible_score):
if source == compare_to:
return total_possible_score
if source.lower() == compare_to.lower():
return total_possible_score - 1
return 0
def text_matcher(selector, source, total_possible_score):
def break_into_chunks(source):
chunk_size = 4
if len(source) <= chunk_size:
yield source
return
index = 0;
while (index + chunk_size) <= len(source):
yield source[index:index + chunk_size]
index += 1
def match(source, compare_to, total_possible_score):
score = _strings_are_equal(source, compare_to, total_possible_score)
if score > 0:
return score
score = 0
sequential_match_index = 0
number_of_chunks = 0
for chunk in break_into_chunks(source):
found_at_index = compare_to.find(chunk)
if found_at_index >= 0:
if found_at_index == sequential_match_index:
score += 1
elif found_at_index < sequential_match_index:
score += 0.8
else:
score += 0.7
sequential_match_index = found_at_index + 1
else:
sequential_match_index = -1
number_of_chunks += 1
return score * total_possible_score / number_of_chunks
def evaluate_function(input):
compare_source = source
compare_input = selector(input)
return match(compare_source, compare_input, total_possible_score * 0.06) + \
match(compare_input, compare_source, total_possible_score * 0.04) + \
match(compare_source.lower(), compare_input.lower(), total_possible_score * 0.5) + \
match(compare_input.lower(), compare_source.lower(), total_possible_score * 0.4);
return evaluate_function
def simple_text_matcher(selector, source, total_possible_score):
def match(source, compare_to, total_possible_score):
equal_score = _strings_are_equal(source, compare_to, total_possible_score)
if equal_score > 0:
return equal_score
found_at_index = compare_to.lower().find(source.lower())
if found_at_index >= 0:
factor = 1
if found_at_index == len(compare_to) - len(source):
factor = 0.7
elif found_at_index > 0:
factor = 0.3
return total_possible_score * (len(source) / len(compare_to)) * factor
return 0
return lambda input: match(source, selector(input), total_possible_score)
```
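A short, hypothetical usage sketch for the matcher combinators above; the candidate dictionaries, field names, and score weights are assumptions made for illustration, not values used by the migrator.

```python
# Rank search results against a local track by combining several matchers.
from Matching import best_match, boolean_matcher, simple_text_matcher, text_matcher

candidates = [
    {'name': 'Comfortably Numb', 'artist': 'Pink Floyd', 'explicit': False},
    {'name': 'Comfortably Numb - Live', 'artist': 'Pink Floyd', 'explicit': False},
]

matchers = [
    text_matcher(lambda c: c['name'], 'Comfortably Numb', 50),
    simple_text_matcher(lambda c: c['artist'], 'Pink Floyd', 30),
    boolean_matcher(lambda c: not c['explicit'], 5),
]

# The highest-scoring candidate wins, but only if it clears the minimum score.
best = best_match(candidates, matchers, minimum_score=40)
print(best['name'] if best else 'no match')
```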
|
{
"source": "jessemcready/FindMyStuff",
"score": 2
}
|
#### File: jessemcready/FindMyStuff/get_item_wrapper.py
```python
import os
def get_item_wrapper(client, item_name, user_id):
return client.get_item(
TableName=os.environ['DYNAMODB_TABLE_NAME'],
Key={
"item_name":{
"S": item_name
},
"userId": {
"S": user_id
}
}
)
```
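A minimal caller sketch for this Lambda helper, assuming boto3 is available and `DYNAMODB_TABLE_NAME` is set as the wrapper expects; the table name and key values are placeholders.

```python
import os
import boto3

from get_item_wrapper import get_item_wrapper

os.environ.setdefault('DYNAMODB_TABLE_NAME', 'FindMyStuffItems')  # placeholder table
client = boto3.client('dynamodb')

response = get_item_wrapper(client, item_name='keys', user_id='user-123')
item = response.get('Item')  # absent when no matching record exists
print(item)
```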
|
{
"source": "jesse-michael-han/flax",
"score": 2
}
|
#### File: flax/flax/serialization.py
```python
import collections
import enum
import jax
import msgpack
import numpy as np
_STATE_DICT_REGISTRY = {}
class _NamedTuple:
"""Fake type marker for namedtuple for registry."""
pass
def _is_namedtuple(x):
"""Duck typing test for namedtuple factory-generated objects."""
return isinstance(x, tuple) and hasattr(x, '_fields')
def from_state_dict(target, state):
"""Restores the state of the given target using a state dict.
This function takes the current target as an argument. This
lets us know the exact structure of the target,
as well as lets us add assertions that shapes and dtypes don't change.
In practice, none of the leaf values in `target` are actually
used. Only the tree structure, shapes and types.
Args:
target: the object of which the state should be restored.
state: a dictionary generated by `to_state_dict` with the desired new
state for `target`.
Returns:
A copy of the object with the restored state.
"""
if _is_namedtuple(target):
ty = _NamedTuple
else:
ty = type(target)
if ty not in _STATE_DICT_REGISTRY:
return state
ty_from_state_dict = _STATE_DICT_REGISTRY[ty][1]
return ty_from_state_dict(target, state)
def to_state_dict(target):
"""Returns a dictionary with the state of the given target."""
if _is_namedtuple(target):
ty = _NamedTuple
else:
ty = type(target)
if ty not in _STATE_DICT_REGISTRY:
return target
ty_to_state_dict = _STATE_DICT_REGISTRY[ty][0]
state_dict = ty_to_state_dict(target)
assert isinstance(state_dict, dict), 'A state dict must be a Python dict.'
for key in state_dict.keys():
assert isinstance(key, str), 'A state dict must only have string keys.'
return state_dict
def register_serialization_state(ty, ty_to_state_dict, ty_from_state_dict,
override=False):
"""Register a type for serialization.
Args:
ty: the type to be registered
ty_to_state_dict: a function that takes an instance of ty and
returns its state as a dictionary.
ty_from_state_dict: a function that takes an instance of ty and
a state dict, and returns a copy of the instance with the restored state.
override: override a previously registered serialization handler
(default: False).
"""
if ty in _STATE_DICT_REGISTRY and not override:
raise ValueError(f'a serialization handler for "{ty.__name__}"'
' is already registered')
_STATE_DICT_REGISTRY[ty] = (ty_to_state_dict, ty_from_state_dict)
def _list_state_dict(xs):
return {str(i): to_state_dict(x) for i, x in enumerate(xs)}
def _restore_list(xs, state_dict):
if len(state_dict) != len(xs):
    raise ValueError('The size of the list and the state dict do not match,'
                     f' got {len(xs)} and {len(state_dict)}.')
ys = []
for i in range(len(state_dict)):
y = from_state_dict(xs[i], state_dict[str(i)])
ys.append(y)
return ys
def _dict_state_dict(xs):
return {key: to_state_dict(value) for key, value in xs.items()}
def _restore_dict(xs, states):
return {key: from_state_dict(value, states[key])
for key, value in xs.items()}
def _namedtuple_state_dict(nt):
return {'name': nt.__class__.__name__,
'fields': {str(i): to_state_dict(x)
for i, x in enumerate(nt._fields)},
'values': {str(i): to_state_dict(x)
for i, x in enumerate(nt)}
}
def _restore_namedtuple(xs, state_dict):
"""Rebuild namedtuple from serialized dict."""
if len(state_dict['values']) != len(xs):
    raise ValueError('The size of the list and the state dict do not match,'
                     f' got {len(xs)} and {len(state_dict["values"])}.')
fields = [state_dict['fields'][str(i)] for i in range(len(xs))]
namedtuple_class = collections.namedtuple(
state_dict['name'], fields)
ys = []
for i in range(len(state_dict['values'])):
y = from_state_dict(xs[i], state_dict['values'][str(i)])
ys.append(y)
return namedtuple_class(*ys)
register_serialization_state(dict, _dict_state_dict, _restore_dict)
register_serialization_state(list, _list_state_dict, _restore_list)
register_serialization_state(
tuple, _list_state_dict,
lambda xs, state_dict: tuple(_restore_list(list(xs), state_dict)))
register_serialization_state(_NamedTuple,
_namedtuple_state_dict,
_restore_namedtuple)
# On-the-wire / disk serialization format
# We encode state-dicts via msgpack, using its custom type extension.
# https://github.com/msgpack/msgpack/blob/master/spec.md
#
# - ndarrays and DeviceArrays are serialized to nested msgpack-encoded string
# of (shape-tuple, dtype-name (e.g. 'float32'), row-major array-bytes).
# Note: only simple ndarray types are supported, no objects or fields.
#
# - native complex scalars are converted to nested msgpack-encoded tuples
# (real, imag).
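# Illustrative layout (a sketch, not an exact byte dump): a float32 ndarray of shape
# (2, 3) is packed as ExtType(1, msgpack.packb(((2, 3), 'float32', <24 raw bytes>))),
# while the native complex scalar 1+2j becomes ExtType(2, msgpack.packb((1.0, 2.0))).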
def _ndarray_to_bytes(arr):
"""Save ndarray to simple msgpack encoding."""
if isinstance(arr, jax.xla.DeviceArray):
arr = np.array(arr)
if arr.dtype.hasobject or arr.dtype.isalignedstruct:
raise ValueError('Object and structured dtypes not supported '
'for serialization of ndarrays.')
tpl = (arr.shape, arr.dtype.name, arr.tobytes('C'))
return msgpack.packb(tpl, use_bin_type=True)
def _dtype_from_name(name):
"""Handle JAX bfloat16 dtype correctly."""
if name == b'bfloat16':
return jax.numpy.bfloat16
else:
return np.dtype(name)
def _ndarray_from_bytes(data):
"""Load ndarray from simple msgpack encoding."""
shape, dtype_name, buffer = msgpack.unpackb(data, raw=True)
return np.frombuffer(buffer,
dtype=_dtype_from_name(dtype_name),
count=-1,
offset=0).reshape(shape, order='C')
class _MsgpackExtType(enum.IntEnum):
"""Messagepack custom type ids."""
ndarray = 1
native_complex = 2
npscalar = 3
def _msgpack_ext_pack(x):
"""Messagepack encoders for custom types."""
if isinstance(x, (np.ndarray, jax.xla.DeviceArray)):
return msgpack.ExtType(_MsgpackExtType.ndarray, _ndarray_to_bytes(x))
if np.issctype(type(x)):
# pack scalar as ndarray
return msgpack.ExtType(_MsgpackExtType.npscalar, _ndarray_to_bytes(np.asarray(x)))
elif isinstance(x, complex):
return msgpack.ExtType(_MsgpackExtType.native_complex,
msgpack.packb((x.real, x.imag)))
return x
def _msgpack_ext_unpack(code, data):
"""Messagepack decoders for custom types."""
if code == _MsgpackExtType.ndarray:
return _ndarray_from_bytes(data)
elif code == _MsgpackExtType.native_complex:
complex_tuple = msgpack.unpackb(data)
return complex(complex_tuple[0], complex_tuple[1])
elif code == _MsgpackExtType.npscalar:
ar = _ndarray_from_bytes(data)
return ar[()] # unpack ndarray to scalar
return msgpack.ExtType(code, data)
# Chunking array leaves
# msgpack has a hard limit of 2**31 - 1 bytes per object leaf. To circumvent
# this limit for giant arrays (e.g. embedding tables), we traverse the tree
# and break up arrays near the limit into flattened array chunks.
# True limit is 2**31 - 1, but leave a margin for encoding padding.
MAX_CHUNK_SIZE = 2**30
def _np_convert_in_place(d):
"""Convert any jax devicearray leaves to numpy arrays in place."""
if isinstance(d, dict):
for k, v in d.items():
if isinstance(v, jax.xla.DeviceArray):
d[k] = np.array(v)
elif isinstance(v, dict):
_np_convert_in_place(v)
elif isinstance(d, jax.xla.DeviceArray):
return np.array(d)
return d
_tuple_to_dict = lambda tpl: dict([(str(x), y) for x, y in enumerate(tpl)])
_dict_to_tuple = lambda dct: tuple(dct[str(i)] for i in range(len(dct)))
def _chunk(arr):
"""Convert array to a canonical dictionary of chunked arrays."""
chunksize = max(1, int(MAX_CHUNK_SIZE / arr.dtype.itemsize))
data = {'__msgpack_chunked_array__': True,
'shape': _tuple_to_dict(arr.shape)}
flatarr = arr.reshape(-1)
chunks = [flatarr[i: i + chunksize] for i in range(0, flatarr.size, chunksize)]
data['chunks'] = _tuple_to_dict(chunks)
return data
def _unchunk(data):
"""Convert canonical dictionary of chunked arrays back into array."""
assert '__msgpack_chunked_array__' in data
shape = _dict_to_tuple(data['shape'])
flatarr = np.concatenate(_dict_to_tuple(data['chunks']))
return flatarr.reshape(shape)
def _chunk_array_leaves_in_place(d):
"""Convert oversized array leaves to safe chunked form in place."""
if isinstance(d, dict):
for k, v in d.items():
if isinstance(v, np.ndarray):
if v.size * v.dtype.itemsize > MAX_CHUNK_SIZE:
d[k] = _chunk(v)
elif isinstance(v, dict):
_chunk_array_leaves_in_place(v)
elif isinstance(d, np.ndarray):
if d.size * d.dtype.itemsize > MAX_CHUNK_SIZE:
return _chunk(d)
return d
def _unchunk_array_leaves_in_place(d):
"""Convert chunked array leaves back into array leaves, in place."""
if isinstance(d, dict):
if '__msgpack_chunked_array__' in d:
return _unchunk(d)
else:
for k, v in d.items():
if isinstance(v, dict) and '__msgpack_chunked_array__' in v:
d[k] = _unchunk(v)
elif isinstance(v, dict):
_unchunk_array_leaves_in_place(v)
return d
# User-facing API calls:
def msgpack_serialize(pytree):
"""Save data structure to bytes in msgpack format.
Low-level function that only supports python trees with array leaves,
for custom objects use `to_bytes`.
Args:
pytree: python tree of dict, list, tuple with python primitives
and array leaves.
Returns:
msgpack-encoded bytes of pytree.
"""
return msgpack.packb(pytree, default=_msgpack_ext_pack, strict_types=True)
def msgpack_restore(encoded_pytree):
"""Restore data structure from bytes in msgpack format.
Low-level function that only supports python trees with array leaves,
for custom objects use `from_bytes`.
Args:
encoded_pytree: msgpack-encoded bytes of python tree.
Returns:
Python tree of dict, list, tuple with python primitive
and array leaves.
"""
return msgpack.unpackb(
encoded_pytree, ext_hook=_msgpack_ext_unpack, raw=False)
def from_bytes(target, encoded_bytes):
"""Restore optimizer or other object from msgpack-serialized state-dict.
Args:
target: template object with state-dict registrations that matches
the structure being deserialized from `encoded_bytes`.
encoded_bytes: msgpack serialized object structurally isomorphic to
`target`. Typically a flax model or optimizer.
Returns:
A new object structurally isomorphic to `target` containing the updated
leaf data from saved data.
"""
state_dict = msgpack_restore(encoded_bytes)
state_dict = _unchunk_array_leaves_in_place(state_dict)
return from_state_dict(target, state_dict)
def to_bytes(target):
"""Save optimizer or other object as msgpack-serialized state-dict.
Args:
target: template object with state-dict registrations to be
serialized to msgpack format. Typically a flax model or optimizer.
Returns:
Bytes of msgpack-encoded state-dict of `target` object.
"""
state_dict = to_state_dict(target)
state_dict = _np_convert_in_place(state_dict)
state_dict = _chunk_array_leaves_in_place(state_dict)
return msgpack_serialize(state_dict)
```
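A small round-trip sketch of the user-facing calls above; the parameter tree is an arbitrary example, not a real Flax model or optimizer.

```python
import numpy as np
from flax import serialization

params = {
    'dense': {'kernel': np.ones((3, 2), dtype=np.float32),
              'bias': np.zeros(2, dtype=np.float32)},
    'step': 0,
}

raw = serialization.to_bytes(params)              # msgpack bytes of the state dict
restored = serialization.from_bytes(params, raw)  # same tree, leaves restored
assert np.allclose(restored['dense']['kernel'], params['dense']['kernel'])
```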
|
{
"source": "jesse-michael-han/neuro-cadical",
"score": 2
}
|
#### File: neuro-cadical/python/data_util.py
```python
import random
import re
import shutil
import os
import h5py
import collections
import numpy as np
import uuid
import tempfile
import sys
import itertools
from math import ceil
import torch.utils.data as td
from util import *
NMSDP = collections.namedtuple( # all fields besides dp_id must be numpy arrays
"NMSDP",
["dp_id",
"is_train",
"n_vars",
"n_clauses",
"C_idxs",
"L_idxs",
"core_var_mask",
"core_clause_mask",
"var_lemma_counts"]
)
LBDP = collections.namedtuple( # all fields besides dp_id must be numpy arrays
"LBDP",
["dp_id",
"is_train",
"n_vars",
"n_clauses",
"C_idxs",
"L_idxs",
"glue_counts"]
)
def serialize_lbdp(lbdp, f):
return serialize_nmsdp(lbdp, f)
def deserialize_lbdp(grp, dp_id):
return LBDP(
dp_id = dp_id,
is_train = grp["is_train"][()],
n_vars = grp["n_vars"][()],
n_clauses = grp["n_clauses"][()],
C_idxs = grp["C_idxs"][()],
L_idxs = grp["L_idxs"][()],
glue_counts = grp["glue_counts"][()]
)
def serialize_nmsdp(nmsdp, f):
dp_id = nmsdp.dp_id
grp = f.create_group(dp_id)
for key in nmsdp._fields[1:]:
try:
grp.create_dataset(key, data=getattr(nmsdp, key), compression="gzip")
except TypeError:
print("BAD KEY", key)
raise Exception
def deserialize_nmsdp(grp, dp_id):
return NMSDP(
dp_id = dp_id,
is_train = grp["is_train"][()],
n_vars = grp["n_vars"][()],
n_clauses = grp["n_clauses"][()],
C_idxs = grp["C_idxs"][()],
L_idxs = grp["L_idxs"][()],
core_var_mask = grp["core_var_mask"][()],
core_clause_mask = grp["core_clause_mask"][()],
var_lemma_counts = grp["var_lemma_counts"][()]
)
class DataWriter:
def __init__(self, n_datapoints_per_file, dest, out=sys.stdout):
self.n_datapoints_per_file = n_datapoints_per_file
self.dest = dest
self.TOTAL_WRITE_COUNT = 0
self.FILE_COUNT = 0
self.log_stream = out
self.tmpdir = tempfile.TemporaryDirectory()
self.prepare_next_file() # sets current file handle
if not os.path.exists(self.dest):
os.makedirs(dest)
def prepare_next_file(self):
print("Preparing next file.", file=self.log_stream)
self.FILE_COUNT += 1
self.FILE_WRITE_COUNT = 0
self.outfile = f"file{self.FILE_COUNT}_{str(uuid.uuid4())}.h5"
self.outfile_path = os.path.join(self.tmpdir.name, self.outfile)
self.current_file_handle = h5py.File(self.outfile_path, "a")
def finish_file(self):
print("Finishing and moving file.", file=self.log_stream)
if self.FILE_WRITE_COUNT > 0:
self.current_file_handle.flush()
self.current_file_handle.close()
shutil.move(self.outfile_path, os.path.join(self.dest, self.outfile))
def write_nmsdp(self, nmsdp):
print(f"FILE WRITE COUNT: {self.FILE_WRITE_COUNT}", file=self.log_stream)
print(f"FILE COUNT: {self.FILE_COUNT}", file=self.log_stream)
if self.FILE_WRITE_COUNT >= self.n_datapoints_per_file:
self.finish_file()
self.prepare_next_file()
serialize_nmsdp(nmsdp, self.current_file_handle)
self.TOTAL_WRITE_COUNT += 1
self.FILE_WRITE_COUNT += 1
def write_lbdp(self, lbdp):
print(f"FILE WRITE COUNT: {self.FILE_WRITE_COUNT}", file=self.log_stream)
print(f"FILE COUNT: {self.FILE_COUNT}", file=self.log_stream)
if self.FILE_WRITE_COUNT >= self.n_datapoints_per_file:
self.finish_file()
self.prepare_next_file()
serialize_lbdp(lbdp, self.current_file_handle)
self.TOTAL_WRITE_COUNT += 1
self.FILE_WRITE_COUNT += 1
def __del__(self):
print("Finalizing due to garbage collection.", file=self.log_stream)
self.finish_file()
class ListSharder: # responsible for cycling and sharding
def __init__(self, xs, n_shards):
self.shard_size = ceil(len(xs)/n_shards)
self.n_shards = n_shards
self.xs = xs
random.shuffle(self.xs)
@property
def xs_iter(self):
return itertools.cycle(self.xs)
def get_shard(self, index):
start = index * self.shard_size
stop = (index + 1) * self.shard_size
return list(itertools.islice(self.xs_iter, start, stop))
def batch_iterator(it, batch_size):
while True:
count = 0
result = []
try:
while count < batch_size:
result.append(next(it))
count += 1
yield result
except StopIteration:
# if len(result) == 0:
# return
# else:
# yield result
return # note(jesse, March 04 2020, 07:44 PM): drop the last batch for now
class BatchedIterable(td.IterableDataset):
def __init__(self, it, batch_size):
"""
Args:
it: an iterable
batch_size: an integer
"""
super(BatchedIterable, self).__init__()
self.it = it
self.batch_size = batch_size
def __iter__(self):
return batch_iterator(self.it.__iter__(), self.batch_size)
def shuffle_with_buffer(it0, buf_size):
buf = []
it = it0.__iter__()
FLAG1 = False
while True:
if not FLAG1:
if len(buf) < buf_size:
try:
next_val = next(it)
except StopIteration:
return
buf.append(next_val)
continue
else:
FLAG1 = True
continue
random.shuffle(buf)
for x in buf:
yield x
FLAG1=False
buf=[]
continue
class H5Dataset(td.IterableDataset):
"""
  Dataset which yields either single datapoints (LBDP namedtuples) or lists of them,
  depending on whether batch_size is None or a positive integer.
"""
def __init__(self, data_dir, batch_size=None):
super(H5Dataset, self).__init__()
self.data_dir = data_dir
self.files = files_with_extension(self.data_dir, "h5")
self.shuffle_files()
self.batch_size = batch_size
def shuffle_files(self):
random.shuffle(self.files)
def _mk_iter(self):
for f in self.files:
with h5py.File(f, "r") as f:
for dp_id in f:
yield deserialize_lbdp(f[dp_id], dp_id)
def __iter__(self):
if self.batch_size is None:
return self._mk_iter()
else:
return batch_iterator(self._mk_iter(), self.batch_size)
def dist_configure(self, rank, size):
ls = ListSharder(self.files, size)
self.files = ls.get_shard(rank)
def h5_worker_init_fn(worker_id):
worker_info = td.get_worker_info()
ls = ListSharder(worker_info.dataset.files, worker_info.num_workers)
worker_info.dataset.files = ls.get_shard(worker_info.id) # shard files only
random.shuffle(worker_info.dataset.files)
print(f"[DATALOADER] STARTING WORKER {worker_id} WITH SHARD OF {len(worker_info.dataset.files)} FILES")
def mk_H5DataLoader(data_dir, batch_size, num_workers):
"""
Helper function for constructing a parallelized H5DataLoader which shards the files in `data_dir` among `num_workers` workers.
`batch_size` is used to wrap each copy of the `H5Dataset` with a `BatchedIterable` returning lists of NMSDPs.
Since `DataLoader` automatically tries to pack batches into a tensor, we construct the DataLoader with `batch_size=1`, moving the batching into the Dataset itself.
(WARNING: this is an abuse of the API.)
"""
h5d = H5Dataset(data_dir, batch_size=batch_size)
return td.DataLoader(h5d, batch_size=1, num_workers=num_workers, worker_init_fn=h5_worker_init_fn, pin_memory=True)
```
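A hypothetical end-to-end sketch of the writer/reader pair above; the directory names, shard size, and toy datapoint contents are invented for illustration.

```python
import numpy as np
from data_util import LBDP, DataWriter, H5Dataset

writer = DataWriter(n_datapoints_per_file=10, dest="data/shards")
for i in range(3):
    dp = LBDP(
        dp_id=f"toy-{i}",
        is_train=np.array([True]),
        n_vars=np.array([4], dtype="int32"),
        n_clauses=np.array([2], dtype="int32"),
        C_idxs=np.array([0, 0, 1, 1], dtype="int32"),
        L_idxs=np.array([0, 5, 1, 2], dtype="int32"),
        glue_counts=np.zeros(4),
    )
    writer.write_lbdp(dp)
del writer  # finalization flushes and moves the last shard into `dest`

dataset = H5Dataset("data/shards", batch_size=2)
for batch in dataset:  # each batch is a list of LBDP namedtuples
    print([dp.dp_id for dp in batch])
    break
```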
#### File: neuro-cadical/python/gen_data.py
```python
import time
import glob
import datetime
import tempfile
import uuid
import os
import sys
from pysat.solvers import Solver
from pysat.formula import CNF
import numpy as np
import cnfformula
import random
import subprocess
import h5py as h5
import shutil
import torch.nn as nn
from types import SimpleNamespace
import io
import util
from data_util import *
from config import *
def lemma_occ(tsr):
n_vars = tsr.shape[0]
result = np.zeros(shape=[n_vars])
for idx in range(n_vars):
result[idx] = np.sum(tsr[idx, 0, :])
return result
def del_occ(tsr):
n_vars = tsr.shape[0]
result = np.zeros(shape=[n_vars])
for idx in range(n_vars):
result[idx] = np.sum(tsr[idx, 1, :])
return result
class CNFDataset:
def __init__(self):
raise Exception("abstract method")
def gen_formula(self):
raise Exception("abstract method")
def __iter__(self):
def _gen_formula():
while True:
try:
yield self.gen_formula()
except StopIteration:
return
return _gen_formula()
class CNFDirDataset(CNFDataset):
def __init__(self, data_dir):
self.data_dir = data_dir
self.files = util.files_with_extension(self.data_dir, "cnf")
self.file_index = 0
def gen_formula(self):
try:
cnf = CNF(from_file=self.files[self.file_index])
except IndexError:
raise StopIteration
self.file_index += 1
return cnf
class Logger:
def __init__(self):
raise Exception("Abstract method")
def write(self):
raise Exception("Abstract method")
class SimpleLogger(Logger):
def __init__(self, logfile):
self.logfile = logfile
util.check_make_path(logfile)
def write(self, *args, verbose=True):
with open(self.logfile, "a") as f:
if verbose:
print(f"({datetime.datetime.now()}):", *args)
print(f"({datetime.datetime.now()}):", *args, file=f)
class DummyLogger(Logger):
def __init__(self, verbose=False):
self.verbose = verbose
def write(self, *args, verbose=True, **kwargs):
if self.verbose and verbose:
print(*args)
def coo(fmla):
"""
Returns sparse indices of a CNF object, as two numpy arrays.
"""
C_result = []
L_result = []
for cls_idx in range(len(fmla.clauses)):
for lit in fmla.clauses[cls_idx]:
if lit > 0:
lit_enc = lit - 1
else:
lit_enc = fmla.nv + abs(lit) - 1
C_result.append(cls_idx)
L_result.append(lit_enc)
return np.array(C_result, dtype="int32"), np.array(L_result, dtype="int32")
def lbdcdl(cnf_dir, cnf, llpath, dump_dir=None, dumpfreq=50e3, timeout=None, clause_limit=1e6):
"""
Args: CNF object, optional timeout and dump flags
Returns: nothing
"""
cnf_path = os.path.join(cnf_dir, str(uuid.uuid4()) + ".cnf.gz")
cnf.to_file(cnf_path, compress_with="gzip")
cadical_command = [CADICAL_PATH]
cadical_command += ["-ll", llpath]
if dump_dir is not None:
cadical_command += ["--dump"]
cadical_command += ["-dd", dump_dir]
cadical_command += [f"--dumpfreq={int(dumpfreq)}"]
if timeout is not None:
cadical_command += ["-t", str(int(timeout))]
if clause_limit is not None:
cadical_command += [f"--clauselim={int(clause_limit)}"]
cadical_command += [f"--seed={int(np.random.choice(int(10e5)))}"]
cadical_command += [cnf_path]
subprocess.run(cadical_command, stdout=subprocess.PIPE)
def gen_lbdp(td, cnf, is_train=True, logger=DummyLogger(verbose=True), dump_dir=None, dumpfreq=50e3, timeout=None, clause_limit=1e6):
clause_limit = int(clause_limit)
fmla = cnf
counts = np.zeros(fmla.nv)
n_vars = fmla.nv
n_clauses = len(fmla.clauses)
name = str(uuid.uuid4())
with td as td:
llpath = os.path.join(td, name+".json")
lbdcdl(td, fmla, llpath, dump_dir=dump_dir, dumpfreq=dumpfreq, timeout=timeout, clause_limit=clause_limit)
with open(llpath, "r") as f:
for idx, line in enumerate(f):
counts[idx] = int(line.split()[1])
C_idxs, L_idxs = coo(fmla)
n_clauses = len(fmla.clauses)
lbdp = LBDP(
dp_id = name,
is_train = np.array([is_train], dtype="bool"),
n_vars = np.array([n_vars], dtype="int32"),
n_clauses = np.array([n_clauses], dtype="int32"),
C_idxs = np.array(C_idxs),
L_idxs = np.array(L_idxs),
glue_counts = counts
)
return lbdp
class CNFProcessor:
def __init__(self, cnfdataset, tmpdir=None, use_glue_counts=False, timeout=None):
if tmpdir is None:
self.tmpdir = tempfile.TemporaryDirectory()
else:
self.tmpdir = tmpdir
self.cnfdataset = cnfdataset
self.use_glue_counts = use_glue_counts
self.timeout = timeout
def _mk_nmsdp_gen(self):
for cnf in self.cnfdataset:
if not self.use_glue_counts:
nmsdp = gen_nmsdp(self.tmpdir, cnf)
else:
nmsdp = gen_lbdp(self.tmpdir, cnf, timeout=self.timeout)
if np.sum(nmsdp.glue_counts) <= 50:
continue
self.tmpdir = tempfile.TemporaryDirectory()
yield nmsdp
def __iter__(self):
return self._mk_nmsdp_gen()
def clean(self):
print("[CNF PROCESSOR]: CLEANING TMPDIR")
self.tmpdir.cleanup()
util.check_make_path(self.tmpdir.name)
def __del__(self):
self.clean()
```
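A hypothetical driver for the pieces above, assuming a directory of `.cnf` files and that `CADICAL_PATH` (from `config`) points at the instrumented cadical binary; paths and limits are placeholders.

```python
from data_util import DataWriter
from gen_data import CNFDirDataset, CNFProcessor

formulas = CNFDirDataset("benchmarks/cnf")        # placeholder input directory
processor = CNFProcessor(formulas, use_glue_counts=True, timeout=60)
writer = DataWriter(n_datapoints_per_file=100, dest="data/lbdp_shards")

for lbdp in processor:   # each iteration runs cadical and parses glue counts
    writer.write_lbdp(lbdp)
```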
|
{
"source": "jesse-michael-han/neurocuber-public",
"score": 2
}
|
#### File: jesse-michael-han/neurocuber-public/gen_fmlas.py
```python
import sr
from cnf_util import *
from pysat.solvers import Solver
from pysat.formula import CNF
import numpy as np
def get_unsat_randkcnf(k,n, alpha=4.4):
while True:
result = sample_randkcnf(k,n, alpha)
with Solver(name="cdl", bootstrap_with=result) as S:
if S.solve():
continue
else:
break
return result
def get_unsat_sr(n1, n2,min_cls_len=1,p_binom=0.7, p_geo=0.4):
n = np.random.choice(range(n1, n2+1))
return sr.sample_SR(n, min_cls_len, p_binom, p_geo)[0]
def get_unsat_src(x1,x2,min_cls_len=2):
return sr.gen_src(n1=x1, n2=x2, min_cls_len=min_cls_len)
```
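A quick sanity-check sketch for these generators; the sizes are arbitrary.

```python
from pysat.solvers import Solver
from gen_fmlas import get_unsat_randkcnf, get_unsat_sr

# Resamples random 3-CNFs at the default clause/variable ratio of 4.4 until one is UNSAT.
fmla = get_unsat_randkcnf(3, 40)
with Solver(name="cdl", bootstrap_with=fmla) as S:
    assert S.solve() is False      # the generator only returns unsatisfiable formulas

sr_fmla = get_unsat_sr(10, 20)     # SR formula with n drawn uniformly from 10..20
```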
#### File: jesse-michael-han/neurocuber-public/query_model.py
```python
from train_util import *
from neurocuber import *
from cnf_util import clgraph
# TODO(jesse): wrap in gRPC server
class NeuroResQuery:
def __init__(self, cfg_path, restore=True, restore_from=None):
self.cfg = ModelCfg_from_file(cfg_path)
self.model = init_neurocuber(self.cfg, restore, restore_from)
def get_logits(self, fmla):
n_clauses = len(fmla.clauses)
n_vars = fmla.nv
CL_idxs = tf.cast(clgraph(fmla), tf.int64)
G_cls = [G_cl_of_idxs(n_clauses, n_vars, CL_idxs)]
DRAT_logits, V_core_logits, C_core_logits = self.model(G_cls, tf.cast([n_clauses], tf.int64), tf.cast([n_vars], tf.int64))
return DRAT_logits[0], V_core_logits[0], C_core_logits[0]
def get_core_clause_ps(self, fmla):
logits = self.get_logits(fmla)[2]
ps = tf.nn.softmax(logits)
return ps
def get_core_var_ps(self, fmla):
logits = self.get_logits(fmla)[1]
ps = tf.nn.softmax(logits, tau=1)
return ps
def get_drat_var_ps(self, fmla):
logits = self.get_logits(fmla)[0]
ps = tf.nn.softmax(logits, tau=1)
return ps
def __call__(self, fmla):
return self.get_logits(fmla)
class NeuroCuberQuery:
def __init__(self, cfg_path, restore=True, restore_from=None):
self.cfg = ModelCfg_from_file(cfg_path)
self.model = init_neurocuber(self.cfg, restore, restore_from)
def get_logits(self, fmla):
n_clauses = len(fmla.clauses)
n_vars = fmla.nv
CL_idxs = tf.cast(clgraph(fmla), tf.int64)
G_cls = [G_cl_of_idxs(n_clauses, n_vars, CL_idxs)]
DRAT_logits, V_core_logits, C_core_logits = self.model(G_cls, tf.cast([n_clauses], tf.int64), tf.cast([n_vars], tf.int64))
return DRAT_logits[0], V_core_logits[0], C_core_logits[0]
def get_core_clause_ps(self, fmla):
logits = self.get_logits(fmla)[2]
ps = tf.nn.softmax(logits)
return ps
def get_core_var_ps(self, fmla):
logits = self.get_logits(fmla)[1]
ps = tf.nn.softmax(logits, tau=1)
return ps
def get_drat_var_ps(self, fmla):
logits = self.get_logits(fmla)[0]
ps = tf.nn.softmax(logits, tau=1)
return ps
def __call__(self, fmla):
return self.get_logits(fmla)
```
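A hypothetical inference sketch; the config path, checkpoint, and the tiny formula are placeholders meant only to show the expected call pattern.

```python
import tensorflow as tf
from pysat.formula import CNF
from query_model import NeuroResQuery

query = NeuroResQuery("cfgs/neurores.json", restore=True)   # placeholder cfg path
fmla = CNF(from_clauses=[[1, 2], [-1, 2], [-2, 3], [-3]])

drat_logits, var_core_logits, clause_core_logits = query(fmla)
core_var_ps = tf.nn.softmax(var_core_logits)  # per-variable core membership scores
```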
#### File: jesse-michael-han/neurocuber-public/sr.py
```python
import numpy as np
from pysat.formula import CNF, IDPool
from pysat.solvers import Solver
import datetime
import tempfile
import os
import subprocess
import itertools
import random
import sys
import uuid
import shutil
import collections
import uuid
from pathos.multiprocessing import ProcessPool
from pathos.helpers import mp
from tftd import TFDC
from config import *
from cnf_util import cadical, drat_trim, gen_tfdc, parse_core, parse_drat, lemma_occ, del_occ, validate_TFDC, data_writer
TESTS = []
def is_test(f):
TESTS.append(f)
return f
def run_tests():
for t in TESTS:
t()
print("tests ok")
def sample_SR_aux(n, min_cls_len=1, p_binom=0.7, p_geo=0.4):
"""
Args:
n: positive integer
Returns:
A randomly-generated formula and a clause which makes the formula unsat
This procedure has no guarantees on the number of clauses in the formula.
Reference implementation in source code of NeuroSAT:
https://github.com/dselsam/neurosat/blob/master/python/gen_sr_dimacs.py
"""
result = CNF()
with Solver(name="cdl") as S:
while True:
k = min_cls_len + np.random.binomial(n=1, p=p_binom) + np.random.geometric(p_geo)
vs = np.random.choice(n,size=min(n,k),replace=False)
vs = [int(v + 1) if random.random() < 0.5 else int(-(v + 1)) for v in vs]
S.add_clause(vs)
if S.solve():
result.append(vs)
else:
break
return result, vs
def sample_SR(n, min_cls_len=1, p_binom=0.7, p_geo=0.4):
"""
Args: same as sample_SR_aux
Returns:
1. an unsat formula and sat formula, differing only by the sign of a flipped literal in their final clause
2. clause and literal index of the flip literal
"""
result_unsat, vs = sample_SR_aux(n,min_cls_len,p_binom,p_geo)
result_sat = result_unsat.copy()
result_unsat.append(vs)
flip_idx = np.random.choice(len(vs))
result_sat.append(vs[0:flip_idx] + [-vs[flip_idx]] + vs[flip_idx + 1:])
return result_unsat, result_sat, len(result_sat.clauses)-1, flip_idx
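# For example, sample_SR(10) returns (fmla_unsat, fmla_sat, c_idx, l_idx), where c_idx
# indexes the final clause and flipping the sign of the literal at l_idx in that clause
# is exactly what toggles the pair between UNSAT and SAT.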
@is_test
def sample_SR_test(num_vars=10, num_rounds=100):
def sample_SR_test_aux(n):
result_unsat, result_sat, c_idx, l_idx = sample_SR(n)
with Solver(name="cdl") as S:
S.append_formula(result_unsat)
assert S.solve() is False
with Solver(name="cdl") as S:
S.append_formula(result_sat)
assert S.solve() is True
with Solver(name="cdl") as S:
result_sat.clauses[c_idx][l_idx] *= -1
S.append_formula(result_sat)
assert S.solve() is False
print("ok")
for _ in range(num_rounds):
sample_SR_test_aux(num_rounds)
def sample_SRC_aux(n,u1,c_idx,l_idx,p_geo=0.4,p_binom=0.7):
"""
Args:
n: positive integer
u1: an unsat core
c_idx: a clause index
l_idx: a literal index
u1 must become sat if the literal at (c_idx, l_idx) is flipped.
Note that if result, vs = sample_SR_aux(args...), then result + vs is a valid argument for u1
Returns: a random formula drawn from n variables containing u1 as an unsat core, and the unsat core
"""
result = CNF()
u2 = u1.copy()
u2.clauses[c_idx][l_idx] = -u2.clauses[c_idx][l_idx]
with Solver(name="cdl") as S:
while True:
S.append_formula(u2)
k = 1 + np.random.binomial(n=1, p=p_binom) + np.random.geometric(p_geo)
vs = np.random.choice(n,size=min(n,k),replace=False)
vs = [int(v + 1) if random.random() < 0.5 else int(-(v + 1)) for v in vs]
S.add_clause(vs)
if S.solve():
result.append(vs)
else:
break
for cls in u1.clauses:
result.append(cls)
return result, u1 # TODO(jesse): output a core clause mask
def unsat_core_example():
fmla = CNF()
fmla.append([1,2,3])
fmla.append([-1,2,3])
fmla.append([2,-3])
fmla.append([-2,-3])
fmla.append([-2,3])
return fmla
@is_test
def u_test():
c_idx = 4
l_idx = 0
fmla = unsat_core_example()
fmla2 = unsat_core_example()
fmla2.clauses[c_idx][l_idx] = -(fmla2.clauses[c_idx][l_idx])
# is this an example of a blocked clause?
with Solver(name="cdl") as S:
S.append_formula(fmla)
result = S.solve()
assert result is False
with Solver(name="cdl") as S:
S.append_formula(fmla2)
result = S.solve()
assert result is True
print("ok")
@is_test
def sample_SRC_aux_test1():
u1 = unsat_core_example()
c_idx = 4
l_idx = 0
n = 10
for _ in range(100):
fmla, u1 = sample_SRC_aux(n,u1,c_idx,l_idx)
fmla.to_fp(sys.stdout)
with Solver(name="cdl") as S:
S.append_formula(fmla)
result = S.solve()
assert result is False
print("ok")
@is_test
def sample_SRC_aux_test2():
core_sizes = []
ratios = []
valid_count = 0
num_rounds = 200
for _ in range(num_rounds):
fmla_unsat, fmla_sat, c_idx, l_idx = sample_SR(10)
# fmla_unsat.to_fp(sys.stdout)
fmla_src, u = sample_SRC_aux(100, fmla_unsat, c_idx, l_idx)
with Solver(name="cdl") as S:
S.append_formula(fmla_src)
result = S.solve()
assert result is False
core_size = len(fmla_unsat.clauses)
formula_core_size_ratio = float(len(fmla_src.clauses)/core_size)
ratios.append(formula_core_size_ratio)
core_sizes.append(core_size)
if core_size <= 100 and 50 <= core_size and formula_core_size_ratio <= 20 and 5 <= formula_core_size_ratio:
valid_count += 1
print("ratio of fmla_src to fmla_unsat: ", formula_core_size_ratio)
print("ok")
print("mean fmla-core ratio:", np.mean(ratios))
print("fmla-core ratio variance:", np.var(ratios))
print("min fmla-core ratio", np.min(ratios))
print("max fmla-core ratio", np.max(ratios))
print("mean core size:", np.mean(core_sizes))
print("core size variance:", np.var(core_sizes))
print("max core size:", np.max(core_sizes))
print("min core size:", np.min(core_sizes))
print("percent valid datapoints:", float(valid_count/num_rounds))
def sample_SRC_aux_test3(core_min=20, core_max=100, ratio_min=5, ratio_max=20, n1=10, n2=100):
core_sizes = []
ratios = []
valid_count = 0
num_rounds = 200
with tempfile.TemporaryDirectory() as tmpdir:
cnfdir = tmpdir + "/"
cdl = cadical(cnf_dir =cnfdir)
drat = drat_trim(cnf_dir = cnfdir)
for _ in range(num_rounds):
name = str(uuid.uuid4())
fmla_unsat, fmla_sat, c_idx, l_idx = sample_SR(n1,2,p_binom=0.3)
with open(os.path.join(cnfdir, name + ".cnf"), "w") as f:
fmla_unsat.to_fp(f)
cdl.set_rootname(name)
cdl.run() # compute a proof of unsat for this formula
cdl.process_output()
assert cdl.result is False
cdl.write_proof()
drat.set_rootname(name)
drat.run() # extract an unsat core by calling DRAT-trim
tsr = parse_drat(drat.opt_path, fmla_unsat.nv)
var_lemma_counts = lemma_occ(tsr)
var_del_counts = del_occ(tsr)
masks = parse_core(drat.core_path, fmla_unsat.nv, len(fmla_unsat.clauses))
core_clause_mask = masks["core_clause_mask"]
u = CNF() # this unsat core should still satisfy the property that it becomes sat if the sign of a single literal is flipped
for i in range(len(core_clause_mask)):
if core_clause_mask[i] == 1:
u.append(fmla_unsat.clauses[i])
l_idx = u.clauses[-1].index(fmla_unsat.clauses[c_idx][l_idx]) # get new l_idx since DRAT sometimes permutes literals inside clauses
fmla_src, u = sample_SRC_aux(n2, u, len(u.clauses)-1, l_idx) # use this unsat core as the seed to a call to sample_SR_aux, and obtain fmla_src
core_size = len(u.clauses)
ratio = float(len(fmla_src.clauses)/core_size)
core_sizes.append(core_size)
ratios.append(ratio)
if core_size <= core_max and core_min <= core_size and ratio <= ratio_max and ratio_min <= ratio:
valid_count += 1
print("max/min/mean core size:", np.max(core_sizes), np.min(core_sizes), np.mean(core_sizes))
print("max/min/mean ratios:", np.max(ratios), np.min(ratios), np.mean(ratios))
print("percent valid datapoints: ", str(float(valid_count/num_rounds)*100) + "%")
def gen_src_aux(core_min=20, core_max=100, ratio_min=5, ratio_max=20, n1=10, n2=100, min_cls_len=2, cnfdir = None):
"""
Repeatedly samples formulas from SRC until it finds one which meets the specifications, then returns that formula.
These parameters need to be tuned before running at scale to ensure that each call to this function rarely needs more than one iteration.
Args:
core_min: minimum size of the unsat core of the formula
core_max: maximum size of the unsat core of the formula
ratio_min: minimum ratio of formula-size to unsat-core-size
ratio_max: maximum ratio of formula-size to unsat-core-size
n1: `n` parameter passed to sample_SR when sampling the unsat core
n2: `n` parameter passed to sample_SRC when sampling the larger formula containing the unsat core
Returns:
A formula from SRC obeying the constraints given by `core_min`, `core_max`, `ratio_min`, and `ratio_max`.
"""
# with tempfile.TemporaryDirectory() as cnfdir:
cdl = cadical(cnf_dir = cnfdir)
drat = drat_trim(cnf_dir=cnfdir)
while True:
name = str(uuid.uuid4())
fmla_unsat, fmla_sat, c_idx, l_idx = sample_SR(n1,min_cls_len,p_binom=0.3)
with open(os.path.join(cnfdir, name + ".cnf"), "w") as f:
fmla_unsat.to_fp(f)
cdl.set_rootname(name)
cdl.run() # compute a proof of unsat for this formula
cdl.process_output()
assert cdl.result is False
cdl.write_proof()
drat.set_rootname(name)
drat.run() # extract an unsat core by calling DRAT-trim
tsr = parse_drat(drat.opt_path, fmla_unsat.nv)
var_lemma_counts = lemma_occ(tsr)
var_del_counts = del_occ(tsr)
masks = parse_core(drat.core_path, fmla_unsat.nv, len(fmla_unsat.clauses))
core_clause_mask = masks["core_clause_mask"]
u = CNF() # this unsat core should still satisfy the property that it becomes sat if the sign of a single literal is flipped
for i in range(len(core_clause_mask)):
if core_clause_mask[i] == 1:
u.append(fmla_unsat.clauses[i])
l_idx = u.clauses[-1].index(fmla_unsat.clauses[c_idx][l_idx]) # get new l_idx since DRAT sometimes permutes literals inside clauses
fmla_src, u = sample_SRC_aux(n2, u, len(u.clauses)-1, l_idx) # use this unsat core as the seed to a call to sample_SR_aux, and obtain fmla_src
core_size = len(u.clauses)
ratio = float(len(fmla_src.clauses)/core_size)
# if fmla_src satisfies the constraints, return the TFDC
if (ratio_min <= ratio and ratio <= ratio_max and core_min <= core_size and core_size <= core_max):
break
else:
continue
return fmla_src
def gen_src(core_min=20, core_max=100, ratio_min=5, ratio_max=20, n1=10, n2=100, min_cls_len=2, cnfdir = None):
if cnfdir is None:
with tempfile.TemporaryDirectory() as tmpdir:
return gen_src_aux(core_min, core_max, ratio_min, ratio_max, n1, n2, min_cls_len, tmpdir)
else:
return gen_src_aux(core_min, core_max, ratio_min, ratio_max, n1, n2, min_cls_len, cnfdir)
# def gen_src_tfdc(core_min=20, core_max=50, ratio_min=5, ratio_max=20, n1=10, n2=100, min_cls_len=2, cnfdir=None):
# """
# Repeatedly samples formulas from SRC until it finds one which meets the specifications, then returns that formula's serialization as a `TFDC`.
# These parameters need to be tuned before running at scale to ensure that each call to this function rarely needs more than one iteration.
# Args:
# core_min: minimum size of the unsat core of the formula
# core_max: maximum size of the unsat core of the formula
# ratio_min: minimum ratio of formula-size to unsat-core-size
# ratio_max: maximum ratio of formula-size to unsat-core-size
# n1: `n` parameter passed to sample_SR when sampling the unsat core
# n2: `n` parameter passed to sample_SRC when sampling the larger formula containing the unsat core
# Returns:
# A `TFDC` serializing a formula obeying the constraints given by `core_min`, `core_max`, `ratio_min`, and `ratio_max`.
# """
# if cnfdir is None:
# raise Exception("no CNF directory specified")
# cdl = cadical(cnf_dir = cnfdir)
# drat = drat_trim(cnf_dir=cnfdir)
# while True:
# name = str(uuid.uuid4())
# fmla_unsat, fmla_sat, c_idx, l_idx = sample_SR(n1,min_cls_len,p_binom=0.3)
# with open(os.path.join(cnfdir, name + ".cnf"), "w") as f:
# fmla_unsat.to_fp(f)
# cdl.set_rootname(name)
# cdl.run() # compute a proof of unsat for this formula
# cdl.process_output()
# assert cdl.result is False
# cdl.write_proof()
# drat.set_rootname(name)
# drat.run() # extract an unsat core by calling DRAT-trim
# tsr = parse_drat(drat.opt_path, fmla_unsat.nv)
# var_lemma_counts = lemma_occ(tsr)
# var_del_counts = del_occ(tsr)
# masks = parse_core(drat.core_path, fmla_unsat.nv, len(fmla_unsat.clauses))
# core_clause_mask = masks["core_clause_mask"]
# u = CNF() # this unsat core should still satisfy the property that it becomes sat if the sign of a single literal is flipped
# for i in range(len(core_clause_mask)):
# if core_clause_mask[i] == 1:
# u.append(fmla_unsat.clauses[i])
# l_idx = u.clauses[-1].index(fmla_unsat.clauses[c_idx][l_idx]) # get new l_idx since DRAT sometimes permutes literals inside clauses
# fmla_src, u = sample_SRC_aux(n2, u, len(u.clauses)-1, l_idx) # use this unsat core as the seed to a call to sample_SR_aux, and obtain fmla_src
# core_size = len(u.clauses)
# ratio = float(len(fmla_src.clauses)/core_size)
# # if fmla_src satisfies the constraints, return the TFDC
# if (ratio_min <= ratio and ratio <= ratio_max and core_min <= core_size and core_size <= core_max):
# break
# else:
# continue
# # don't recompute an unsat proof, just re-use knowledge of the core
# new_core_clause_mask = np.zeros(shape=[len(fmla_src.clauses)], dtype="int32")
# for i in range(1, len(u.clauses)+1):
# new_core_clause_mask[-i] = 1
# def mask_pad(mask, n_var):
# if n_var <= len(mask):
# return mask
# else:
# return np.pad(mask, (0, n_var-len(mask)), "constant", constant_values=(0,0))
# # compute result
# tfdc = TFDC(
# n_vars = fmla_src.nv,
# n_clauses = len(fmla_src.clauses),
# CL_idxs = clgraph(fmla_src)
# core_var_mask = mask_pad(masks["core_var_mask"], fmla_src.nv),
# core_clause_mask = new_core_clause_mask,
# var_lemma_counts = mask_pad(var_lemma_counts, fmla_src.nv),
# var_del_counts = mask_pad(var_del_counts, fmla_src.nv)
# )
# return tfdc
@is_test
def test_gen_src_tfdc():
with tempfile.TemporaryDirectory() as tmp:
cnfdir = tmp + "/"
tfdcs = [gen_src_tfdc(cnfdir = cnfdir) for _ in range(20)]
for tfdc in tfdcs:
validate_TFDC(tfdc)
```
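For reference, a minimal sketch of exercising `sample_SR` end to end, mirroring what the module's own `sample_SR_test` checks. It assumes `pysat` is installed and that `sr.py` above (and its imports `config`, `cnf_util`, `tftd`) is importable; `n=10` is an illustrative value, not one prescribed by the repository.
```python
# Minimal sketch: draw an (unsat, sat) pair from sample_SR and confirm it behaves as documented.
from pysat.solvers import Solver

from sr import sample_SR  # assumes sr.py above and its own dependencies resolve

def check_pair(n=10):
    fmla_unsat, fmla_sat, c_idx, l_idx = sample_SR(n)
    with Solver(name="cdl") as s:
        s.append_formula(fmla_unsat)
        assert s.solve() is False   # appending the closing clause makes the formula unsat
    with Solver(name="cdl") as s:
        s.append_formula(fmla_sat)
        assert s.solve() is True    # flipping one literal restores satisfiability
    # the two formulas agree everywhere except the flipped position
    assert fmla_unsat.clauses[c_idx][l_idx] == -fmla_sat.clauses[c_idx][l_idx]

if __name__ == "__main__":
    check_pair()
    print("ok")
```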
#### File: z3/tests/test_solver.py
```python
from sat_util import *
from nose.tools import assert_equals, assert_not_equal, assert_true, assert_raises
import numpy as np
import os
TEST_DIR = "/home/dselsam/alphacuber/tests"
def opts(max_conflicts):
return Z3Options(max_conflicts=max_conflicts, sat_restart_max=0)
def test_Z3Solver_basics():
sp = parse_dimacs(os.path.join(TEST_DIR, "test1.dimacs"))
s = Z3Solver(sp, opts(max_conflicts=0))
assert_equals(s.check(), Z3Status.unknown)
assert_equals(s.propagate(), Z3Status.unknown)
s = Z3Solver(sp, opts(max_conflicts=100))
assert_equals(s.check(), Z3Status.sat)
def check_to_tf_query(dimacs, expected):
sp = parse_dimacs(dimacs)
s = Z3Solver(sp, opts(max_conflicts=0))
assert_equals(s.check(), Z3Status.unknown)
s.add(expected['trail'])
tfq = s.to_tf_query()
assert_equals(tfq.fvars, expected['fvars'])
assert_true((tfq.LC_idxs == expected['LC_idxs']).all())
def test_to_tf_query():
    # NB: both cases use test1.dimacs, so keying a dict on the path would silently
    # drop the first case; keep them as explicit (dimacs, expected) pairs instead.
    TESTS = [
        (os.path.join(TEST_DIR, "test1.dimacs"), {
            'trail':[],
            'fvars':[0, 1, 2, 3],
            'LC_idxs': np.array([
                [0, 0], [1, 0], [2, 0], [3, 0],
                [6, 1], [7, 1], [8, 1],
                [0, 3], [2, 3],
                [9, 4], [7, 4], [6, 4],
                [3, 6], [7, 6]
            ])
        }),
        (os.path.join(TEST_DIR, "test1.dimacs"), {
            'trail':[Lit(Var(3), False)],
            'fvars':[0, 1, 2],
            'LC_idxs': np.array([
                [6, 1], [7, 1], [8, 1],
                [0, 3], [2, 3],
                [7, 4], [6, 4],
            ])
        }),
    ]
    for dimacs, expected in TESTS:
        yield check_to_tf_query, dimacs, expected
def test_unsat_core():
sp = parse_dimacs(os.path.join(TEST_DIR, "test1.dimacs"))
s = Z3Solver(sp, opts(max_conflicts=0))
status, core = s.check_core([Lit(Var(0), False), Lit(Var(4), True), Lit(Var(1), False)])
assert_equals(status, Z3Status.unsat)
# TODO(dselsam): support < on lits
assert_equals(len(core), 2)
assert_true(Lit(Var(0), False) in core)
assert_true(Lit(Var(1), False) in core)
def test_scopes():
sp = parse_dimacs(os.path.join(TEST_DIR, "test1.dimacs"))
s = Z3Solver(sp, opts(max_conflicts=0))
assert_equals(s.check(), Z3Status.unknown)
s.push()
s.add([Lit(Var(0), False), Lit(Var(4), True), Lit(Var(1), False)])
assert_equals(s.check(), Z3Status.unsat)
assert_equals(s.check(), Z3Status.unsat)
s.pop()
assert_equals(s.check(), Z3Status.unknown)
```
|
{
"source": "jesse-michael-han/oracle",
"score": 3
}
|
#### File: oracle/learning/cnn.py
```python
import torch
import torch.nn as nn
class GridCNN(nn.Module):
def __init__(self, d_in, d_out, kernel_size):
super(GridCNN, self).__init__()
self.d_in = d_in
self.d_out = d_out
self.layers = nn.Sequential(nn.Conv2d(d_in, d_out, kernel_size=kernel_size),
nn.Conv2d(d_out,d_out,kernel_size=1),
nn.LeakyReLU(),
nn.Conv2d(d_out,d_out,kernel_size=1),
nn.LeakyReLU(),
nn.Conv2d(d_out,d_out,kernel_size=kernel_size))
def forward(self, x):
return self.layers(x)
```
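A quick shape check for `GridCNN`, written as a standalone sketch rather than repository code: the two `kernel_size` convolutions are unpadded, so each forward pass shrinks height and width by `2 * (kernel_size - 1)` in total. The import path matches the one the `Embedder` below uses.
```python
# Shape check (sketch): GridCNN with kernel_size=5 collapses a 9x9 grid to a 1x1 spatial map.
import torch

from learning.cnn import GridCNN  # module path as used by the Embedder below

d = 32
cnn = GridCNN(d_in=d, d_out=d, kernel_size=5)

x = torch.randn(1, d, 9, 9)   # a 9x9 grid of d-dimensional cell embeddings
y = cnn(x)
print(y.shape)                # torch.Size([1, 32, 1, 1])
```
This is why `Embedder.embed_grid` can flatten the output with `.view(self.d)`: the hard-coded kernel size of 5 implicitly assumes 9x9 grids.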
#### File: oracle/learning/embedder.py
```python
import torch
import torch.nn as nn
import math
from learning.cnn import GridCNN
from learning.protos.Embeddable_pb2 import Embeddable
from learning.mlp import BasicMLP
def grid_idx(n_rows, n_cols):
def idxer(idx):
row_idx = int(math.floor(idx/n_rows))
col_idx = idx % n_rows
return row_idx, col_idx
return idxer
class Embedder(nn.Module):
# TODO(sameera): this class is responsible for recursively embedding Embeddables.
# Note: we make it a class even though it is "mostly" functional, in case we want
# to add state, e.g. memoization for turning trees into DAGs.
def __init__(self, cfg):
super(Embedder, self).__init__()
self.cfg = cfg
self.d = self.cfg["d"]
self.grid_cnn = GridCNN(d_in=self.d, d_out=self.d, kernel_size=5) # TODO(jesse): don't hardcode
self.list_lstm = nn.LSTM(input_size=self.d, hidden_size=self.d, batch_first=True)
self.char_embedding = nn.Embedding(256, self.d)
self.pair_mlp = BasicMLP(input_dim=2*self.d,
hidden_dims=[2*self.d],
output_dim=self.d,
activation="leaky_relu",
bias_at_end=True,
p_dropout=0.0)
self.list_nil = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty([1,self.d])), requires_grad=True).view(self.d)
def forward(self, embeddable):
return self.embed(embeddable)
def embed(self, embeddable):
# Input: a term of Embeddable protobuf type (<repo>/protos/Embeddable.proto)
# Output: a fixed dimensional embedding
kind = embeddable.WhichOneof("body")
if kind == "b": result = self.embed_bool(embeddable.b)
elif kind == "char": result = self.embed_char(embeddable.char)
elif kind == "n": result = self.embed_int(embeddable.n)
elif kind == "s": result = self.embed_string(embeddable.s)
elif kind == "pair": result = self.embed_pair(embeddable.pair)
elif kind == "maybe": result = self.embed_maybe(embeddable.maybe)
elif kind == "list": result = self.embed_list(embeddable.list)
elif kind == "array": result = self.embed_array(embeddable.array)
elif kind == "set": result = self.embed_set(embeddable.set)
elif kind == "map": result = self.embed_map(embeddable.map)
elif kind == "grid": result = self.embed_grid(embeddable.grid)
elif kind == "graph": result = self.embed_graph(embeddable.graph)
elif kind == "record": result = self.embed_record(embeddable.record)
else: raise Exception("[embed] invalid embeddable kind: %s" % kind)
# result = result.view(self.d) # TODO(jesse): temp fix, rm later
try:
assert result.size(0) == self.d
assert len(result.size()) == 1
except AssertionError as e:
print("BAD RESULT: ", result)
print("BAD RESULT SIZE: ", result.size())
print("BAD RESULT EMBEDDABLE KIND: ", kind)
raise e
return result
def embed_bool(self, b):
# TODO(sameera): one vector for False, one for True
raise Exception("embed_bool not yet implemented")
def embed_int(self, n):
return self.embed_string(str(n)) # >:(
def embed_string(self, s):
# TODO(dselsam, jesse): use token level embeddings!
return self.embed_list_aux(s, (lambda c: self.char_embedding(torch.as_tensor([ord(c)]))))
def embed_char(self, c):
return self.char_embedding(torch.as_tensor(c))
def embed_pair(self, pair):
return self.pair_mlp(torch.cat([self.embed(pair.fst), self.embed(pair.snd)], dim=-1))
def embed_maybe(self, maybe):
# TODO(sameera): baseline is vector for no-value, mlp applied to embedding of value otherwise
raise Exception("embed_maybe not yet implemented")
def embed_list_aux(self, l, f):
if len(l) == 0:
return self.list_nil
x = torch.empty(len(l), self.d)
for i, elem in enumerate(l):
x[i] = f(elem)
return self.list_lstm(x.unsqueeze(0))[1][0].squeeze(0).squeeze(0)
def embed_list(self, l):
return self.embed_list_aux(l.elems, f=self.embed)
def embed_array(self, array):
# TODO(sameera): transformer with a reduce? 1-d convolution?
raise Exception("embed_array not yet implemented")
def embed_set(self, set):
# TODO(sameera): something perm-invariant
raise Exception("embed_set not yet implemented")
def embed_map(self, map):
# TODO(sameera): something perm-invariant
raise Exception("embed_map not yet implemented")
def embed_grid(self, grid):
idxer = grid_idx(grid.nRows, grid.nCols)
g = torch.empty(grid.nRows, grid.nCols, self.d)
for idx, elem in enumerate(grid.elems):
i,j = idxer(idx)
g[i,j] = self.embed(elem)
        g = g.permute(2, 0, 1).contiguous()  # (nRows, nCols, d) -> (d, nRows, nCols); a raw view would scramble cells across channels
return self.grid_cnn(g.unsqueeze(0)).view(self.d)
def embed_graph(self, graph):
# TODO(sameera): GNN
raise Exception("embed_graph not yet implemented")
def embed_record(self, record):
name_embedding = self.embed_string(record.name)
fields_embedding = self.embed_list_aux(record.fields, f=self.embed_field)
return self.pair_mlp(torch.cat([name_embedding, fields_embedding], dim=-1))
def embed_field(self, field):
name_embedding = self.embed_string(field.name)
value_embedding = self.embed(field.value)
return self.pair_mlp(torch.cat([name_embedding, value_embedding], dim=-1))
```
#### File: oracle/learning/handler.py
```python
from learning.protos.Response_pb2 import Response, Prediction
from learning.model import GenericModel
import torch
import torch.nn as nn
import torch.optim as optim
def count_params(model):
count = 0
params = list(model.parameters())
for p in params:
if p.requires_grad:
count += p.numel()
return count
def unpack_datapoint(datapoint):
snapshot = datapoint.choicepoint.snapshot
choices = datapoint.choicepoint.choices
choiceIdx = datapoint.label.choiceIdx
return snapshot, choices, torch.as_tensor([choiceIdx])
class Handler:
def __init__(self, cfg):
self.cfg = cfg
self.model = GenericModel(cfg['model'])
rhs = count_params(self.model.reasoner) + count_params(self.model.embedder)
print("RHS", rhs)
assert count_params(self.model) == rhs
assert count_params(self.model.embedder) == sum([count_params(m) for m in [self.model.embedder.grid_cnn, self.model.embedder.list_lstm, self.model.embedder.char_embedding, self.model.embedder.pair_mlp]])
assert count_params(self.model.reasoner) == count_params(self.model.reasoner.mlp)
self.loss = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.model.parameters(), lr=cfg['optim']['learning_rate'])
def handle(self, cmd):
kind = cmd.WhichOneof("body")
if kind == "init": return self.handle_init(cmd.init)
elif kind == "predict": return self.handle_predict(cmd.predict)
elif kind == "train": return self.handle_train(cmd.train)
elif kind == "valid": return self.handle_valid(cmd.valid)
elif kind == "save": return self.handle_save(cmd.save)
elif kind == "load": return self.handle_load(cmd.load)
else: raise Exception("[handle] invalid cmd kind: %s" % kind)
def handle_init(self, init_cmd):
# TODO(sameera): re-initialize model (+ friends)
response = Response()
response.success = False
response.msg = "init command not yet implemented"
return response
def handle_predict(self, predict_cmd):
response = Response()
response.success = False
for choicepoint in predict_cmd.choicepoints:
with torch.set_grad_enabled(False):
logits = self.model(choicepoint.snapshot, choicepoint.choices)
policy = nn.functional.softmax(logits, dim=-1)
prediction = Prediction()
prediction.policy.extend(list(policy.squeeze(0)))
response.predictions.append(prediction)
response.success = True
return response
def handle_train(self, train_cmd):
total_loss = 0.0
n_steps = 0
for _ in range(train_cmd.nEpochs):
for datapoint in train_cmd.datapoints:
snapshot, choices, choiceIdx = unpack_datapoint(datapoint)
with torch.set_grad_enabled(True):
logits = self.model(snapshot, choices)
loss = self.loss(logits, choiceIdx)
self.model.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.cfg['optim']['grad_norm_clip'])
self.optimizer.step()
total_loss += loss
n_steps += 1
response = Response()
response.loss = total_loss / n_steps if n_steps > 0 else float('nan')
response.success = True
return response
def handle_valid(self, valid_cmd):
total_loss = 0.0
n_steps = 0
for datapoint in valid_cmd.datapoints:
snapshot, choices, choiceIdx = unpack_datapoint(datapoint)
with torch.set_grad_enabled(False):
logits = self.model(snapshot, choices)
loss = self.loss(logits, choiceIdx)
total_loss += loss
n_steps += 1
response = Response()
response.loss = total_loss / n_steps if n_steps > 0 else float('nan')
response.success = True
return response
def handle_save(self, save_cmd):
# TODO(sameera): store all relevant data to save_cmd.filename
response = Response()
response.msg = "save command not yet implemented"
response.success = False
return response
def handle_load(self, load_cmd):
# TODO(sameera): load all relevant data to load_cmd.filename
response = Response()
response.msg = "load command not yet implemented"
response.success = False
return response
```
#### File: oracle/learning/mlp.py
```python
import torch
import torch.nn as nn
def decode_activation(activation):
if activation == "relu":
return nn.ReLU
elif activation == "leaky_relu":
return nn.LeakyReLU
elif activation == "relu6":
return nn.ReLU6
elif activation == "elu":
return nn.ELU
else:
raise Exception("unsupported activation")
# joe average MLP
class BasicMLP(nn.Module):
def __init__(self, input_dim, hidden_dims, output_dim, activation, bias_at_end=True, p_dropout=0.1, **kwargs):
super(BasicMLP, self).__init__(**kwargs)
layers = []
for k in range(len(hidden_dims) + 1):
if k == 0:
d_in = input_dim
else:
d_in = hidden_dims[k-1]
if k == len(hidden_dims):
d_out = output_dim
else:
d_out = hidden_dims[k]
layers.append(nn.Linear(in_features=d_in,
out_features=d_out,
bias=(True if ((k == len(hidden_dims) and bias_at_end) or k < len(hidden_dims)) else False)))
if not (k == len(hidden_dims)):
layers.append(decode_activation(activation)())
layers.append(nn.Dropout(p_dropout))
self.main = nn.Sequential(*layers)
def forward(self, z):
return self.main(z)
```
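A short usage sketch for `BasicMLP` with the same illustrative sizes the `Embedder` above uses for its `pair_mlp`: hidden layers get the chosen activation and dropout, while the final linear layer gets neither and its bias is controlled by `bias_at_end`.
```python
# Sketch: the same 2*d -> d projection the Embedder builds as its pair_mlp.
import torch

from learning.mlp import BasicMLP  # module defined above

d = 32
mlp = BasicMLP(input_dim=2 * d,
               hidden_dims=[2 * d],
               output_dim=d,
               activation="leaky_relu",
               bias_at_end=True,
               p_dropout=0.0)

z = torch.randn(4, 2 * d)  # a batch of 4 concatenated embedding pairs
print(mlp(z).shape)        # torch.Size([4, 32])
```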
|
{
"source": "jessemillar/pythonista",
"score": 4
}
|
#### File: pythonista/Desktop/vssh.py
```python
import os
import subprocess
import sys
def check_exists(name):
"""Check if the virtual machine exists."""
virtualbox_exists = subprocess.Popen(["VBoxManage", "list", "vms"], stdout=subprocess.PIPE)
if name in virtualbox_exists.communicate()[0]:
return True
else:
return False
def check_up(name):
"""Check if the virtual machine is currently powered on."""
virtualbox_up = subprocess.Popen(["VBoxManage", "list", "runningvms"], stdout=subprocess.PIPE)
if name in virtualbox_up.communicate()[0]:
return True
else:
return False
def find_host(name):
"""Check if an entry already exists in /etc/hosts."""
hosts = open("/etc/hosts", "r")
for line in hosts:
if name in line:
return True
return False
def host_outdated(address, name):
"""Check if the entry for the virtual machine in /etc/hosts is outdated."""
hosts = open("/etc/hosts", "r")
for line in hosts:
if name in line:
if address not in line:
return True
return False
def add_host(address, name):
"""Add an entry in /etc/hosts for the virtual machine."""
hosts = open("/etc/hosts", "rt")
hosts_contents = hosts.read() + "\n" + address + "\t" + name + "\n"
temp_hosts = open("/tmp/etc_hosts.tmp", "wt")
temp_hosts.write(hosts_contents)
# Move the temp hosts file into place with sudo permissions
os.system("sudo mv /tmp/etc_hosts.tmp /etc/hosts")
def update_host(address, name):
"""Update an entry in /etc/hosts to have the correct IP address."""
hosts = open("/etc/hosts", "r")
data = hosts.readlines()
new_hosts = []
for line in data:
if name in line:
new_hosts.append(address + "\t" + name + "\n")
else:
new_hosts.append(line)
temp_hosts = open("/tmp/etc_hosts.tmp", "wt")
temp_hosts.writelines(new_hosts)
# Move the temp hosts file into place with sudo permissions
os.system("sudo mv /tmp/etc_hosts.tmp /etc/hosts")
def main(): # Define as a function to adhere to style guidelines
"""Where the magic happens."""
try:
sys.argv[1]
except IndexError:
print "Missing name of virtual machine"
return
# Check if the user is supplying the virtual machine's name correctly
try:
sys.argv[2]
# If the name is correct, run the program
except IndexError:
if not check_exists(sys.argv[1]):
print "The specified virtual machine does not appear to exist."
return
if not check_up(sys.argv[1]):
headless_input = raw_input("The specified virtual machine does not appear to be running. Would you like to start the machine in 'headless' mode? [Y/n] ")
if len(headless_input) == 0 or headless_input == "Y" or headless_input == "y": # If the user responds in the affirmative
subprocess.Popen(["VBoxManage", "startvm", sys.argv[1], "--type", "headless"], stdout=subprocess.PIPE)
print "Please wait for the machine to boot before trying to connect again."
return
else:
return
virtualbox_ip = subprocess.Popen(["VBoxManage", "guestproperty", "get", sys.argv[1], "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=subprocess.PIPE)
ip_response = virtualbox_ip.communicate()[0]
if ip_response == "No value set!\n":
print "Could not find the virtual machine's IP address. Are network settings configured correctly and are VirtualBox Guest additions installed on the virtual machine?"
return
if find_host(sys.argv[1]):
if host_outdated(ip_response.split()[1], sys.argv[1]):
hosts_input = raw_input("/etc/hosts has an outdated entry for this virtual machine. Would you like to update it? [Y/n] ")
if len(hosts_input) == 0 or hosts_input == "Y" or hosts_input == "y": # If the user responds in the affirmative
update_host(ip_response.split()[1], sys.argv[1])
else:
hosts_input = raw_input("/etc/hosts does not have an entry for this virtual machine. Would you like to add one? [Y/n] ")
if len(hosts_input) == 0 or hosts_input == "Y" or hosts_input == "y": # If the user responds in the affirmative
add_host(ip_response.split()[1], sys.argv[1])
os.system("ssh " + ip_response.split()[1])
else:
print "If your virtual machine's name contains spaces, please wrap it in quotes."
return
main() # Run the function so the module is useful in a CLI
```
|
{
"source": "jessemillar/StretchReminderDiscord",
"score": 2
}
|
#### File: jessemillar/StretchReminderDiscord/main.py
```python
import discord
import json
import logging
from bot import StretchRemindersBot
def main():
# Setup logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler("stretchremindersbot.log")
file_handler.setLevel(logging.ERROR)
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter("{asctime}:{levelname}:{name}: {message}", datefmt=datefmt, style="{")
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
# Load config json
with open("config.json") as fp:
config = json.load(fp)
# Initialise bot instance
intents = discord.Intents.default()
intents.members = True
intents.presences = True
bot = StretchRemindersBot(command_prefix="!", intents=intents)
# Load extensions
bot.load_extension("cogs.admin")
bot.load_extension("cogs.autoreminders")
# Start bot loop
bot.run(config)
if __name__ == "__main__":
main()
```
|
{
"source": "jessemin/pytext",
"score": 2
}
|
#### File: models/qna/bert_squad_qa.py
```python
from typing import Union
import torch
from pytext.common.constants import Stage
from pytext.data.squad_for_bert_tensorizer import (
SquadForBERTTensorizer,
SquadForRoBERTaTensorizer,
)
from pytext.data.tensorizers import LabelTensorizer
from pytext.data.utils import Vocabulary
from pytext.models.bert_classification_models import NewBertModel
from pytext.models.decoders.mlp_decoder import MLPDecoder
from pytext.models.model import BaseModel
from pytext.models.module import create_module
from pytext.models.output_layers.squad_output_layer import SquadOutputLayer
from pytext.models.representations.huggingface_bert_sentence_encoder import (
HuggingFaceBertSentenceEncoder,
)
from pytext.models.representations.transformer_sentence_encoder_base import (
TransformerSentenceEncoderBase,
)
class BertSquadQAModel(NewBertModel):
__EXPANSIBLE__ = True
class Config(NewBertModel.Config):
class ModelInput(BaseModel.Config.ModelInput):
squad_input: Union[
SquadForBERTTensorizer.Config, SquadForRoBERTaTensorizer.Config
] = SquadForBERTTensorizer.Config(max_seq_len=256)
# is_impossible label
has_answer: LabelTensorizer.Config = LabelTensorizer.Config(
column="has_answer"
)
inputs: ModelInput = ModelInput()
encoder: TransformerSentenceEncoderBase.Config = HuggingFaceBertSentenceEncoder.Config()
decoder: MLPDecoder.Config = MLPDecoder.Config(out_dim=2)
output_layer: SquadOutputLayer.Config = SquadOutputLayer.Config()
@classmethod
def from_config(cls, config: Config, tensorizers):
has_answer_labels = ["False", "True"]
tensorizers["has_answer"].vocab = Vocabulary(has_answer_labels)
vocab = tensorizers["squad_input"].vocab
encoder = create_module(
config.encoder,
output_encoded_layers=True,
padding_idx=vocab.get_pad_index(),
vocab_size=vocab.__len__(),
)
decoder = create_module(
config.decoder, in_dim=encoder.representation_dim, out_dim=2
)
has_ans_decoder = create_module(
config.decoder,
in_dim=encoder.representation_dim,
out_dim=len(has_answer_labels),
)
output_layer = create_module(config.output_layer, labels=has_answer_labels)
return cls(encoder, decoder, has_ans_decoder, output_layer)
def __init__(
self, encoder, decoder, has_ans_decoder, output_layer, stage=Stage.TRAIN
) -> None:
super().__init__(encoder, decoder, output_layer, stage)
self.has_ans_decoder = has_ans_decoder
self.module_list.append(has_ans_decoder)
def arrange_model_inputs(self, tensor_dict):
(
tokens,
pad_mask,
segment_labels,
positions,
answer_start_indices,
answer_end_indices,
) = tensor_dict["squad_input"]
return tokens, pad_mask, segment_labels, positions
def arrange_targets(self, tensor_dict):
(
tokens,
pad_mask,
segment_labels,
positions,
answer_start_indices,
answer_end_indices,
) = tensor_dict["squad_input"]
# label = True if answer exists
label = tensor_dict["has_answer"]
return answer_start_indices, answer_end_indices, label
def forward(self, *inputs):
encoded_layers, cls_embed = self.encoder(inputs)
logits = self.decoder(encoded_layers[-1])
if isinstance(logits, (list, tuple)):
logits = logits[0]
label = (
torch.zeros((logits.size(0), 2)) # dummy tensor
if self.output_layer.ignore_impossible
else self.has_ans_decoder(cls_embed)
)
# Shape of logits is (batch_size, seq_len, 2)
start_logits, end_logits = logits.split(1, dim=-1)
# Shape of start_logits and end_logits is (batch_size, seq_len, 1)
# Hence, remove the last dimension and reduce them to the dimensions to
# (batch_size, seq_len)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
return start_logits, end_logits, label
```
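The span head above hinges on one tensor manipulation: the decoder emits a `(batch, seq_len, 2)` tensor that is split into per-token start and end logits. A standalone sketch of just that step, with made-up shapes:
```python
# Sketch of the span-logit split in BertSquadQAModel.forward; shapes here are illustrative.
import torch

batch_size, seq_len = 2, 7
logits = torch.randn(batch_size, seq_len, 2)         # decoder output: a (start, end) score per token

start_logits, end_logits = logits.split(1, dim=-1)   # each (batch, seq_len, 1)
start_logits = start_logits.squeeze(-1)              # (batch, seq_len)
end_logits = end_logits.squeeze(-1)                  # (batch, seq_len)
print(start_logits.shape, end_logits.shape)
```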
#### File: torchscript/tests/test_tensorizer.py
```python
import random
import unittest
from typing import List, Tuple
import torch
from pytext.torchscript.tensorizer import ScriptBERTTensorizer, ScriptRoBERTaTensorizer
from pytext.torchscript.tensorizer.tensorizer import VocabLookup
from pytext.torchscript.vocab import ScriptVocabulary
class TensorizerTest(unittest.TestCase):
def _mock_vocab(self):
# mapping of vocab index to token is x: x + 100
return ScriptVocabulary(
[str(i) for i in range(100, 203)], pad_idx=200, bos_idx=201, eos_idx=202
)
def _mock_tokenizer(self):
class MockTokenizer(torch.jit.ScriptModule):
def __init__(self, tokens: List[Tuple[str, int, int]]):
super().__init__()
self.tokens = torch.jit.Attribute(tokens, List[Tuple[str, int, int]])
def tokenize(self, text: str) -> List[Tuple[str, int, int]]:
return self.tokens
rand_tokens = [(str(random.randint(100, 200)), -1, -1) for i in range(20)]
return MockTokenizer(rand_tokens), rand_tokens
def test_lookup_tokens(self):
_, rand_tokens = self._mock_tokenizer()
vocab = self._mock_vocab()
vocab_lookup = VocabLookup(vocab)
token_ids, start_idxs, end_idxs = vocab_lookup(rand_tokens)
for token_id, token in zip(token_ids, rand_tokens):
self.assertEqual(token_id, int(token[0]) - 100)
def test_lookup_tokens_with_bos_eos(self):
_, rand_tokens = self._mock_tokenizer()
vocab = self._mock_vocab()
vocab_lookup = VocabLookup(vocab)
token_ids, start_idxs, end_idxs = vocab_lookup(
rand_tokens, bos_idx=201, eos_idx=202
)
self.assertEqual(token_ids[0], 201)
self.assertEqual(token_ids[-1], 202)
for token_id, token in zip(token_ids[1:-1], rand_tokens):
self.assertEqual(token_id, int(token[0]) - 100)
def test_bert_tensorizer(self):
tokenizer, rand_tokens = self._mock_tokenizer()
vocab = self._mock_vocab()
bert = ScriptBERTTensorizer(tokenizer, vocab, max_seq_len=100)
token_ids, _, _, _ = bert.numberize(["mock test"], None)
self.assertEqual(token_ids[0], 201)
self.assertEqual(token_ids[-1], 202)
for token_id, token in zip(token_ids[1:-1], rand_tokens):
self.assertEqual(token_id, int(token[0]) - 100)
def test_roberta_tensorizer(self):
tokenizer, rand_tokens = self._mock_tokenizer()
vocab = self._mock_vocab()
roberta = ScriptRoBERTaTensorizer(tokenizer, vocab, max_seq_len=100)
token_ids, _, _, _ = roberta.numberize(["mock test"], None)
self.assertEqual(token_ids[0], 201)
self.assertEqual(token_ids[-1], 202)
for token_id, token in zip(token_ids[1:-1], rand_tokens):
self.assertEqual(token_id, int(token[0]) - 100)
```
|
{
"source": "Jesse-mk/10617_Project",
"score": 3
}
|
#### File: Jesse-mk/10617_Project/token_cnn_experiments.py
```python
import numpy as np
import os
import pandas as pd
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from torch.utils.data.sampler import SubsetRandomSampler
from pathlib import Path
import json
# In[10]:
IMAGE_SIZE = 96
# After unpickling, `train_set` and `test_set` will be lists, where each element is a dictionary that has keys `features` and `label`. `features` will be a 1D numpy array of 1's and 0's, with size `box_size * box_size` where `box_size` is the size of the image. `label` will be a one-hot-encoded array.
# ### Generating Dataset
#
# In[3]:
class MathTokensDataset(Dataset):
"""
Dataset containing math tokens extracted from the CROHME 2011, 2012, and 2013 datasets.
"""
def __init__(self, pickle_file, image_size, transform=None):
"""
Args:
pickle_file (string): Path to dataset pickle file.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
with open(pickle_file, 'rb') as f:
self.df_data = pd.DataFrame(pickle.load(f))
# Reshape features to 3D tensor.
self.df_data['features'] = self.df_data['features'].apply(lambda vec: vec.reshape(1, image_size, image_size))
# # Convert one-hot labels to numbers (PyTorch expects this).
# self.df_data['label'] = self.df_data['label'].apply(lambda ohe_vec: np.argmax(ohe_vec))
self.transform = transform
def __len__(self):
return len(self.df_data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = {
'features': self.df_data.iloc[idx]['features'],
'label': self.df_data.iloc[idx]['label']
}
if self.transform:
sample = self.transform(sample)
return sample
# In[4]:
class BaselineTokenCNN(nn.Module):
def __init__(self, num_classes):
super(BaselineTokenCNN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=7)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=5)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(16 * 9 * 9, 600)
self.fc2 = nn.Linear(600, 200)
self.fc3 = nn.Linear(200, num_classes)
def forward(self, x):
x = x.float()
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = self.pool3(F.relu(self.conv3(x)))
x = x.view(-1, 16 * 9 * 9)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# In[5]:
# Set device to GPU if available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
# ##### Mods:
# 1. Changing Optimizers
# 2. Changing NN structure
# 3. ???
#
# In[6]:
#### 1. Optimizers to try: ###
#we can add more but just wanted to see
optimizer_dict = {"adam": optim.Adam,
"sgd": optim.SGD,
"adamW": optim.AdamW}
optimizer_params_dict = {"adam": {"lr": 0.001,
"weight_decay": 0},
"sgd": {"lr": 0.001,
"momentum": 0.9},
"adamW": {"lr": 0.001,
"weight_decay": 0.01 }}
# In[27]:
class Experiment():
def __init__(self, experiment_name, optimizer_class, train_set, val_split, test_set, classes, batch_size, save_dir):
#get runtime:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#get name for save files:
self.experiment_name = experiment_name
#make CNN
self.net = BaselineTokenCNN(num_classes=len(classes))
self.net.to(device) # Send to GPU.
#make loss
self.criterion = nn.CrossEntropyLoss()
#get optimizer and params:
optimizer = optimizer_dict[optimizer_class]
optimizer_params = optimizer_params_dict[optimizer_class]
#add in the parameters:
optimizer_params["params"] = self.net.parameters()
# print(optimizer_params)
#add in parameters to optimizer:
self.optimizer = optimizer([optimizer_params])
#keep track of train_history
self.train_loss_history = []
print("Model created with optimizer {}".format(optimizer_class))
self.init_dataloaders(train_set, val_split, test_set, batch_size)
print(f'{len(classes)} classes.')
self.history = {
'train_loss': [],
'train_acc': [],
'val_loss': [],
'val_acc': []
}
self.save_dir = save_dir
# Save the experiment settings.
exp_dir = os.path.join(self.save_dir, self.experiment_name)
Path(exp_dir).mkdir(parents=True, exist_ok=True)
settings = {
'optimizer': self.optimizer.state_dict(),
'batch_size': batch_size,
'val_split': val_split
}
settings_path = os.path.join(self.save_dir, self.experiment_name, 'settings.json' )
with open(settings_path, 'w') as f:
json.dump(settings, f)
print(f'Initialized experiment \'{self.experiment_name}\'')
def init_dataloaders(self, train_set, val_split, test_set, batch_size):
if val_split is None or val_split == 0:
self.train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0)
else:
# Split the training set into train/validation.
# Creating data indices for training and validation splits:
num_train = len(train_set)
indices = np.arange(num_train)
split = int(np.floor(val_split * num_train)) # Index to split at.
# Uncomment the line below if you want the train/val split to be different every time.
# np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Create PyTorch data samplers and loaders.
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
self.train_loader = torch.utils.data.DataLoader(train_set,
batch_size=batch_size,
sampler=train_sampler,
num_workers=4)
self.val_loader = torch.utils.data.DataLoader(train_set,
batch_size=batch_size,
sampler=val_sampler,
num_workers=4)
self.test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4)
print(f'{len(train_indices)} training examples.')
print(f'{len(val_indices)} validation examples.')
print(f'{len(test_set)} test examples.')
def train(self, max_epochs, patience):
best_val_loss = np.inf
no_up = 0
for epoch in tqdm(range(max_epochs), desc='Max Epochs'):
for i, data in tqdm(enumerate(self.train_loader), total=len(self.train_loader), desc='Training Batches', leave=False):
# Get the inputs and send to GPU if available.
features = data['features'].to(self.device)
labels = data['label'].to(self.device)
# zero the parameter gradients
self.optimizer.zero_grad()
# forward + backward + optimize
outputs = self.net(features)
loss = self.criterion(outputs, labels)
loss.backward()
self.optimizer.step()
train_loss, train_acc = self.evaluate(self.train_loader, tqdm_desc='Eval. Train')
val_loss, val_acc = self.evaluate(self.val_loader, tqdm_desc='Eval. Val')
# Save statistics to history.
self.history['train_loss'].append(train_loss)
self.history['train_acc'].append(train_acc)
self.history['val_loss'].append(val_loss)
self.history['val_acc'].append(val_acc)
if val_loss < best_val_loss:
best_val_loss = val_loss
self.save_checkpoint(epoch, val_loss)
else:
no_up += 1
if no_up == patience:
self.save_checkpoint(epoch, val_loss)
print(f'Stopping after {epoch} epochs.')
print(f'Early stopping condition met, validation loss did not decrease for {patience} epochs.')
break
def evaluate(self, dataloader, tqdm_desc=''):
num_correct = 0
num_total = 0
total_loss = 0
with torch.no_grad():
for data in tqdm(dataloader, desc=tqdm_desc, leave=False):
# Get the inputs and send to GPU if available.
features = data['features'].to(self.device)
labels = data['label'].to(self.device)
# Get the predictions / loss.
outputs = self.net(features)
_, predicted = torch.max(outputs.data, dim=1)
loss = self.criterion(outputs, labels)
# Update correct/total counts.
num_correct += (predicted == labels).sum().item()
num_total += labels.size()[0]
# Update total loss.
total_loss += loss.item()
acc = num_correct / num_total * 100
avg_loss = total_loss / len(dataloader)
return avg_loss, acc
def train_loss(self):
# TODO: Is this correct? Should we really be averaging the train loss over all epochs?
loss = np.mean(self.train_loss_history)
print(f"Loss of the network on train set: {loss}")
return loss
def test_accuracy(self, classes, test_loader):
self.num_classes = len(classes)
self.total_counts = np.zeros(self.num_classes)
self.correct_counts = np.zeros(self.num_classes)
self.predictions = []
# print(total_counts)
# print(correct_counts)
self.num_correct = 0
self.num_total_examples = 0
with torch.no_grad():
for test_data in tqdm(test_loader):
test_features = test_data['features'].to(self.device)
labels = test_data['label'].to(self.device)
outputs = self.net(test_features)
_, predicted = torch.max(outputs.data, dim=1)
self.predictions.append(predicted)
for p, l in zip(labels, predicted):
self.total_counts[l] += 1
if p == l:
self.correct_counts[p] += 1
self.num_total_examples += labels.size(0)
self.num_correct += (predicted == labels).sum().item()
self.test_accuracy = self.num_correct / self.num_total_examples * 100
print(f'Accuracy of the network on test set: {self.test_accuracy}%')
def save_checkpoint(self, epoch, val_loss):
checkpoint_dir = os.path.join(self.save_dir, self.experiment_name, 'checkpoints')
Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)
path = os.path.join(checkpoint_dir, f'epoch={epoch}_valLoss={np.round(val_loss, 4)}.pt')
torch.save(self.net.state_dict(), path)
def save_history(self):
history_path = os.path.join(self.save_dir, self.experiment_name, 'history.csv' )
pd.DataFrame(self.history).to_csv(history_path)
def save_test_performance(self):
test_loss, test_acc = self.evaluate(self.test_loader, tqdm_desc='Eval. Test')
print(f'Test accuracy = {test_acc}%')
test_perf_path = os.path.join(self.save_dir, self.experiment_name, 'test.json' )
with open(test_perf_path, 'w') as f:
json.dump({'test_loss': test_loss, 'test_acc': test_acc}, f)
# In[28]:
def run_experiment(name, tokens_dataset_folder):
prefix = os.path.join(os.getcwd(), 'data', 'tokens', tokens_dataset_folder)
train_path = os.path.join(prefix, 'train.pickle')
test_path = os.path.join(prefix, 'test.pickle')
int_to_token_path = os.path.join(prefix, 'int_to_token.pickle')
train_set = MathTokensDataset(train_path, IMAGE_SIZE)
test_set = MathTokensDataset(test_path, IMAGE_SIZE)
with open(int_to_token_path, 'rb') as f:
int_to_token = pickle.load(f)
classes = list(int_to_token.values())
exp = Experiment(experiment_name=name,
optimizer_class='adamW',
train_set=train_set,
val_split=0.2,
test_set=test_set,
classes=classes,
batch_size=4,
save_dir=os.path.join(os.getcwd(), 'experiments', 'token_cnn'))
exp.train(max_epochs=100, patience=3)
exp.save_history()
exp.save_test_performance()
# In[29]:
# In[ ]:
if __name__ == '__main__':
run_experiment(name='t=3,5,7', tokens_dataset_folder='b=96_train=2011,2013_test=2012_c=all_t=3,5,7')
# In[ ]:
```
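As a sanity check on the hard-coded `16 * 9 * 9` in `BaselineTokenCNN.fc1`, here is the spatial-size arithmetic for the 96x96 inputs used above. This is a sketch; the layer choices are taken from the class, not changed.
```python
# Sketch: trace the spatial size of a 96x96 token image through BaselineTokenCNN.
def conv(size, kernel):          # valid convolution, stride 1
    return size - kernel + 1

def pool(size):                  # 2x2 max pool, stride 2
    return size // 2

size = 96
size = pool(conv(size, 7))       # conv1 -> 90, pool1 -> 45
size = pool(conv(size, 5))       # conv2 -> 41, pool2 -> 20
size = pool(conv(size, 3))       # conv3 -> 18, pool3 -> 9
print(size)                      # 9, so the flattened feature size is 16 * 9 * 9
```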
|
{
"source": "jessemyers/mend",
"score": 3
}
|
#### File: mend/files/blob.py
```python
from dataclasses import dataclass
from pathlib import Path
from typing import BinaryIO, Type
from mend.files.tree import FileTree
from mend.protocols import Blob, Tree
@dataclass(frozen=True)
class FileBlob(Blob):
data: BinaryIO
path: Path
def close(self) -> None:
self.data.close()
def as_tree(self) -> Tree:
return FileTree({
self.path: self.data,
})
@classmethod
def open(
cls: Type["FileBlob"],
path: Path,
) -> "FileBlob":
return cls(
data=open(path, "rb"),
path=path,
)
```
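A tiny usage sketch for `FileBlob`; the path is a placeholder, and it assumes `FileTree` exposes the same `blobs` mapping that the GitHub plugin iterates over.
```python
# Sketch: a FileBlob wraps one local file; as_tree() lifts it into a single-entry FileTree.
from pathlib import Path

from mend.files import FileBlob

blob = FileBlob.open(Path("README.md"))   # placeholder path
tree = blob.as_tree()                     # maps Path("README.md") -> the open binary handle
print(list(tree.blobs.keys()))            # assumes FileTree exposes a `blobs` mapping
blob.close()
```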
#### File: mend/generators/file.py
```python
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Type
from click import Argument, Parameter, Path as PathType
from mend.files import FileBlob
from mend.protocols import Generator, Tree
@dataclass(frozen=True)
class FileGenerator(Generator):
"""
Generate from a local file.
"""
blob: FileBlob
def close(self) -> None:
self.blob.close()
def generate(self) -> Tree:
return self.blob.as_tree()
@classmethod
def iter_parameters(cls: Type["FileGenerator"]) -> Iterable[Parameter]:
yield Argument(
[
"path",
],
required=True,
type=PathType(
allow_dash=False,
dir_okay=False,
exists=True,
file_okay=True,
path_type=Path,
readable=True,
resolve_path=True,
writable=False,
),
)
@classmethod
def from_parameters(
cls: Type["FileGenerator"],
*args,
**kwargs,
) -> "FileGenerator":
path = kwargs["path"]
return cls(
blob=FileBlob.open(
path=path,
),
)
```
#### File: mend/plugins/github.py
```python
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable, Type
from click import (
BadParameter,
Option,
Parameter,
echo,
)
from github import Github
from github.Branch import Branch
from github.GitCommit import GitCommit
from github.GithubException import GithubException
from github.InputGitTreeElement import InputGitTreeElement
from github.PullRequest import PullRequest
from github.Repository import Repository
from mend.protocols import Plugin, Tree
# See: https://git-scm.com/book/en/v2/Git-Internals-Git-Objects
NORMAL_FILE_MODE = "100644"
def normalize_path(path: Path) -> str:
return str(path.relative_to(Path.cwd()))
@dataclass(frozen=True)
class GitHubPlugin(Plugin):
"""
Create a GitHub pull request.
"""
repository: Repository
base_branch: str
target_branch: str
title: str
def apply(self, tree: Tree) -> None:
"""
Create a pull request, apply the generated tree.
"""
branch = self.create_branch()
self.create_commit(branch, tree)
pull_request = self.create_pull_request()
echo(f"Created pull request: {pull_request.number}")
def create_branch(self) -> Branch:
"""
Create a remote branch.
"""
echo(f"Creating branch: {self.target_branch} off of {self.base_branch}.")
base_branch = self.repository.get_branch(self.base_branch)
try:
self.repository.create_git_ref(
ref=f"refs/heads/{self.target_branch}",
sha=base_branch.commit.sha,
)
except GithubException as error:
if error.status == 404:
raise Exception(
f"Unable to create branch {self.target_branch}; please confirm that your "
"access token has write access to this repository."
)
if error.status != 422:
raise
# Branch should exist already, pass through to make sure
pass
git_branch = self.repository.get_branch(self.target_branch)
echo(f"Created branch: {self.target_branch}.")
return git_branch
def create_commit(self, branch: Branch, tree: Tree) -> GitCommit:
echo(f"Finding base tree for {branch.name}.")
base_tree = self.repository.get_git_tree(branch.commit.sha)
echo(f"Creating {len(tree.blobs)} git blob(s).")
git_blobs = {
path: self.repository.create_git_blob(
content=blob.read().decode("utf-8"),
encoding="utf-8",
)
for path, blob in tree.blobs.items()
}
echo("Creating a new git tree from blob(s).")
git_tree = self.repository.create_git_tree(
tree=[
InputGitTreeElement(
path=normalize_path(path),
mode=NORMAL_FILE_MODE,
type="blob",
sha=blob.sha,
)
for path, blob in git_blobs.items()
],
base_tree=base_tree,
)
echo(f"Creating git commit from tree: {git_tree.sha}.")
git_commit = self.repository.create_git_commit(
            message=(
                f"mend: applying changes to {len(tree.blobs)} files\n"
                "\n"
                "Includes:\n"
                + "\n".join(
                    f" - {path}"
                    for path in sorted(tree.blobs.keys())
                )
            ),
tree=git_tree,
parents=[
branch.commit.commit,
],
)
echo(f"Updating git ref {self.target_branch} to: {git_commit.sha}")
git_ref = self.repository.get_git_ref(f"heads/{self.target_branch}")
git_ref.edit(git_commit.sha)
return git_commit
def create_pull_request(self) -> PullRequest:
echo(f"Creating pull request from {self.target_branch} onto {self.base_branch}.")
try:
# create a PR of the release branch into head
return self.repository.create_pull(
title=self.title,
body=f"Merge mend changes into {self.base_branch}.",
base=self.base_branch,
head=self.target_branch,
)
except GithubException as error:
if error.status != 422:
raise
if any((
"No commits between" in error.get("message", "")
for error in error.data.get("errors", ())
if isinstance(error, dict)
)):
# NB: maybe we should delete the branch here?
raise Exception("Skipping pull request; no changes.")
# PR should exist already; make sure
pull_requests = self.repository.get_pulls(
base=self.base_branch,
head=self.target_branch,
)
if not pull_requests:
raise
return pull_requests[0]
@classmethod
def iter_parameters(cls: Type["GitHubPlugin"]) -> Iterable[Parameter]:
yield Option(
[
"--token",
"--github-token",
],
envvar="GITHUB_TOKEN",
help=(
"A GitHub API access token, either provided via the GITHUB_TOKEN "
"environment variable or via a CLI prompt."
),
hide_input=True,
prompt=True,
required=True,
)
yield Option(
[
"--organization",
"--org",
"-o",
],
help=(
"The name of the target Github organization name, which may be "
"omitted if the repository name is fully-qualified."
),
required=False,
)
yield Option(
[
"--repository",
"--repo",
"-r",
],
help=(
"The name of the target Github repository name."
),
required=True,
)
yield Option(
[
"--branch",
"--branch-name",
"-b",
],
help="The name of the branch to create",
required=True,
)
yield Option(
[
"--branch-prefix",
],
help="The prefix to apply to the branch",
default="mend"
)
yield Option(
[
"--base",
"--base-branch",
],
help="The name of the base branch to use; uses the default branch if omitted",
)
yield Option(
[
"--title",
],
help="The pull request title",
)
@classmethod
def from_parameters(
cls: Type["GitHubPlugin"],
*args,
**kwargs,
) -> "GitHubPlugin":
github_token = kwargs["token"]
organization_name = kwargs["organization"]
repository_name = kwargs["repository"]
branch_name = kwargs["branch"]
branch_prefix = kwargs["branch_prefix"]
base_branch = kwargs["base"]
title = kwargs["title"]
if organization_name is None:
if "/" not in repository_name:
raise BadParameter(
message="Expected 'organization/repository' when --organization is omitted.",
param_hint="repository",
)
else:
repository_name = f"{organization_name}/{repository_name}"
github = Github(github_token)
repository = github.get_repo(repository_name)
return cls(
repository=repository,
base_branch=base_branch or repository.default_branch,
target_branch=f"{branch_prefix}/{branch_name}" if branch_prefix else branch_name,
title=title or f"Mend {branch_name}",
)
```
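For orientation, a minimal sketch of wiring the plugin up by hand rather than through `from_parameters`. The token, repository, and branch names are placeholders, and the `Tree` passed to `apply()` must come from one of the generators above.
```python
# Sketch only: construct GitHubPlugin directly; all concrete values are placeholders.
from github import Github

from mend.plugins.github import GitHubPlugin

github = Github("<access-token>")
repository = github.get_repo("example-org/example-repo")

plugin = GitHubPlugin(
    repository=repository,
    base_branch=repository.default_branch,
    target_branch="mend/update-readme",
    title="Mend update-readme",
)

# `tree` would come from one of the generators, e.g. FileGenerator; then:
# plugin.apply(tree)
```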
|
{
"source": "jessemyers/pluscal",
"score": 3
}
|
#### File: ast/statements/base.py
```python
from dataclasses import dataclass
from typing import Iterable, Optional, Sequence
from pluscal.ast.base import Base, Label, Line
class UnlabeledStmt(Base):
"""
UnlabeledStmt := <Assign> | <If> | <While> | <Either> | <With> |
<Await> | <Print> | <Assert> | <Skip> | <Return> |
<Goto> | <Call> | <MacroCall>
    Serves as a base class for unlabeled statements.
"""
pass
@dataclass(frozen=True)
class Stmt(Base):
"""
Stmt ::= [<Label> : [+|-]?]? <UnlabeledStmt>
"""
# XXX label should be extended to allow fairness +/-
value: UnlabeledStmt
label: Optional[Label] = None
def render(self, indent: int = 0) -> Iterable[Line]:
if self.label is not None:
yield Line(f"{str(self.label)}:", indent)
extra_indent = 2
else:
extra_indent = 0
yield from self.value.render(indent=indent + extra_indent)
def validate(self) -> None:
if self.label is not None:
self.label.validate()
self.value.validate()
@dataclass(frozen=True)
class AlgorithmBody(Base):
"""
    AlgorithmBody ::= begin <Stmt>+
"""
items: Sequence[Stmt]
def render(self, indent: int = 0) -> Iterable[Line]:
yield Line("begin", indent)
for item in self.items:
yield from item.render(indent + 2)
def validate(self) -> None:
assert self.items
for item in self.items:
item.validate()
```
#### File: ast/statements/goto.py
```python
from dataclasses import dataclass
from typing import Iterable
from pluscal.ast.base import Label, Line
from pluscal.ast.statements.base import UnlabeledStmt
@dataclass(frozen=True)
class Goto(UnlabeledStmt):
"""
Goto ::= goto <Label> ;
"""
value: Label
def render(self, indent: int = 0) -> Iterable[Line]:
yield Line(f"goto {str(self.value)};", indent)
def validate(self) -> None:
self.value.validate()
```
#### File: pluscal/builders/call.py
```python
from dataclasses import dataclass, field
from typing import List, Type, Union
from pluscal.ast import Call, Expr, MacroCall, Name, Stmt
from pluscal.builders.base import Builder
from pluscal.builders.source import LabelSource, to_label
@dataclass
class AbstractCallBuilder(Builder[Stmt]):
name: str
args: List[Expr] = field(default_factory=list)
label: LabelSource = None
@property
def call_type(self) -> Union[Type[Call], Type[MacroCall]]:
raise NotImplementedError()
def __call__(self, *args: str) -> "AbstractCallBuilder":
return self.with_(*args)
def with_(self, *args: str) -> "AbstractCallBuilder":
self.args.extend(Expr(arg) for arg in args)
return self
def build(self) -> Stmt:
return Stmt(
label=to_label(self.label),
value=self.call_type(
name=Name(self.name),
args=self.args,
),
)
class CallBuilder(AbstractCallBuilder):
@property
def call_type(self) -> Type[Call]:
return Call
class MacroCallBuilder(AbstractCallBuilder, Builder[Stmt]):
@property
def call_type(self) -> Type[MacroCall]:
return MacroCall
```
#### File: pluscal/builders/macro.py
```python
from dataclasses import dataclass, field
from typing import List, Optional
from pluscal.ast import Macro, Macros, Name, Variable
from pluscal.builders.base import Builder
from pluscal.builders.body import BodyBuilder
from pluscal.builders.source import MacroSource, StatementSource, to_macro
@dataclass
class MacroBuilder(Builder[Macro]):
name: str
args_: List[Variable] = field(default_factory=list)
body: BodyBuilder = field(default_factory=BodyBuilder)
def build(self) -> Macro:
return Macro(
name=Name(self.name),
args=self.args_,
body=self.body.build(),
)
def __call__(self, *args: str) -> "MacroBuilder":
return self.args(*args)
def args(self, *args: str) -> "MacroBuilder":
self.args_.extend(Variable(arg) for arg in args)
return self
def do(self, *args: StatementSource) -> "MacroBuilder":
self.body.do(*args)
return self
@dataclass
class MacrosBuilder(Builder[Optional[Macros]]):
items: List[Macro] = field(default_factory=list)
def __bool__(self) -> bool:
return bool(self.items)
def build(self) -> Optional[Macros]:
return Macros(items=self.items) if self else None
def define(self, *args: MacroSource) -> "MacrosBuilder":
self.items.extend(to_macro(arg) for arg in args)
return self
```
#### File: pluscal/builders/source.py
```python
from typing import Optional, Union
from pluscal.ast import (
LHS,
Label,
Macro,
Procedure,
Process,
PVarDecl,
Stmt,
UnlabeledStmt,
VarDecl,
Variable,
)
from pluscal.builders.base import Builder
LabelSource = Optional[Union[Label, str]]
LHSSource = Union[Builder[LHS], LHS, str]
MacroSource = Union[Builder[Macro], Macro]
ProcedureSource = Union[Builder[Procedure], Procedure]
ProcessSource = Union[Builder[Process], Process]
PVariableSource = Union[Builder[PVarDecl], PVarDecl]
StatementSource = Union[Builder[Stmt], Stmt, UnlabeledStmt]
VariableSource = Union[Builder[VarDecl], VarDecl, Variable, str]
def to_label(value: LabelSource) -> Optional[Label]:
if value is None:
return None
elif isinstance(value, Label):
return value
else:
return Label(value)
def to_lhs(value: LHSSource) -> LHS:
if isinstance(value, LHS):
return value
elif isinstance(value, Builder):
return value.build()
else:
return LHS(Variable(value))
def to_macro(value: MacroSource) -> Macro:
if isinstance(value, Builder):
return value.build()
else:
return value
def to_procedure(value: ProcedureSource) -> Procedure:
if isinstance(value, Builder):
return value.build()
else:
return value
def to_process(value: ProcessSource) -> Process:
if isinstance(value, Builder):
return value.build()
else:
return value
def to_pvariable(value: PVariableSource) -> PVarDecl:
if isinstance(value, Builder):
return value.build()
else:
return value
def to_statement(value: StatementSource) -> Stmt:
if isinstance(value, UnlabeledStmt):
return Stmt(value)
elif isinstance(value, Builder):
return value.build()
else:
return value
def to_variable(value: VariableSource) -> VarDecl:
if isinstance(value, VarDecl):
return value
elif isinstance(value, Builder):
return value.build()
elif isinstance(value, Variable):
return VarDecl(name=value)
else:
return VarDecl(name=Variable(value))
```
#### File: ast/statements/test_base.py
```python
from textwrap import dedent
from hamcrest import assert_that, equal_to, has_length, is_
from pluscal.ast.base import Expr, Label
from pluscal.ast.statements import Print, Stmt, UnlabeledStmt
def test_unlabeled_stmt() -> None:
# Assert, Assign, Await, Call, Either, Go, If, MacroCall, Print, Return, Skip, While, With
assert_that(
UnlabeledStmt.__subclasses__(),
has_length(13),
)
def test_stmt_without_label() -> None:
ast = Stmt(
value=Print(
value=Expr("1"),
),
)
ast.validate()
assert_that(
str(ast),
is_(equal_to(dedent("""\
print 1;"""))),
)
def test_stmt_with_label() -> None:
ast = Stmt(
label=Label("foo"),
value=Print(
value=Expr("1"),
),
)
ast.validate()
assert_that(
str(ast),
is_(equal_to(dedent("""\
foo:
print 1;"""))),
)
```
#### File: ast/statements/test_if.py
```python
from textwrap import dedent
from hamcrest import assert_that, equal_to, is_
from pluscal.ast.base import Expr
from pluscal.ast.statements import ElsifClause, If, IfClause, Print, Stmt
def test_if() -> None:
ast = If(
IfClause(
Expr("foo"),
[
Stmt(Print(Expr("bar"))),
],
),
)
ast.validate()
assert_that(
str(ast),
is_(equal_to(dedent("""\
if foo then
print bar;
end if;"""))),
)
def test_if_else() -> None:
ast = If(
IfClause(
Expr("foo"),
[
Stmt(Print(Expr("bar"))),
],
),
else_=[
Stmt(Print(Expr("baz"))),
],
)
ast.validate()
assert_that(
str(ast),
is_(equal_to(dedent("""\
if foo then
print bar;
else
print baz;
end if;"""))),
)
def test_if_elsif() -> None:
ast = If(
IfClause(
Expr("foo"),
[
Stmt(Print(Expr("bar"))),
],
),
[
ElsifClause(
Expr("baz"),
[
Stmt(Print(Expr("baz"))),
],
),
],
)
ast.validate()
assert_that(
str(ast),
is_(equal_to(dedent("""\
if foo then
print bar;
elsif baz then
print baz;
end if;"""))),
)
```
#### File: ast/statements/test_while.py
```python
from textwrap import dedent
from hamcrest import assert_that, equal_to, is_
from pluscal.ast.base import Expr
from pluscal.ast.statements import Print, Stmt, While
def test_while() -> None:
ast = While(
condition=Expr("foo"),
statements=[
Stmt(Print(Expr("bar"))),
],
)
ast.validate()
assert_that(
str(ast),
is_(equal_to(dedent("""\
while foo do
print bar;
end while;"""))),
)
```
#### File: tests/builders/test_either.py
```python
from textwrap import dedent
from hamcrest import assert_that, equal_to, is_
from pluscal.ast import Skip
from pluscal.builders.either import EitherBuilder
class TestEitherBuilder:
def test_either(self) -> None:
builder = EitherBuilder(Skip(), Skip())
assert_that(
str(builder),
is_(equal_to(dedent("""\
either
skip;
skip;
end either;"""))),
)
def test_either_or(self) -> None:
builder = EitherBuilder(Skip(), Skip()).or_(Skip()).or_(Skip())
assert_that(
str(builder),
is_(equal_to(dedent("""\
either
skip;
skip;
or
skip;
or
skip;
end either;"""))),
)
```
|
{
"source": "jessenestler/floodplains",
"score": 3
}
|
#### File: floodplains/floodplains/config.py
```python
import getpass
import logging
import logging.config
import logging.handlers
import os
import yaml
from cryptography.fernet import Fernet
def decrypt(key: str, token: str):
"""Decrypts encrypted text back into plain text.
Parameters:
-----------
key : str
Encryption key
token : str
Encrypted text
Returns:
--------
str
Decrypted plain text
"""
f = Fernet(key)
decrypted = f.decrypt(bytes(token, 'utf-8'))
return decrypted.decode("utf-8")
username = getpass.getuser()
user_email = f"{<EMAIL>"
with open(f".{os.sep}floodplains{os.sep}credentials.yaml") as cred_file:
creds = yaml.safe_load(cred_file.read())
with open(f".{os.sep}floodplains{os.sep}config.yaml") as config_file:
config = yaml.safe_load(config_file.read())
config['LOGGING']['handlers']['email']['toaddrs'] = user_email
config['LOGGING']['handlers']['email']['credentials'] = [
creds['EMAIL']['address'],
creds['EMAIL']['password']]
logging.config.dictConfig(config['LOGGING'])
# ESRI properties
esri = config["ESRI"]
esri_folder = os.path.abspath(esri["root"])
# Pro project location
aprx_location = os.path.join(esri_folder, esri["aprx_name"])
# Data properties
urls = config["DATA"]["urls"]
sde = config["DATA"]["sde"]
sr = sde["spatialref"]
fc_name = sde["feature"]["name"]
fc_fields = sde["feature"]["fields"]
# Database properties
database = config["DATABASE"]
# Connections
read_conn = database["connections"]["read"]
edit_conn = database["connections"]["edit"]
# Version properties
version_params = config["VERSIONING"]
version_name = version_params["version_name"]
version_params["in_workspace"] = edit_conn
# Versioned SDE Connection
db_params = database["info"]
db_creds = creds["DATABASE"]
edit_user = db_params["username"].upper()
db_params["version"] = f"{edit_user}.{version_name}"
db_params["password"] = decrypt(db_creds["key"], db_creds["token"])
db_params["out_folder_path"] = esri_folder
db_params["out_name"] = db_params["version"] + ".sde"
# Email credentials
sender = creds["EMAIL"]["address"]
password = creds["EMAIL"]["password"]
# Email recipient config dict
recipients = config["EMAIL"]
# Different lists
notification = recipients["lomr-notification"]
steward = recipients["data-steward"]
```
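For reference, a minimal sketch (not part of the repository) of how the key/token pair consumed by `decrypt()` above could be produced with `cryptography.fernet`; the plaintext and credential locations are placeholders.
```python
from cryptography.fernet import Fernet

key = Fernet.generate_key()                         # stored as DATABASE.key in credentials.yaml (assumed)
token = Fernet(key).encrypt(b"database-password")   # stored as DATABASE.token (assumed)

# Round-trip check against the decrypt() helper defined above.
assert decrypt(key, token.decode("utf-8")) == "database-password"
```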
#### File: floodplains/utils/managedb.py
```python
import os
import arcpy
import floodplains.config as config
log = config.logging.getLogger(__name__)
def _create_version(version_kwargs: dict):
"""Creates a version using the dict variables defined in the project
config.
The tool uses status codes provided by ESRI to keep trying a version
creation until the tool succeeds (status code = 4). Status codes can
be found here:
https://pro.arcgis.com/en/pro-app/arcpy/classes/result.htm
Parameters
----------
version_kwargs : dict
Parameters required for the arcpy.CreateVersion_management
function
"""
log.info("Creating a new version.")
status = 0
while status != 4:
result = arcpy.CreateVersion_management(**version_kwargs)
status = result.status
if status != 4:
log.warning((f"Version creation failed with ESRI code {status}. "
"Retrying."))
def create_versioned_connection(version_kwargs: dict,
connect_kwargs: dict) -> str:
"""Creates an sde connection on disk and returns the path to that
file.
Parameters
----------
version_kwargs : dict
Parameters required for the arcpy.CreateVersion func
connect_kwargs : dict
Parameters required for the arcpy.CreateDatabaseConnection func
Returns
-------
str
File path to the new database connection file
"""
_create_version(version_kwargs)
log.info("Creating a versioned database connection.")
arcpy.CreateDatabaseConnection_management(**connect_kwargs)
filepath = os.path.join(connect_kwargs["out_folder_path"],
connect_kwargs["out_name"])
return filepath
def remove_version(connection: str, version: str) -> None:
"""Removes the specified version from the database connection
Parameters
----------
connection : str
File path to the sde connection file
version : str
Name of the version being deleted (without the owner prepended)
"""
try:
del_version = [v for v in arcpy.da.ListVersions(
connection) if version.lower() in v.name.lower()][0]
if del_version.isOwner:
log.info("Removing old edit version.")
arcpy.DeleteVersion_management(connection, del_version.name)
else:
log.warning(("The version could not be deleted through the "
"connection provided because it does not own the "
"version."))
except IndexError:
log.info(f"No edit version called {version} currently exists.")
```
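A short usage sketch (not in the repository) tying this module to the objects assembled in `floodplains/config.py`; whether `edit_conn` points at an .sde connection file is an assumption.
```python
import floodplains.config as config
from floodplains.utils.managedb import create_versioned_connection, remove_version

# version_params and db_params are built in floodplains.config (see above).
sde_path = create_versioned_connection(config.version_params, config.db_params)
# ... perform versioned edits through sde_path ...
remove_version(config.edit_conn, config.version_name)
```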
|
{
"source": "jessengolab/demo-jaeger",
"score": 2
}
|
#### File: demo-jaeger/jaeger/opentracing_flask.py
```python
from flask import Flask
from flask import request
import requests
from jaeger_client import Config
from flask_opentracing import FlaskTracer
app = Flask(__name__)
@app.route('/')
def pull_requests():
# Fetch a list of pull requests on the opentracing repository
github_url = "https://api.github.com/repos/opentracing/opentracing-python/pulls"
r = requests.get(github_url)
json = r.json()
pull_request_titles = map(lambda item: item['title'], json)
return 'OpenTracing Pull Requests: ' + ', '.join(pull_request_titles)
def initialize_tracer():
config = Config(
config={
# 'sampler': {'type': 'const', 'param': 1}
'local_agent': {
'reporting_host': 'jaeger-agent',
},
'logging': True
},
service_name='hello-world')
return config.initialize_tracer() # also sets opentracing.tracer
flask_tracer = FlaskTracer(initialize_tracer, True, app)
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
```
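A hedged sketch of adding a manual child span inside a route with the tracer configured above; the route, operation name, and tags are illustrative.
```python
import opentracing

@app.route('/manual-span')
def manual_span():
    parent = flask_tracer.get_span()      # request span created by FlaskTracer
    tracer = opentracing.tracer           # set by config.initialize_tracer()
    with tracer.start_span('github-call', child_of=parent) as span:
        span.set_tag('http.url', 'https://api.github.com')
        r = requests.get('https://api.github.com')
        span.set_tag('http.status_code', r.status_code)
    return 'status: {}'.format(r.status_code)
```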
|
{
"source": "jesseniagonzalezv/App-Segmentation",
"score": 2
}
|
#### File: App-Segmentation/app/main.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
import argparse
from torchvision import datasets, models, transforms
from starlette.applications import Starlette
from starlette.responses import HTMLResponse, JSONResponse, FileResponse, StreamingResponse
from starlette.staticfiles import StaticFiles
from starlette.middleware.cors import CORSMiddleware
import uvicorn, aiohttp, asyncio
from io import BytesIO
from pathlib import Path
import os
import sys
import json
import csv
from testFunctions import dataLoaders, test_model
#from unrar import rarfile
from pyunpack import Archive
model_file_url = 'https://drive.google.com/uc?export=download&id=1nLbLcm1uv-nGA_KNGzA47cqpt_lyLTV_' # inception model
# model_file_url = 'https://drive.google.com/uc?export=download&id=1DfQMqvHKENNQBjxBmmpjJi_YGVLCTYyP' # densenet201 model
model_file_name = 'modelInception'
#model_file_name = 'modelDensenet201'
if model_file_name == 'modelInception':
input_size = 299
elif model_file_name == 'modelDensenet201':
input_size = 224
path = Path(__file__).parent
app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])
app.mount('/static', StaticFiles(directory='app/static'))
async def download_file(url, dest):
if dest.exists(): return
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
data = await response.read()
with open(dest, 'wb') as f: f.write(data)
async def setup_device():
model_path = path/'models'/f'{model_file_name}.pth'
await download_file(model_file_url, model_path) # download model from Google Drive
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
# load model on gpu and set it on test mode
model = torch.load(model_path)
model.eval()
model.cuda(device)
else:
# load model on cpu and set it on test mode
model = torch.load(model_path, map_location='cpu')
model.eval()
return model, device
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_device())]
learn = loop.run_until_complete(asyncio.gather(*tasks))[0]
loop.close()
async def download_images(url_dir):
data_path = path/'dataset_test.rar'
await download_file(url_dir, data_path) # download data from Dropbox
os.makedirs(path/'reto_deep_learning'/'test', exist_ok=True)
Archive(data_path).extractall("./app/reto_deep_learning/test")
#rar = rarfile.RarFile(data_path)
#rar.extractall()
local_data_dir = './app/reto_deep_learning'
'''
# r=root, d=directory, f=files
print(path/'reto_deep_learning')
for r, d, f in os.walk(path/'reto_deep_learning'):
for directory in d:
print(len(d))
print('\n')
#print(r)
if directory == 'test':
local_data_dir = os.path.join(r)
else:
            print('The test folder does not exist. Rename the folder to test')
os.makedirs(os.path.join(local_data_dir,'test_img/','class0'), exist_ok=True)
for r, d, files in os.walk(path/'reto_deep_learning/test_img'):
for f in files:
os.replace(f'{local_data_dir}/test_img/{f}',f'{local_data_dir}/test_img/class0/{f}')
print(f'Moving ... {local_data_dir}/test_img/{f}')
'''
return local_data_dir
@app.route('/')
def index(request):
html = path/'view'/'index.html'
return HTMLResponse(html.open().read())
@app.route('/analyze', methods=['POST'])
async def analyze(request):
data = await request.form()
contents = await data["upload_file"].read()
root = json.loads(contents.decode('utf-8'))
itemUrl = root["imagenUrl"]
data_dir = await download_images(itemUrl)
'''
data_path = path/'dataset_test.rar'
await download_file(itemUrl, data_path) # download data from Dropbox
Archive(data_path).extractall(".")
#rar = rarfile.RarFile(data_path)
#rar.extractall()
# r=root, d=directory, f=files
for r, d, f in os.walk(path/'reto_deep_learning'):
for directory in d:
if directory == 'train_img':
data_dir = os.path.join(r, directory)
'''
#print(learn)
dataloaders_dict, class_to_idx, imgs_filename = dataLoaders(input_size, data_dir)
predictions = test_model(learn[0], dataloaders_dict, learn[1], class_to_idx)
#print(imgs_filename['test'].imgs)
    with open('output.csv', mode='w') as output_file:
        output_preds = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
output_preds.writerow(['image_id', 'label'])
for i in range(1,len(predictions)+1):
output_preds.writerow([os.path.split(str(imgs_filename['test'].imgs[i-1][0]))[1], predictions[i-1]])
#return JSONResponse({'result': str(f'{len(predictions)} images were processed')})
return FileResponse(path='output.csv', filename='output.csv')
#return StreamingResponse(open('output.csv', mode='w'), media_type='text/plain')
if __name__ == '__main__':
if 'serve' in sys.argv: uvicorn.run(app, host='0.0.0.0', port=8080)
```
|
{
"source": "JesseNicholas00/IdeaBag",
"score": 4
}
|
#### File: JesseNicholas00/IdeaBag/DistanceBetweenTwoCities.py
```python
import math
class Coordinate():
def __init__(self,x_value,y_value):
self.x_value = float(x_value)
self.y_value = float(y_value)
def Distance(self,other):
x = (self.x_value - other.x_value)**2
y = (self.y_value - other.y_value)**2
        # Euclidean distance is the square root of the summed squared differences
        return math.sqrt(x + y)
cityA = Coordinate(input("Give me cityA's x coordinate "),input("Give me cityA's y coordinate "))
cityB = Coordinate(input("Give me cityB's x coordinate "),input("Give me cityB's y coordinate "))
print("The distance between those cities is " + str(Coordinate.Distance(cityA,cityB)))
```
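A quick sanity check for the distance computation (added for illustration; the script itself prompts for input when run):
```python
# A 3-4-5 right triangle: the distance should be exactly 5.0.
assert Coordinate(0, 0).Distance(Coordinate(3, 4)) == 5.0
```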
#### File: JesseNicholas00/IdeaBag/FactorialFinder.py
```python
def factorial(n):
if n == 0 or n == 1:
return 1
return n*factorial(n-1)
answer = int(input("what factorial number do you want to know ? "))
print(factorial(answer))
```
#### File: JesseNicholas00/IdeaBag/RandomNumberGen.py
```python
import random
def CoinFlip(iteration):
#this function does a coin flip the number
#of times the user has requested --> iteration
result = {'heads':0,'tails':0}
for i in range(iteration):
flip = random.randint(0,1)
if flip == 0:
result['heads'] += 1
else:
result['tails'] += 1
print("We're done, here are your results")
return result
print("We are going to do a coin flip")
num = int(input("how many times do you want to flip the coin ? "))
print(CoinFlip(num))
```
|
{
"source": "jessenie-intel/websensor-datacapture",
"score": 3
}
|
#### File: jessenie-intel/websensor-datacapture/classification.py
```python
import pandas
from pandas.plotting import scatter_matrix  # pandas.tools.plotting was removed in later pandas releases
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import numpy
from helperfuncs import prettyprint, count_features, read_from_file, list_keys
__author__ = "<NAME>"
__status__ = "Development"
#TODO: Name variables better, same variable names used in different places (for example 'data')
#Load dataset
datafilename = 'data_processed'
datafilename_vector = 'data_vector_processed'
with open(datafilename, 'r') as datafile:
dataset = datafile.read()
with open(datafilename_vector, 'r') as datafile_vector:
dataset_vector = datafile_vector.read() #Feature vector dataset
buttondata = read_from_file(dataset)
buttondata_vector = read_from_file(dataset_vector)
#prettyprint(buttondata_vector)
featurevectors = [] #Array to hold all the feature vectors
i = 0
for buttonpress in buttondata_vector:
featurevector = [] #A single feature vector
for key, value in sorted(buttonpress.items()): #Feature vector has to be sorted
if(type(value) is dict):
for k, v in value.items():
featurevector.append(v)
featurevectors.append(featurevector)
i = i+1
#print(featurevectors[0])
#print(len(featurevectors[0])) #Too short?
dataset_file = 'dataset/dataset1'
#Now write the feature vectors into a dataset file (CSV format)
with open(dataset_file, 'w+') as datasetfile:
keys = list_keys(buttondata)
print(keys)
datasetfile.write(str(keys).strip("[]"))
datasetfile.write('\n')
for vector in featurevectors:
#Add button data (what button was pressed)
vector.append(buttondata[featurevectors.index(vector)]['button'])
#Add frequency data (what sensor frequency was used)
vector.append(buttondata[featurevectors.index(vector)]['frequency'])
datasetfile.write(str(vector).strip("[]"))
datasetfile.write('\n')
datasetfile.close()
#From sensor readings (one for each reading), need to make sequences (one for each coordinate) - for feature vector, don't need to do this
dictkeys = []
listkeys = []
xyzkeys = [] #Keys that have x, y, z data
abgkeys = [] #Keys that have alpha, beta, gamma data
numpyarrkeys = [] #Keys that have numpy array data
#Still need to find a rule to separate xyz and abg keys
for key, value in buttondata[0].items():
if(key != 'button' and key != 'frequency'):
#Keys that have dict data
if(type(value) is dict):
dictkeys.append(key)
#Keys that have list data
if(type(value) is list):
listkeys.append(key)
#Keys that have alpha, beta, gamma data
if('orientation' in key):
abgkeys.append(key)
#Keys that have x,y,z data (all the others except orientation, dac)
elif('dac' not in key and key != 'button' and key != 'frequency'):
xyzkeys.append(key)
if('_fft' in key):
numpyarrkeys.append(key)
buttondata_array = [] #Holds the data for all the buttons
for buttonpress in buttondata:
data = {} #Dict holding the sequences for a single button press
data['button'] = buttonpress['button']
for key, value in buttonpress.items():
seq = {}
seqlist = []
if(key == 'button' or key == 'frequency'): #Don't need to make sequences for these
continue
#Make sequences for each key
#First handle list keys
if key in listkeys and key in xyzkeys and key not in numpyarrkeys:
seq_x = []
seq_y = []
seq_z = []
for i in value:
seq_x.append(i['x'])
seq_y.append(i['y'])
seq_z.append(i['z'])
seq = {'x':seq_x, 'y':seq_y, 'z':seq_z}
elif key in listkeys and key in abgkeys and key not in numpyarrkeys:
seq_alpha = []
seq_beta = []
seq_gamma = []
for i in value:
seq_alpha.append(i['alpha'])
seq_beta.append(i['beta'])
seq_gamma.append(i['gamma'])
seq = {'alpha':seq_alpha, 'beta':seq_beta, 'gamma':seq_gamma}
elif key in listkeys and key in numpyarrkeys:
seqlist = value
#Then handle dict keys
elif key in dictkeys and key in xyzkeys:
seq = {'x':value['x'], 'y':value['y'], 'z':value['z']}
elif key in dictkeys and key in abgkeys:
seq = {'alpha':value['alpha'], 'beta':value['beta'], 'gamma':value['gamma']}
else: #DAC keys
data[key] = value
if(seq):
data[key] = seq
elif(seqlist):
data[key] = seqlist
buttondata_array.append(data)
def buttonselection(): #Condition for selecting the buttons to be plotted
for x in buttondata_array:
if x['button'] == 0: #Select all buttons that fulfil this condition
yield x
#prettyprint(buttonselection())
def plot(buttons, sameplot=False):
index = 1
for button in buttons:
#Plot sequences
fig = plt.figure(index)
fig.suptitle('Data for button ' + str(button['button']))
ax1 = plt.subplot(221)
plt.plot(button['acceleration']['x'], color='r', label='accelx')
plt.plot(button['acceleration']['y'], color='b', label='accely')
plt.plot(button['acceleration']['z'], color='g', label='accelz')
ax1.legend(["accx", "accy", "accz"], loc='upper center', bbox_to_anchor=(0.5, 1.10),
ncol=3, fancybox=True, shadow=True)
plt.ylabel('Acceleration')
ax2 = plt.subplot(222)
plt.plot(button['rotation']['x'], color='r', label='rotx')
plt.plot(button['rotation']['y'], color='b', label='roty')
plt.plot(button['rotation']['z'], color='g', label='rotz')
ax2.legend(["rotx", "roty", "rotz"], loc='upper center', bbox_to_anchor=(0.5, 1.10),
ncol=3, fancybox=True, shadow=True)
plt.ylabel('Rotation')
ax3 = plt.subplot(223)
plt.plot(button['orientation']['alpha'], color='r', label='orix')
plt.plot(button['orientation']['beta'], color='b', label='oriy')
plt.plot(button['orientation']['gamma'], color='g', label='oriz')
ax3.legend(["orix", "oriy", "oriz"],loc='upper center', bbox_to_anchor=(0.5, 1.10),
ncol=3, fancybox=True, shadow=True)
plt.ylabel('Orientation')
if not sameplot:
index = index+1 #Plot each button press in different plot window
manager = plt.get_current_fig_manager()
manager.resize(*manager.window.maxsize())
plt.savefig('button_' + str(button['button']) + '_' +str(index) + '.svg', format='svg')
plt.show()
plot(buttondata_array, True)
```
|
{
"source": "JessenPan/leetcode-java",
"score": 3
}
|
#### File: python/test/S1TwoSumTest.py
```python
import unittest
from solution.S1TwoSum import TwoSum
class S1TwoSumTest(unittest.TestCase):
twoSumObj = TwoSum()
def test1(self):
self.assertEqual([0, 1], self.twoSumObj.twoSum([2, 7, 11, 15], 9))
```
|
{
"source": "jesseokeya/linkedin-scraper",
"score": 3
}
|
#### File: jesseokeya/linkedin-scraper/app.py
```python
from lib import Scrape
from typing import List
from os import environ
def main():
seconds: int = 60
username: str = environ.get('EMAIL')
password: str = environ.get('PASSWORD')
# Navigates to Linkedin's website
scraper = Scrape()
# Takes in credentials to login into the url sepecified
scraper.login(username=username, password=password)
# Navigate to specified pages on the website
scraper.navigate_to('profile', duration=2)
scraper.navigate_to(
multiple=['notifications', 'messages', 'network', 'jobs', 'home'],
duration=2
)
# Scroll to the bottom of page for 10 seconds
# The longer you scroll the more data you collect from linkedin
scraper.scroll_to_bottom(10)
# Returns a list of all images on website
images: List[str] = scraper.retrieve_images()
# Returns a list of all videos on website
videos: List[str] = scraper.retrieve_videos()
    # Collect the scraped data into a dict
    file_data: dict = {
'images': images,
'videos': videos
}
    # print scraped information before saving to file
print(file_data)
# create and write file data to json file
scraper.write_file(file_data, 'data.json')
# Uncomment to end the selenium chrome driver after 60 seconds
# scraper.end(seconds)
main()
```
#### File: linkedin-scraper/lib/helper.py
```python
from os import remove
from time import sleep
from os.path import exists
from json import dump, load
from platform import system
class Helper:
def __init__(self, url: str, base_dir: str):
self.url = url
self.base_dir = base_dir
def handle_error(self, e: Exception, message: str = 'Error Occured'):
print(e, message)
def get_url(self) -> str:
try:
return self.url
except Exception as e:
            self.handle_error(e, 'Error occurred while getting the url')
def write_file(self, *args) -> None:
try:
            file_data: dict = args[0]
file_name: str = args[1]
file_path: str = f'{self.base_dir}/{file_name}'
path_exists = exists(file_path)
file_ctx = open(
file_path, 'r+') if path_exists else open(file_path, 'w+')
if not path_exists:
dump(file_data, file_ctx, indent=4)
else:
remove(file_path)
self.write_file(file_data, file_name)
except Exception as e:
            self.handle_error(e, 'Error occurred while writing to file')
def duration(self, seconds: int = 0) -> None:
try:
return sleep(seconds)
except Exception as e:
            self.handle_error(e, 'Error occurred in chrome driver duration')
def getPlatform(self) -> str:
try:
return system()
except Exception as e:
            self.handle_error(e, 'Error occurred while trying to get the os platform')
```
|
{
"source": "Jesse-Opitz/grephy",
"score": 3
}
|
#### File: Jesse-Opitz/grephy/grephy.py
```python
import argparse, logging, sys
from graphviz import Digraph
# Custom python files
import finite_automata as fa
import create_nfa as cnfa
import create_dfa as cdfa
import state, edge
import draw_fa
import find_match
def read_file(fname):
"""
Reads data inside a file.
@type fname: string
@param fname: Name of the file to read
@rtype: string
@return: Data within the file
"""
try:
with open(fname, 'r') as f:
data = f.read();
return data
except IOError as e:
logging.critical("FILE['{2}'] I/O error({0}): {1}".format(e.errno, e.strerror, fname))
sys.exit(1)
except:
logging.critical("Unexpected error:", sys.exc_info()[0])
raise
sys.exit(1)
def main():
logging.basicConfig(level=logging.CRITICAL, format='%(levelname)s:%(message)s')
parser = argparse.ArgumentParser(description='Searches files for regular expression pattern matches.')
parser.add_argument('-n', '--NFA-FILE', nargs=1, help='Output file for NFA')
parser.add_argument('-d', '--DFA-FILE', nargs=1, help='Output file for DFA')
parser.add_argument('-p', '--preview', action="store_true", help='Opens a pdf view of DFA and NFA')
parser.add_argument('REGEX', type=str, help='Regular expression file')
parser.add_argument('FILE', type=str, help='Input file')
args = parser.parse_args()
nfa = cnfa.create_nfa(args.REGEX)
dfa = cdfa.create_dfa(nfa)
nfa_dot = draw_fa.draw(nfa)
dfa_dot = draw_fa.draw(dfa)
if args.preview:
if args.NFA_FILE is None:
nfa_dot.render('nfa.dot', view=True)
elif args.NFA_FILE is not None:
nfa_dot.render(args.NFA_FILE[0], view=True)
if args.DFA_FILE is None:
dfa_dot.render('dfa.dot', view=True)
elif args.DFA_FILE is not None:
nfa_dot.render(args.DFA_FILE[0], view=True)
elif not args.preview:
if args.NFA_FILE is None:
nfa_dot.save('nfa.dot')
elif args.NFA_FILE is not None:
nfa_dot.render(args.NFA_FILE[0])
if args.DFA_FILE is None:
dfa_dot.save('dfa.dot')
elif args.DFA_FILE is not None:
nfa_dot.save(args.DFA_FILE[0])
#TODO: Fix bug where matches first letter = matches line
matches = find_match.find_match(dfa, args.FILE)
for m in matches:
        print(m.strip())
if __name__ == "__main__":
main()
```
|
{
"source": "jesseops/Flask-Echelon",
"score": 3
}
|
#### File: Flask-Echelon/flask_echelon/helpers.py
```python
from functools import wraps
from flask import current_app
from flask_login import current_user
from flask_echelon import AccessCheckFailed
def has_access(echelon):
"""
Check if `current_user` has access to an Echelon in `current_app`
:return: bool
"""
if not hasattr(current_app, 'echelon_manager'):
raise Exception("Flask app '{!r}' does not have a bound interaction manager".format(current_app))
return current_app.echelon_manager.check_access(current_user, echelon)
def require_echelon(echelon):
"""
Check if `current_user` has access to an Echelon in `current_app`
If check fails, raise `AccessCheckFailed`
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if current_app.echelon_manager.check_access(current_user, echelon):
return func(*args, **kwargs)
raise AccessCheckFailed('{} does not have access to Echelon "{}"'.format(current_user, echelon))
return wrapper
return decorator
```
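A hedged usage sketch (not shipped with the package): the Echelon name and route are illustrative, and it assumes a manager object exposing `check_access()` has already been bound to the app as `app.echelon_manager`.
```python
from flask import Flask
from flask_echelon import AccessCheckFailed
from flask_echelon.helpers import require_echelon

app = Flask(__name__)
# app.echelon_manager = ...  # manager providing check_access(), configured elsewhere

@app.route("/admin")
@require_echelon("admin::panel")
def admin_panel():
    # Only reached when current_user passes the "admin::panel" check.
    return "welcome"

@app.errorhandler(AccessCheckFailed)
def denied(exc):
    return str(exc), 403
```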
|
{
"source": "jessepeng/awd-lstm-lm",
"score": 2
}
|
#### File: jessepeng/awd-lstm-lm/locked_dropout.py
```python
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence, pack_padded_sequence
class LockedDropout(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, dropout=0.5):
if not self.training or not dropout:
return x
sequence_lengths = None
if isinstance(x, PackedSequence):
x, sequence_lengths = pad_packed_sequence(x, batch_first=True)
m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)
mask = Variable(m, requires_grad=False) / (1 - dropout)
mask = mask.expand_as(x)
result = mask * x
if sequence_lengths is not None:
result = pack_padded_sequence(result, sequence_lengths, batch_first=True)
return result
```
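A small usage sketch (added for illustration): the point of the locked/variational variant is that one dropout mask is sampled per sequence and reused at every timestep.
```python
import torch

lockdrop = LockedDropout()
lockdrop.train()                      # masks are only sampled in training mode
x = torch.randn(35, 20, 400)          # (seq_len, batch, hidden), as used in this codebase
y = lockdrop(x, dropout=0.5)
# Every timestep shares the same mask: zeroed units line up across dim 0.
assert bool(((y[0] == 0) == (y[1] == 0)).all())
```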
|
{
"source": "jesse-peters/people",
"score": 3
}
|
#### File: people/scrape/scrape_md.py
```python
import re
import lxml.html
import click
import scrapelib
from common import Person
def elem_to_str(item, inside=False):
attribs = " ".join(f"{k}='{v}'" for k, v in item.attrib.items())
return f"<{item.tag} {attribs}> @ line {item.sourceline}"
class XPath:
def __init__(self, xpath, *, min_items=1, max_items=None, num_items=None):
self.xpath = xpath
self.min_items = min_items
self.max_items = max_items
self.num_items = num_items
def match(self, element, *, min_items=None, max_items=None, num_items=None):
items = element.xpath(self.xpath)
num_items = self.num_items if num_items is None else num_items
max_items = self.max_items if max_items is None else max_items
min_items = self.min_items if min_items is None else min_items
if num_items is not None and len(items) != num_items:
raise XPathError(
f"{self.xpath} on {elem_to_str(element)} got {len(items)}, "
f"expected {num_items}"
)
if min_items is not None and len(items) < min_items:
raise XPathError(
f"{self.xpath} on {elem_to_str(element)} got {len(items)}, "
f"expected at least {min_items}"
)
if max_items is not None and len(items) > max_items:
raise XPathError(
f"{self.xpath} on {elem_to_str(element)} got {len(items)}, "
f"expected at most {max_items}"
)
return items
def match_one(self, element):
return str(self.match(element, num_items=1)[0])
class NoSuchScraper(Exception):
pass
class XPathError(ValueError):
pass
# @attr.s
# class ContactDetail:
# note = attr.ib()
# voice = attr.ib()
# email =attr.ib()
# fax = attr.ib()
# address = attr.ib()
# @attr.s
# class Person:
# name = attr.ib()
# state = attr.ib()
# party = attr.ib()
# district = attr.ib()
# chamber = attr.ib()
# image = attr.ib(default=None)
# given_name = attr.ib(default=None)
# family_name = attr.ib(default=None)
# links = attr.ib(default=attr.Factory(list))
# sources = attr.ib(default=attr.Factory(list))
# capitol_office = attr.ib(default=None)
# district_office = attr.ib(default=None)
class Scraper(scrapelib.Scraper):
def fetch_page_data(self, page):
print(f"fetching {page.url} for {page.__class__.__name__}")
data = self.get(page.url)
page.set_raw_data(data)
def augment_item(self, item, subpages):
for subpage_func in subpages:
page = subpage_func(item)
self.fetch_page_data(page)
page_data = page.get_data()
item.update(page_data)
return item
def scrape(self, chamber, session):
for page in self.start_scrape(chamber, session):
self.fetch_page_data(page)
for item in page.get_data():
if page.subpages:
item = self.augment_item(item, page.subpages)
if isinstance(item, dict):
item = self.to_object(item)
yield item
def to_object(self, item):
"""
converts intermediate data (often in a dictionary) to a final object to be validated
"""
return item
def start_scrape(self, chamber, session):
"""
yields one or more Page objects that will kick off the scrape.
It may also raise a ValueError (TBD) when it does not have an appropriate entrypoint
to scrape the requested data.
"""
raise NotImplementedError()
class Page:
def __init__(self, url):
"""
a Page can be instantiated with a url & options (TBD) needed to fetch it
"""
self.url = url
def set_raw_data(self, raw_data):
""" callback to handle raw data returned by grabbing the URL """
self.raw_data = raw_data
def get_data(self):
""" return data extracted from this page and this page alone """
raise NotImplementedError()
class HtmlPage:
def set_raw_data(self, raw_data):
self.raw_data = raw_data
self.root = lxml.html.fromstring(raw_data.content)
self.root.make_links_absolute(self.url)
class HtmlListPage(HtmlPage):
"""
Simplification for HTML pages that get a list of items and process them.
When overriding the class, instead of providing get_data, one must only provide
an xpath and a process_item function.
"""
xpath = None
def get_data(self):
if not self.xpath:
raise NotImplementedError("must either provide xpath or override scrape")
items = self.xpath.match(self.root)
for item in items:
item = self.process_item(item)
yield item
def process_item(self, item):
return item
class MDPersonDetail(HtmlPage):
def __init__(self, url):
self.url = url
def parse_address_block(self, block):
state = "address"
# group lines by type
values = {"address": [], "phone": [], "fax": []}
for line in block.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("Phone"):
state = "phone"
elif line.startswith("Fax"):
state = "fax"
values[state].append(line)
# postprocess values
phones = []
for line in values["phone"]:
for match in re.findall(r"\d{3}-\d{3}-\d{4}", line):
phones.append(match)
faxes = []
for line in values["fax"]:
for match in re.findall(r"\d{3}-\d{3}-\d{4}", line):
faxes.append(match)
return {"address": "; ".join(values["address"]), "phones": phones, "faxes": faxes}
def get_data(self):
# annapolis_info = (
# XPath("//dt[text()='Annapolis Info']/following-sibling::dd[1]")
# .match_one(self.root)
# .text_content()
# )
# interim_info = (
# XPath("//dt[text()='Interim Info']/following-sibling::dd[1]")
# .match_one(self.root)
# .text_content()
# )
# print(self.parse_address_block(annapolis_info))
# print(self.parse_address_block(interim_info))
return dict(
name=XPath("//h2/text()").match_one(self.root).split(" ", 1)[1],
# "email": XPath(
# "//dt[text()='Contact']/following-sibling::dd[1]/a[1]/text()"
# ).match_one(self.root),
)
class MDPersonList(HtmlListPage):
xpath = XPath("//div[@id='myDIV']//div[@class='p-0 member-index-cell']")
subpages = [lambda item: MDPersonDetail(item["link"])]
def __init__(self, url):
self.url = url
def process_item(self, item):
dd_text = XPath(".//dd/text()").match(item)
district = dd_text[2].strip().split()[1]
party = dd_text[4].strip()
return dict(
chamber="upper" if "senate" in self.url else "lower",
image=XPath(".//img/@src").match_one(item),
district=district,
party=party,
link=XPath(".//dd/a[1]/@href").match_one(item),
)
class MDPersonScraper(Scraper):
def start_scrape(self, chamber, session):
""" This function yields one or more Page objects that will kick off the scrape.
It may also raise a ValueError (TBD) when it does not have an appropriate entrypoint
to scrape the requested data.
"""
if session:
raise NoSuchScraper("cannot scrape non-current sessions")
if chamber == "upper":
yield MDPersonList("http://mgaleg.maryland.gov/mgawebsite/Members/Index/senate")
elif chamber == "lower":
yield MDPersonList("http://mgaleg.maryland.gov/mgawebsite/Members/Index/house")
def to_object(self, item):
p = Person(
state="md",
chamber=item["chamber"],
name=item["name"],
party=item["party"],
image=item["image"],
district=item["district"],
)
p.add_link(item["link"])
p.add_source(item["link"])
return p
@click.group()
def cli():
pass
@cli.command()
@click.argument("class_name")
@click.argument("url")
def sample(class_name, url):
# implementation is a stub, this will be able to accept dotted paths once implemented
Cls = globals()[class_name]
page = Cls(url)
s = Scraper()
s.fetch_page_data(page)
print(page.get_data())
@cli.command()
@click.option("--chamber", multiple=True, default=["upper", "lower"])
@click.option("--session", default=None)
def scrape(chamber, session):
for ch in chamber:
for item in MDPersonScraper().scrape(ch, session):
item.save("incoming/md/people")
if __name__ == "__main__":
cli()
```
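A brief sketch (not part of the scraper) of how the `XPath` helper behaves on its own; the HTML snippet is made up.
```python
import lxml.html

root = lxml.html.fromstring("<ul><li><a href='/a'>A</a></li><li><a href='/b'>B</a></li></ul>")

links = XPath("//li/a/@href").match(root, num_items=2)   # ['/a', '/b']
first = XPath("//li[1]/a/@href").match_one(root)         # '/a'

# Cardinality violations raise XPathError instead of silently returning the wrong data.
try:
    XPath("//li/a/@href").match_one(root)
except XPathError:
    pass
```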
|
{
"source": "jessepinnell/buph",
"score": 2
}
|
#### File: python/tests/test_routine_engine.py
```python
import unittest
from xrsrv import routine_engine
from xrsrv import exercise_database
from xrsrv.type_factories import UserRig, UserFixture
# The test function names are quite long
# pylint: disable=invalid-name
EXERCISE_DATABASE_NAME = "exercise.db"
class TestRoutineEngine(unittest.TestCase):
"""
Test the routine engine class
"""
def __init__(self, *args, **kwargs):
super(TestRoutineEngine, self).__init__(*args, **kwargs)
exercise_db = exercise_database.SQLiteConnection(EXERCISE_DATABASE_NAME)
self.engine = routine_engine.RoutineEngine(exercise_db)
self.build_user_all()
def build_user_all(self):
""" Builds up test user data """
self.user_fixtures = [
UserFixture("floor", 0, 0),
UserFixture("block on floor", 0, 0),
UserFixture("horizontal bench", 0, 0)
]
self.user_rigs = [
UserRig("barbell", 25, 25),
UserRig("barbell", 35, 35),
UserRig("barbell", 45, 45),
UserRig("barbell", 55, 55),
UserRig("dumbbell pair", 55, 55),
UserRig("dumbbell single", 55, 55),
UserRig("imbalanced dumbbell single", 55, 55),
]
def test_instantiation(self):
""" Test the creation of the connection to the database """
self.assertIsInstance(self.engine, routine_engine.RoutineEngine)
def test_exercise_one_fixture(self):
""" Simple test for a single fixture exercise """
user_fixtures = [
UserFixture("test fixture 1", 0, 0)
]
user_rigs = []
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertEqual(len(self.engine.available_exercises), 1)
self.assertIn("test exercise 1", self.engine.available_exercises)
def test_exercise_two_fixtures(self):
""" Test exercise having two acceptable fixtures """
user_fixtures = [
UserFixture("test fixture 2a", 0, 0),
]
user_rigs = []
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertEqual(len(self.engine.available_exercises), 1)
self.assertIn("test exercise 2", self.engine.available_exercises)
user_fixtures = [
UserFixture("test fixture 2b", 0, 0),
]
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertEqual(len(self.engine.available_exercises), 1)
self.assertIn("test exercise 2", self.engine.available_exercises)
def test_exercise_multiple_optional_rigs(self):
""" Test exercise having one or more optional rigs
Examples:
* holding a plate when doing sit-ups
* holding barbells when doing calf raises
"""
user_fixtures = [
UserFixture("test fixture 3", 0, 0)
]
user_rigs = [
UserRig("test rig 1", 0, 0)
]
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertIn("test exercise 3", self.engine.available_exercises)
user_rigs = [
UserRig("test rig 2", 0, 0)
]
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertIn("test exercise 3", self.engine.available_exercises)
user_rigs = []
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertIn("test exercise 3", self.engine.available_exercises)
def test_exercise_multiple_required_rigs(self):
""" Test exercise having more than one possible rig
Example:
* front dumbbell raise using either balanced dumbbell or an imbalanced dumbbell
This wouldn't apply to things such as overhead presses with barbells versus dumbbells.
They would be considered different exercises due to range of motion, angle, etc.
"""
user_fixtures = [
UserFixture("test fixture 4", 0, 0)
]
user_rigs = [
UserRig("test rig 1", 0, 0)
]
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertIn("test exercise 4", self.engine.available_exercises)
user_rigs = [
UserRig("test rig 2", 0, 0)
]
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertIn("test exercise 4", self.engine.available_exercises)
user_rigs = []
self.engine.set_user_exercise_environment(user_fixtures, user_rigs)
self.assertNotIn("test exercise 4", self.engine.available_exercises)
def test_generate_single_plan(self):
""" Test the generate_single_plan() method """
num_exercises_in_plan = 12
self.engine.set_user_exercise_environment(self.user_fixtures, self.user_rigs)
plan = self.engine.generate_plan("basic_random", n=num_exercises_in_plan)
self.assertEqual(len(plan), 1)
self.assertEqual(len(plan[0]), num_exercises_in_plan)
def test_generate_single_plan_too_many(self):
""" Test the generate_single_plan() method but with too many exercises requested
than could be possibly selected
"""
num_exercises_in_plan = 342
self.engine.set_user_exercise_environment(self.user_fixtures, self.user_rigs)
plan = self.engine.generate_plan("basic_random", n=num_exercises_in_plan)
self.assertEqual(len(plan), 1)
self.assertNotEqual(len(plan[0]), num_exercises_in_plan)
if __name__ == '__main__':
unittest.main()
```
#### File: python/utils/exercise_converter.py
```python
import csv
import sys
# pylint: disable=missing-docstring
# XXX This defaults to olympic stuff, need to handle multiple rigs for each exercise
EQUIPMENT_MAP = {
"B": "balanced olympic barbell",
"C": "cable",
"D": "double adjustable dumbbell",
"E": "balanced olympic e-z bar",
"N": None,
"P": None,
"Q": None,
"R": None,
"T": "balanced olympic trap bar"
}
class ExerciseConverter():
"""
Utility for converting csv export of exercise data to SQLITE queries
"""
def __init__(self):
self.exercises = {}
def print_exercises_sql(self, filename):
with open(filename, "w") as sql_file:
for exercise, info in self.exercises.items():
rig = "?" if info[1] is None else "\"{0}\"".format(info[1])
sql_file.write("INSERT INTO Exercises VALUES (\"{0}\", \"floor\", {1}, ?);\n".format(exercise, rig))
def print_muscles_worked_sql(self, filename):
with open(filename, "w") as sql_file:
for exercise, info in self.exercises.items():
for muscle in info[0]:
sql_file.write("INSERT INTO MusclesExercised VALUES (\"{0}\", \"{1}\");\n".format(exercise, muscle))
def add_exercise(self, exercise, muscle, equipment_char):
if exercise not in self.exercises:
self.exercises[exercise] = (set(), EQUIPMENT_MAP[equipment_char])
self.exercises[exercise][0].add(muscle)
def generate_sql_from_csv(self, input_file, exercises_out_file, muscles_worked_out_file):
""" Generates queries based on CSV """
with open(input_file, "r") as csv_file:
reader = csv.reader(csv_file)
header = next(reader)
for row in reader:
if row[0] != "":
for i, field in enumerate(row):
if field == "⚫":
self.add_exercise(row[1], header[i], row[2])
self.print_exercises_sql(exercises_out_file)
self.print_muscles_worked_sql(muscles_worked_out_file)
if __name__ == "__main__":
if len(sys.argv) != 4:
sys.exit("usage: {0} [input file] [muscles out] [muscles worked out]".format(sys.argv[0]))
CONVERTER = ExerciseConverter()
CONVERTER.generate_sql_from_csv(sys.argv[1], sys.argv[2], sys.argv[3])
```
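A minimal invocation sketch (file names are placeholders; the import assumes `utils/` is on the path). The script can equally be run via its `__main__` block.
```python
from exercise_converter import ExerciseConverter

converter = ExerciseConverter()
converter.generate_sql_from_csv("exercises.csv", "exercises_out.sql", "muscles_worked_out.sql")
```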
#### File: python/xrsrv/routine_engine.py
```python
from xrsrv.type_factories import RoutineEnvironment
import xrsrv.routine_generators.debug
import xrsrv.routine_generators.basic_random
import xrsrv.routine_generators.multi_day_random
class EngineException(Exception):
""" Routine engine exception class """
def __init__(self, value):
super(EngineException, self).__init__()
self.value = value
def __str__(self):
return self.value
class RoutineEngine(object):
""" Routine engine """
def __init__(self, exercise_database_object):
self.exercise_database = exercise_database_object
self.user_routine_history = []
self.user_preferences = None
self.available_exercises = set()
self.unavailable_exercises = set()
self.exercise_data = {exercise: self.exercise_database.get_exercise_data(exercise)\
for exercise in self.exercise_database.get_list_of_exercise_names()}
self.generators = {
"debug": xrsrv.routine_generators.debug,
"basic_random": xrsrv.routine_generators.basic_random,
"multi_day_random": xrsrv.routine_generators.multi_day_random
}
def set_user_exercise_environment(self, user_fixtures, user_rigs, verbose=False):
""" set the user environment to use for generation functions
if len(user_fixtures) = 0, give all exercises possible
"""
print_verbose = print if verbose else lambda *a, **k: None
self.available_exercises = set()
self.unavailable_exercises = set()
# Starting with the full list of exercise choices, remove or use them depending on
# whether they pass all the rules tests
for exercise_name, exercise in self.exercise_data.items():
# *** Fixture checks ***
if not user_fixtures:
print_verbose("Y: No user fixtures supplied, adding by default: " + exercise_name)
self.available_exercises.add(exercise_name)
continue
#Check if the user has any fixture satisfying this exercise
#if count(exercise_fixtures) > 1 then any single fixture can be used
if user_fixtures and exercise.fixtures.intersection({uf.name for uf in user_fixtures}):
# User had the fixture, check rigs
if exercise.rigs:
exercise_rig_names = {rig.name for rig in exercise.rigs}
user_rig_names = {rig.name for rig in user_rigs}
# If count(exercise_rigs) > 0 and all are optional, then any single one or none can be used
optional_values = [rig.optional for rig in exercise.rigs]
if optional_values and all(optional_values):
print_verbose("Y: All rigs are optional ({0}), adding {1}".format(exercise.rigs, exercise_name))
self.available_exercises.add(exercise_name)
continue
# If count(exercise_rigs) > 1 and all are not optional, then any single one can be used
if len(exercise_rig_names) == 1:
if exercise_rig_names.issubset(user_rig_names):
print_verbose("Y: Has the single required rig ({0}), adding {1}".format(\
*exercise_rig_names, exercise_name))
self.available_exercises.add(exercise_name)
continue
else:
print_verbose("N: User doesn't have the rig ({0}), skipping {1}".format(\
*exercise_rig_names, exercise_name))
self.unavailable_exercises.add(exercise_name)
continue
else: # assume > 1
required_rig_names = {rig.name for rig in exercise.rigs if not rig.optional}
if user_rig_names.intersection(required_rig_names):
print_verbose("Y: Has more than one that work as the required rig ({0}), adding {1}"\
.format(user_rig_names.intersection(required_rig_names), exercise_name))
self.available_exercises.add(exercise_name)
continue
else:
print_verbose("N: User doesn't have any rigs ({0}) that work for {1}".format(\
*required_rig_names, exercise_name))
self.unavailable_exercises.add(exercise_name)
continue
else:
print_verbose("Y: User has fixture and exercise requires no rigs, adding " + exercise_name)
self.available_exercises.add(exercise_name)
continue
raise EngineException("failed to classify exercise: " + exercise_name)
else:
print_verbose("N: User doesn't have the fixture(s) ({0}), skipping {1}".format(\
exercise.fixtures, exercise_name))
self.unavailable_exercises.add(exercise_name)
def set_user_routine_history(self, user_routine_history):
""" set the user exercise history
This should be a sequence of ExerciseSets
"""
self.user_routine_history = user_routine_history
def set_user_preferences(self, user_preferences):
""" set the user preferences
This describes general workout preferences which may or may not be used by the routines
Used as a hint for some routines
This should be a UserPreferences
"""
self.user_preferences = user_preferences
def generate_plan(self, generator, **kwargs):
""" generates single plan by generator referred to by name with arbitrary args
TODO document args in a consistent format
This returns a sequence of ExerciseSets
"""
if generator in self.generators:
routine_environment = RoutineEnvironment(self.available_exercises,\
self.unavailable_exercises, self.user_preferences, self.user_routine_history)
return self.generators[generator].generate_plan(routine_environment, self.exercise_data, **kwargs)
else:
raise EngineException("Invalid generator: " + str(generator))
```
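A condensed usage sketch mirroring the unit tests earlier in this dump; the database filename and the fixture/rig names are taken from those tests.
```python
from xrsrv import exercise_database, routine_engine
from xrsrv.type_factories import UserFixture, UserRig

engine = routine_engine.RoutineEngine(exercise_database.SQLiteConnection("exercise.db"))
engine.set_user_exercise_environment(
    [UserFixture("floor", 0, 0)],
    [UserRig("dumbbell pair", 55, 55)],
)
plan = engine.generate_plan("basic_random", n=8)
for exercise_set in plan[0]:
    print(exercise_set)
```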
|
{
"source": "jessepinnell/driver-sample",
"score": 2
}
|
#### File: driver-sample/python/driver_sample_app.py
```python
import sys
sys.path.append("../lib")
sys.path.append("lib")
sys.path.append("../python")
sys.path.append("python")
import wx
import wx.grid
import py_driver_sample
ID_ABOUT = 2323
ID_QUIT = 2324
ID_RESET = 2325
ID_READ = 2326
ID_WRITE = 2327
NUM_ROWS = 16
NUM_COLUMNS = 8
def get_cell_by_address(address):
""" get the cell address from a memory address """
return (address // NUM_COLUMNS, address % NUM_COLUMNS)
def get_address_by_cell(row, column):
""" get the memory address from a cell address """
return (row * NUM_COLUMNS) + column
class Simulator(object):
""" loopback simulator for when driver fails to talk to hardware """
def __init__(self):
self.data = [0] * (NUM_ROWS * NUM_COLUMNS)
def write(self, address, value):
""" shove a value into the fake memory """
print "setting 0x{0:x} to 0x{1:x}".format(address, value)
self.data[address] = value
def read(self, address):
""" increment a value in the fake memory and return it """
self.data[address] = min(self.data[address] + 1, 0xff)
return self.data[address]
def reset(self):
""" pretend to reset """
print "reset"
class RegisterGrid(wx.grid.Grid):
""" Grid matching Table 4.1 in http://ww1.microchip.com/downloads/en/DeviceDoc/20005282B.pdf """
def __init__(self, parent):
super(RegisterGrid, self).__init__(parent, size=(300, 400))
self.CreateGrid(NUM_ROWS, NUM_COLUMNS)
grid_font = wx.Font(9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Terminus")
self.SetDefaultCellFont(grid_font)
self.SetDefaultRowSize(19)
self.SetDefaultColSize(30)
self.SetFont(grid_font)
self.SetRowLabelSize(30)
self.SetLabelFont(grid_font)
self.skip_cell = None
for row in xrange(NUM_ROWS):
self.SetRowLabelValue(row, "{0:04b}".format(row))
for col in xrange(NUM_COLUMNS):
self.SetColLabelValue(col, "{0:04b}".format(col))
def set_value(self, address, value):
""" update a value in the grid """
cell = get_cell_by_address(address)
self.SetCellValue(cell[0], cell[1], "{0:02x}".format(value))
class DriverSampleApp(wx.Frame):
"""
Application for illustrating accessing SWIG bindings
"""
def __init__(self, parent, title):
"""
Create the frame, lay out controls, and set up event handlers
"""
wx.Frame.__init__(self, parent, wx.ID_ANY, title, size=(400, 480), style=(wx.CLOSE_BOX))
main_sizer = wx.BoxSizer(wx.VERTICAL)
reset_button = wx.Button(self, ID_RESET, "Make chip stop working (reset)", size=wx.Size(220, 23))
main_sizer.Add(reset_button, 0, wx.CENTER, 0)
register_sizer = wx.BoxSizer(wx.VERTICAL)
self.register_grid = RegisterGrid(self)
register_sizer.Add(self.register_grid, 0, wx.CENTER, 0)
main_sizer.Add(register_sizer, 0, wx.CENTER, 0)
self.SetSizer(main_sizer)
self.Center()
file_menu = wx.Menu()
file_menu.Append(ID_ABOUT, "&About", "Information")
file_menu.AppendSeparator()
        file_menu.Append(ID_QUIT, "&Quit", "Quit the program")
menu_bar = wx.MenuBar()
menu_bar.Append(file_menu, "&File")
self.SetMenuBar(menu_bar)
wx.EVT_BUTTON(self, ID_RESET, self.on_reset)
wx.EVT_MENU(self, ID_ABOUT, self.on_about)
wx.EVT_MENU(self, ID_QUIT, self.on_quit)
self.register_grid.Bind(wx.grid.EVT_GRID_CELL_CHANGE, self.on_grid_changed)
self.register_grid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.on_grid_cell_selected)
self.register_grid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.on_grid_edited)
self.skip_cell = None
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_poll_timer)
try:
self.log = py_driver_sample.Log()
self.driver = py_driver_sample.Driver()
self.timer.Start(1000)
except Exception as ex:
dialog = wx.MessageDialog(self, "Failed to initialize:\n" + str(ex)\
+ "\n\n*** Using simulator ***", "Error", wx.ICON_EXCLAMATION)
dialog.ShowModal()
dialog.Destroy()
self.driver = Simulator()
self.timer.Start(2500)
self.on_poll_timer(None)
self.Show(True)
def on_grid_changed(self, event):
""" handle a value in the grid being changed """
try:
value = int(self.register_grid.GetCellValue(event.Row, event.Col).encode("ascii", "ignore"), 16)
if value > 0xff:
raise Exception("Value must be in [0x0..0xff]")
address = get_address_by_cell(event.Row, event.Col)
self.driver.write(address, value)
except Exception as ex:
dialog = wx.MessageDialog(self, "Invalid entry: " + str(ex), "Error", wx.ICON_EXCLAMATION)
dialog.ShowModal()
dialog.Destroy()
self.skip_cell = None
event.Skip()
def on_grid_cell_selected(self, event):
""" a cell has been selected so skip updating it """
self.skip_cell = (event.Row, event.Col)
event.Skip()
def on_grid_edited(self, event):
""" ignore handling changes until enter is pressed """
event.Skip()
def on_poll_timer(self, event):
""" read all the register values and update them in the grid """
for i in range(0, NUM_ROWS * NUM_COLUMNS):
cell = get_cell_by_address(i)
if cell != self.skip_cell:
self.register_grid.set_value(i, self.driver.read(i))
else:
print "editing " + str(cell) + ", skipping update"
def on_reset(self, event):
"""
Trigger a reset
"""
try:
self.driver.reset()
except Exception as ex:
dialog = wx.MessageDialog(self, "Failed to reset:\n" + str(ex), "Error", wx.ICON_EXCLAMATION)
dialog.ShowModal()
dialog.Destroy()
self.Close()
def on_about(self, event):
"""
Open a simple about dialog
"""
dialog = wx.MessageDialog(self, "Driver app test for python bindings", "About", wx.OK)
dialog.ShowModal()
dialog.Destroy()
def on_quit(self, event):
"""
Quit the application
"""
self.Close()
if __name__ == "__main__":
# pylint: disable=invalid-name
application = wx.App(False)
main_frame = DriverSampleApp(None, "Driver Sample App")
application.MainLoop()
```
|
{
"source": "jessequinn/coursera_applied_data_science_with_python_specialization",
"score": 3
}
|
#### File: assignments/course_3/assignment+4.py
```python
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler, MaxAbsScaler, MinMaxScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
# specific for PipelineHelper Class
from sklearn.base import TransformerMixin, BaseEstimator, ClassifierMixin
from collections import defaultdict
import itertools
class PipelineHelper(BaseEstimator, TransformerMixin, ClassifierMixin):
def __init__(self, available_models=None, selected_model=None, include_bypass=False):
self.include_bypass = include_bypass
self.selected_model = selected_model
# this is required for the clone operator used in gridsearch
if type(available_models) == dict:
self.available_models = available_models
# this is the case for constructing the helper initially
else:
# a string identifier is required for assigning parameters
self.available_models = {}
for (key, model) in available_models:
self.available_models[key] = model
def generate(self, param_dict={}):
per_model_parameters = defaultdict(lambda: defaultdict(list))
# collect parameters for each specified model
for k, values in param_dict.items():
model_name = k.split('__')[0]
param_name = k[len(model_name)+2:] # might be nested
if model_name not in self.available_models:
raise Exception('no such model: {0}'.format(model_name))
per_model_parameters[model_name][param_name] = values
ret = []
# create instance for cartesion product of all available parameters for each model
for model_name, param_dict in per_model_parameters.items():
parameter_sets = (dict(zip(param_dict, x)) for x in itertools.product(*param_dict.values()))
for parameters in parameter_sets:
ret.append((model_name, parameters))
# for every model that has no specified parameters, add the default model
for model_name in self.available_models.keys():
if model_name not in per_model_parameters:
ret.append((model_name, dict()))
# check if the stage is to be bypassed as one configuration
if self.include_bypass:
ret.append((None, dict(), True))
return ret
def get_params(self, deep=False):
return {'available_models': self.available_models,
'selected_model': self.selected_model,
'include_bypass': self.include_bypass}
def set_params(self, selected_model, available_models=None, include_bypass=False):
include_bypass = len(selected_model) == 3 and selected_model[2]
if available_models:
self.available_models = available_models
if selected_model[0] is None and include_bypass:
self.selected_model = None
self.include_bypass = True
else:
if selected_model[0] not in self.available_models:
                raise Exception('no such model available: {0}'.format(selected_model[0]))
self.selected_model = self.available_models[selected_model[0]]
self.selected_model.set_params(**selected_model[1])
def fit(self, X, y=None):
if self.selected_model is None and not self.include_bypass:
raise Exception('no model was set')
elif self.selected_model is None:
# print('bypassing model for fitting, returning self')
return self
else:
# print('using model for fitting: ', self.selected_model.__class__.__name__)
return self.selected_model.fit(X, y)
def transform(self, X, y=None):
if self.selected_model is None and not self.include_bypass:
raise Exception('no model was set')
elif self.selected_model is None:
# print('bypassing model for transforming:')
# print(X[:10])
return X
else:
# print('using model for transforming: ', self.selected_model.__class__.__name__)
return self.selected_model.transform(X)
def predict(self, x):
if self.include_bypass:
raise Exception('bypassing classifier is not allowed')
if self.selected_model is None:
raise Exception('no model was set')
return self.selected_model.predict(x)
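# blight_model loads the blight-ticket train/test CSVs, drops leakage and unused
# columns, and grid-searches scaler/classifier combinations via the PipelineHelper above.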
def blight_model():
# variables (to be changed if needed)
train = 'train.csv'
test = 'test.csv'
addresses = 'addresses.csv'
latlons = 'latlons.csv'
    # remove features that exist only in the training data (leakage), plus unused features from both train and test
features_to_remove_from_train_only = [
'payment_amount',
'payment_date',
'payment_status',
'balance_due',
'collection_status',
'compliance_detail'
]
features_to_remove_general = [
'agency_name',
'inspector_name',
'violator_name',
'violation_street_number',
'violation_street_name',
'violation_zip_code',
'mailing_address_str_number',
'mailing_address_str_name',
'city',
'state',
'zip_code',
'non_us_str_code',
'country',
'ticket_issued_date',
'hearing_date',
'violation_code',
'violation_description',
'disposition',
'grafitti_status'
]
# https://stackoverflow.com/questions/18171739/unicodedecodeerror-when-reading-csv-file-in-pandas-with-python
df_train = pd.read_csv(train, encoding="latin-1", low_memory=False)
df_test = pd.read_csv(test)
df_addresses = pd.read_csv(addresses)
df_latlons = pd.read_csv(latlons)
# remove Null if the violator was found not responsible
df_train_filtered = df_train[df_train['compliance'].notnull()]
df_train_filtered = df_train_filtered[df_train_filtered['hearing_date'].notnull()]
# drop features from training data
df_train_filtered = df_train_filtered.drop(features_to_remove_from_train_only, axis=1)
df_train_filtered = df_train_filtered.drop(features_to_remove_general, axis=1)
df_test_filtered = df_test.drop(features_to_remove_general, axis=1)
# join addresses and latlons together based on common column of address
# https://stackoverflow.com/questions/45068584/merge-two-csv-files-into-one-with-pandas-by-id
# df_locations = pd.merge(df_addresses, df_latlons, how='inner', on=['address', 'address'])
# join addresses to train and test dataframe
# df_train_filtered = pd.merge(df_train_filtered, df_locations, how='inner', on=['ticket_id', 'ticket_id'])
# df_test_filtered = pd.merge(df_test_filtered, df_locations, how='inner', on=['ticket_id', 'ticket_id'])
# df_train_filtered = df_train_filtered.drop('address', axis=1)
# df_test_filtered = df_test_filtered.drop('address', axis=1)
# print(df_train_filtered.info())
y = df_train_filtered.compliance[0:200]
X = df_train_filtered.drop('compliance', axis=1)[0:200]
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# tuned_parameters = [
# {
# 'svm__kernel': ['linear'],
# 'svm__gamma': [0.0001, 0.001, 0.01, 0.05, 0.1, 1, 10, 100, 1000],
# 'svm__C': [1, 10, 100, 1000]
# }, {
# 'svm__kernel': ['rbf'],
# 'svm__gamma': [0.0001, 0.001, 0.01, 0.05, 0.1, 1, 10, 100, 1000],
# 'svm__C': [1, 10, 100, 1000]
# }]
    # the assignment grades on roc_auc; plain accuracy is used here for this local grid search
scores = ['accuracy']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
pipe = Pipeline([
('scaler', PipelineHelper([
('std', StandardScaler()),
('max', MaxAbsScaler()),
('minmax', MinMaxScaler()),
], include_bypass=True)), # this will produce one setting without scaler
('classifier', PipelineHelper([
('svm', SVC()),
('rf', RandomForestClassifier()),
('ada', AdaBoostClassifier()),
('gb', GradientBoostingClassifier()),
('knn', KNeighborsClassifier()),
('nb_pipe', Pipeline([
                    # Naive Bayes needs positive numbers
('scaler', MinMaxScaler()),
('nb', MultinomialNB())
])),
])),
])
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html
# http://scikit-learn.org/stable/modules/grid_search.html
# Set the parameters by cross-validation
# two grids should be explored linear and RBF
# tuned_parameters = {
# 'scaler__selected_model': pipe.named_steps['scaler'].generate({
# 'std__with_mean': [True, False],
# 'std__with_std': [True, False],
# 'max__copy': [True],
# 'minmax__copy': [True]
# }),
# 'classifier__selected_model': pipe.named_steps['classifier'].generate({
# 'svm__kernel': ['linear', 'rbf'],
# 'svm__gamma': [0.0001, 0.001, 0.01, 0.05, 0.1, 1, 10, 100, 1000],
# 'svm__C': [1, 10, 100, 1000],
# 'gbclf__max_depth': [3]
# })
# }
params = {
'scaler__selected_model': pipe.named_steps['scaler'].generate({
'std__with_mean': [True, False],
'std__with_std': [True, False],
# no params for 'max' leads to using standard params
}),
'classifier__selected_model': pipe.named_steps['classifier'].generate({
'svm__C': [0.1, 1.0],
'svm__kernel': ['linear', 'rbf'],
'rf__n_estimators': [10, 20, 50, 100, 150],
'rf__max_features' : ['auto', 'sqrt', 'log2'],
'rf__min_samples_split' : [2, 5, 10],
'rf__min_samples_leaf' : [1, 2, 4],
'rf__bootstrap': [True, False],
'ada__n_estimators': [10, 20, 40, 100],
'ada__algorithm': ['SAMME', 'SAMME.R'],
'gb__n_estimators': [10, 20, 50, 100],
'gb__criterion': ['friedman_mse', 'mse', 'mae'],
'gb__max_features': ['auto', 'sqrt', None],
'knn__n_neighbors' : [2, 3, 5, 7, 10],
'knn__leaf_size':[1,2,3,5],
'knn__weights': ['uniform', 'distance'],
'knn__algorithm': ['auto', 'ball_tree','kd_tree','brute'],
'nb_pipe__nb__fit_prior': [True, False],
'nb_pipe__nb__alpha': [0.1, 0.2],
})
}
# pipe = Pipeline(steps=[('scaler', MinMaxScaler()), ('svm', SVC())])
estimator = GridSearchCV(pipe, params, scoring='%s'%score, verbose=1, n_jobs=-1)
estimator.fit(X_train, y_train)
print("Best parameters set found on development set: ", estimator.best_params_)
print()
        print('Grid best score ({}): '.format(score), estimator.best_score_)
print()
# means = estimator.cv_results_['mean_test_score']
# stds = estimator.cv_results_['std_test_score']
# for mean, std, params in zip(means, stds, estimator.cv_results_['params']):
# print("%0.3f (+/-%0.03f) for %r"
# % (mean, std * 2, params))
# print()
# print("Detailed classification report:")
# print()
# print("The model is trained on the full development set.")
# print("The scores are computed on the full evaluation set.")
# print()
# y_true, y_pred = y_test, estimator.predict(X_test)
# print(classification_report(y_true, y_pred))
# print()
blight_model()
```
|
{
"source": "jessequinn/coursera_ibm_data_science_specialization",
"score": 4
}
|
#### File: course_4_-_python_for_data_science/week_5/fake_album_cover_game.py
```python
from IPython.display import Image as IPythonImage
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import requests
from lxml.html import fromstring
'''Helper Function to superimpose text on image'''
def display_cover(top, bottom):
name = 'album_art_raw.png'
    # Now let's get an album cover.
# https://picsum.photos/ is a free service that offers random images.
# Let's get a random image:
album_art_raw = requests.get('https://picsum.photos/500/500/?random')
# and save it as 'album_art_raw.png'
with open(name, 'wb') as album_art_raw_file:
album_art_raw_file.write(album_art_raw.content)
# Now that we have our raw image, let's open it
# and write our band and album name on it
img = Image.open("album_art_raw.png")
draw = ImageDraw.Draw(img)
# We'll choose a font for our band and album title,
# run "% ls /usr/share/fonts/truetype/dejavu" in a cell to see what else is available,
# or download your own .ttf fonts!
band_name_font = ImageFont.truetype(
"dejavu-sans/DejaVuSans-Bold.ttf", 25) # 25pt font
album_name_font = ImageFont.truetype(
"dejavu-sans/DejaVuSans-Bold.ttf", 20) # 20pt font
# the x,y coordinates for where our album name and band name text will start
# counted from the top left of the picture (in pixels)
band_x, band_y = 50, 50
album_x, album_y = 50, 400
# Our text should be visible on any image. A good way
# of accomplishing that is to use white text with a
# black border. We'll use the technique shown here to draw the border:
# https://mail.python.org/pipermail/image-sig/2009-May/005681.html
outline_color = "black"
draw.text((band_x-1, band_y-1), top,
font=band_name_font, fill=outline_color)
draw.text((band_x+1, band_y-1), top,
font=band_name_font, fill=outline_color)
draw.text((band_x-1, band_y+1), top,
font=band_name_font, fill=outline_color)
draw.text((band_x+1, band_y+1), top,
font=band_name_font, fill=outline_color)
draw.text((album_x-1, album_y-1), bottom,
font=album_name_font, fill=outline_color)
draw.text((album_x+1, album_y-1), bottom,
font=album_name_font, fill=outline_color)
draw.text((album_x-1, album_y+1), bottom,
font=album_name_font, fill=outline_color)
draw.text((album_x+1, album_y+1), bottom,
font=album_name_font, fill=outline_color)
draw.text((band_x, band_y), top, (255, 255, 255), font=band_name_font)
draw.text((album_x, album_y), bottom,
(255, 255, 255), font=album_name_font)
return img
'''Wikipedia random select'''
def get_band_album_titles():
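    # fetch two random Wikipedia pages and use the text before the first '-' in each
    # page title as the band name and album name, returned joined by '-'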
wikipedia_link = 'https://en.wikipedia.org/wiki/Special:Random'
raw_random_wikipedia_page = requests.get(wikipedia_link)
tree = fromstring(raw_random_wikipedia_page.content)
raw_title = tree.findtext('.//title')
band_title = raw_title.split('-')
raw_random_wikipedia_page = requests.get(wikipedia_link)
tree = fromstring(raw_random_wikipedia_page.content)
raw_title = tree.findtext('.//title')
album_title = raw_title.split('-')
return (band_title[0].rstrip() + '-' + album_title[0].rstrip())
# first part use data science and python as bottom and top
img = display_cover(top='Python', bottom='Data Science')
img.save('sample-out.png')
IPythonImage(filename='sample-out.png')
# second part capture two random titles and make them top and bottom text
band_title, album_title = get_band_album_titles().split('-')
album_cover = display_cover(top=band_title, bottom=album_title)
album_cover.save('album-cover.png')
IPythonImage(filename='album-cover.png')
```
|
{
"source": "jessequinn/hbsis",
"score": 3
}
|
#### File: project/users/views.py
```python
from flask import flash, redirect, render_template, request, url_for, Blueprint
from flask_login import login_user, logout_user, login_required
from project import db
from project.models import User, bcrypt
from project.users.forms import LoginForm, RegisterForm
users_blueprint = Blueprint(
'users', __name__,
template_folder='templates'
)
@users_blueprint.route('/login', methods=['GET', 'POST'])
def login():
'''
Login page.
:return: rendered template
'''
error = None
form = LoginForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(username=request.form['username']).first()
if user is not None and bcrypt.check_password_hash(user.password, request.form['password']):
login_user(user)
flash('You are logged in.')
return redirect(url_for('main.home'))
else:
error = 'Invalid log in credentials. Please try again.'
return render_template('login.html', form=form, error=error)
@users_blueprint.route('/logout')
@login_required
def logout():
'''
Logout routine.
:return: redirect
'''
logout_user()
flash('You were logged out.')
return redirect(url_for('main.home'))
@users_blueprint.route('/register', methods=['GET', 'POST'])
def register():
'''
Registration page.
:return: rendered template
'''
form = RegisterForm()
if form.validate_on_submit():
user = User(
username=form.username.data,
password=form.password.data
)
db.session.add(user)
db.session.commit()
login_user(user) # automatically log user in
return redirect(url_for('main.home'))
return render_template('register.html', form=form)
```
#### File: frontend_docker/tests/base.py
```python
from flask_testing import TestCase
from project import app, db
from project.models import User, WeatherRegistration
class BaseTestCase(TestCase):
'''
Backend setup and destruction.
'''
def create_app(self):
app.config.from_object('config.TestConfig')
return app
def setUp(self):
db.create_all()
db.session.add(User('testuser', 'testuser'))
db.session.add(WeatherRegistration('Toronto', 6167865, 'Canada', 1))
db.session.commit()
def tearDown(self):
db.session.remove()
db.drop_all()
```
|
{
"source": "jessequinn/udemy_python_complete",
"score": 4
}
|
#### File: udemy_python_complete/Python Scripts/lesson_110_scope.py
```python
def fact(n):
""" calculate n! iteratively """
result = 1
if n > 1:
for f in range(2, n + 1):
result *= f
return result
# for i in range(130):
# print(i, fact(i))
def recurse_fact(n):
# n! can also be defined as n * (n-1)!
""" calculates n! recursively """
if n <= 1:
return 1
else:
return n * recurse_fact(n - 1)
# for i in range(130):
# print(i, recurse_fact(i))
def recurse_fib(n):
""" F(n) = F(n - 1) + F(n - 2) """
if n < 2:
return n
else:
return recurse_fib(n - 1) + recurse_fib(n - 2)
# for i in range(36):
# print(i, recurse_fib(i))
def fib(n):
if n == 0:
result = 0
elif n == 1:
result = 1
else:
n_minus1 = 1
n_minus2 = 0
for f in range(1, n):
result = n_minus2 + n_minus1
n_minus2 = n_minus1
n_minus1 = result
return result
# for i in range(36):
# print(i, fib(i))
for i in range(36):
print(i, recurse_fib(i), "\t", fib(i))
```
#### File: udemy_python_complete/Python Scripts/lesson_111_filesanddirectories.py
```python
import os
def list_directories(s):
def dir_list(d):
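        # recursive inner function; the nonlocal tab_stop tracks how deep the
        # current directory is so the listing can be indented accordingly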
nonlocal tab_stop
files = os.listdir(d)
for f in files:
current_dir = os.path.join(d, f)
if os.path.isdir(current_dir):
print("\t" * tab_stop + "Directory " + f)
tab_stop += 1
dir_list(current_dir)
tab_stop -= 1
else:
print("\t" * tab_stop + f)
tab_stop = 0
if os.path.exists(s):
        print("Directory listing of " + s)
dir_list(s)
else:
print(s + " does not exist")
list_directories('.')
```
#### File: udemy_python_complete/Python Scripts/lesson_113_oop.py
```python
class Kettle(object):
def __init__(self, make, price):
self.make = make
self.price = price
self.on = False
kenwood = Kettle("Kenwood", 8.99)
print(kenwood.make)
print(kenwood.price)
kenwood.price = 12.75
print(kenwood.price)
hamilton = Kettle("Hamilton", 14.55)
print("Models: {} = ${}, {} = ${}".format(kenwood.make,
kenwood.price, hamilton.make, hamilton.price))
```
#### File: udemy_python_complete/Python Scripts/lesson_115_oop.py
```python
class Kettle(object):
# Class attribute, all instances share this single attribute
power_source = "electricity"
def __init__(self, make, price):
self.make = make
self.price = price
self.on = False
def switch_on(self):
self.on = True
kenwood = Kettle("Kenwood", 8.99)
print(kenwood.make)
print(kenwood.price)
kenwood.price = 12.75
print(kenwood.price)
hamilton = Kettle("Hamilton", 14.55)
print("Models: {} = {}, {} = {}".format(kenwood.make, kenwood.price, hamilton.make, hamilton.price))
print("Models: {0.make} = {0.price}, {1.make} = {1.price}".format(kenwood, hamilton))
"""
Class: template for creating objects. All objects created using the same class will have the same characteristics.
Object: an instance of a class.
Instantiate: create an instance of a class.
Method: a function defined in a class.
Attribute: a variable bound to an instance of a class.
"""
print(hamilton.on)
hamilton.switch_on()
print(hamilton.on)
Kettle.switch_on(kenwood)
print(kenwood.on)
kenwood.switch_on()
print("*" * 80)
kenwood.power = 1.5
print (kenwood.power)
# print(hamilton.power)
print("Switch to atomic power")
Kettle.power_source = "atomic"
print(Kettle.power_source)
print("Switch kenwood to gas")
kenwood.power_source = "gas"
print(kenwood.power_source)
print(hamilton.power_source)
# print(Kettle.__dict__)
# print(kenwood.__dict__)
# print(hamilton.__dict__)
```
#### File: udemy_python_complete/Python Scripts/lesson_121_song.py
```python
class Song:
"""Class to represent a song
Attributes:
title (str): The title of the song
        artist (Artist): An artist object representing the song's creator.
duration (int): The duration of the song in seconds. May be zero
"""
def __init__(self, title, artist, duration=0):
self.title = title
self.artist = artist
self.duration = duration
class Album:
    """Class to represent an Album, using its track list
    Attributes:
        name (str): The name of the album.
        year (int): The year the album was released.
        artist (Artist): The artist responsible for the album. If not specified,
the artist will default to an artist with the name "Various Artists".
tracks (List[Song]): A list of the songs on the album.
Methods:
add_song: Used to add a new song to the album's track list.
"""
def __init__(self, name, year, artist=None):
self.name = name
self.year = year
if artist is None:
self.artist = Artist("Various Artists")
else:
self.artist = artist
self.tracks = []
def add_song(self, song, position=None):
"""Adds a song to the track list
Args:
song (Song): A song to add.
position (Optional[int]): If specified, the song will be added to that position
in the track list - inserting it between other songs if necessary.
Otherwise, the song will be added to the end of the list.
"""
if position is None:
self.tracks.append(song)
else:
self.tracks.insert(position, song)
class Artist:
"""Basic class to store artist details.
Attributes:
name (str): The name of the artist.
albums (List[Album]): A list of the albums by this artist.
The list includes only those albums in this collection, it is
not an exhaustive list of the artist's published albums.
Methods:
add_album: Use to add a new album to the artist's albums list
"""
def __init__(self, name):
self.name = name
self.albums = []
def add_album(self, album):
"""Add a new album to the list.
Args:
album (Album): Album object to add to the list.
                If the album is already present, it will not be added again (although this is yet to be implemented).
"""
self.albums.append(album)
def load_data():
new_artist = None
new_album = None
artist_list = []
with open("lesson_121_albums.txt", "r") as albums:
for line in albums:
# data row should consist of (artist, album, year, song)
artist_field, album_field, year_field, song_field = tuple(
line.strip('\n').split('\t'))
year_field = int(year_field)
print(artist_field, album_field, year_field, song_field)
if __name__ == '__main__':
load_data()
```
#### File: udemy_python_complete/Python Scripts/lesson_131_enemy.py
```python
class Enemy:
def __init__(self, name="Enemy", hit_points=0, lives=1):
self.name = name
self.hit_points = hit_points
self.lives = lives
def take_damage(self, damage):
remaining_points = self.hit_points - damage
if remaining_points >= 0:
self.hit_points = remaining_points
print("I took {} points damage and have {} left".format(damage, self.hit_points))
else:
self.lives -= 1
def __str__(self):
return "Name: {0.name}, Lives: {0.lives}, Hit points: {0.hit_points}".format(self)
```
#### File: udemy_python_complete/Python Scripts/lesson_178_star_args.py
```python
def build_tuple(*args):
return args
message_tuple = build_tuple("hello", "planet", "earth", "take", "me", "to", "your", "leader")
print(type(message_tuple))
print(message_tuple)
number_tuple = build_tuple(1, 2, 3, 4, 5, 6)
print(type(number_tuple))
print(number_tuple)
```
#### File: udemy_python_complete/Python Scripts/lesson_185_jukebox.py
```python
import sqlite3
try:
import tkinter
except ImportError: # python 2
import Tkinter as tkinter
conn = sqlite3.connect('lesson_176_music.sqlite')
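# Scrollbox is a tkinter Listbox with an attached vertical Scrollbar that is
# gridded into the same cell and wired to the listbox's yview.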
class Scrollbox(tkinter.Listbox):
def __init__(self, window, **kwargs):
# tkinter.Listbox.__init__(self, window, **kwargs) # Python 2
super().__init__(window, **kwargs)
self.scrollbar = tkinter.Scrollbar(window, orient=tkinter.VERTICAL, command=self.yview)
def grid(self, row, column, sticky='nsw', rowspan=1, columnspan=1, **kwargs):
# tkinter.Listbox.grid(self, row=row, column=column, sticky=sticky, rowspan=rowspan,
# **kwargs) # Python 2
super().grid(row=row, column=column, sticky=sticky, rowspan=rowspan, columnspan=columnspan, **kwargs)
self.scrollbar.grid(row=row, column=column, sticky='nse', rowspan=rowspan)
self['yscrollcommand'] = self.scrollbar.set
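# DataListBox is a Scrollbox that fills itself from a database table/field and can
# requery its contents when a linked value (e.g. the selected artist id) changes.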
class DataListBox(Scrollbox):
def __init__(self, window, connection, table, field, sort_order=(), **kwargs):
# Scrollbox.__init__(self, window, **kwargs) # Python 2
super().__init__(window, **kwargs)
self.cursor = connection.cursor()
self.table = table
self.field = field
self.sql_select = "SELECT " + self.field + ", _id" + " FROM " + self.table
if sort_order:
self.sql_sort = " ORDER BY " + ','.join(sort_order)
else:
self.sql_sort = " ORDER BY " + self.field
def clear(self):
self.delete(0, tkinter.END)
def requery(self, link_value=None):
if link_value:
sql = self.sql_select + " WHERE " + "artist" + "=?" + self.sql_sort
print(sql) # TODO delete this line
self.cursor.execute(sql, (link_value,))
else:
print(self.sql_select + self.sql_sort) # TODO delete this line
self.cursor.execute(self.sql_select + self.sql_sort)
# clear the listbox contents before re-loading
self.clear()
for value in self.cursor:
self.insert(tkinter.END, value[0])
def on_select(self, event):
print(self is event.widget) # TODO delete this line
index = self.curselection()[0]
value = self.get(index),
# get the artist ID from the database row
link_id = self.cursor.execute(self.sql_select + " WHERE " + self.field + "=?", value).fetchone()[1]
albumList.requery(link_id)
# artist_id = conn.execute("SELECT artists._id FROM artists WHERE artists.name=?", artist_name).fetchone()
# alist = []
# for row in conn.execute("SELECT albums.name FROM albums WHERE albums.artist = ? ORDER BY albums.name", artist_id):
# alist.append(row[0])
# albumLV.set(tuple(alist))
# songLV.set(("Choose an album",))
def get_songs(event):
lb = event.widget
index = int(lb.curselection()[0])
album_name = lb.get(index),
# get the artist ID from the database row
album_id = conn.execute("SELECT albums._id FROM albums WHERE albums.name=?", album_name).fetchone()
alist = []
for x in conn.execute("SELECT songs.title FROM songs WHERE songs.album=? ORDER BY songs.track", album_id):
alist.append(x[0])
songLV.set(tuple(alist))
mainWindow = tkinter.Tk()
mainWindow.title('Music DB Browser')
mainWindow.geometry('1024x768')
mainWindow.columnconfigure(0, weight=2)
mainWindow.columnconfigure(1, weight=2)
mainWindow.columnconfigure(2, weight=2)
mainWindow.columnconfigure(3, weight=1) # spacer column on right
mainWindow.rowconfigure(0, weight=1)
mainWindow.rowconfigure(1, weight=5)
mainWindow.rowconfigure(2, weight=5)
mainWindow.rowconfigure(3, weight=1)
# ===== labels =====
tkinter.Label(mainWindow, text="Artists").grid(row=0, column=0)
tkinter.Label(mainWindow, text="Albums").grid(row=0, column=1)
tkinter.Label(mainWindow, text="Songs").grid(row=0, column=2)
# ===== Artists Listbox =====
artistList = DataListBox(mainWindow, conn, "artists", "name")
artistList.grid(row=1, column=0, sticky='nsew', rowspan=2, padx=(30, 0))
artistList.config(border=2, relief='sunken')
artistList.requery()
artistList.bind('<<ListboxSelect>>', artistList.on_select)  # on_select requeries the album list for the chosen artist
# ===== Albums Listbox =====
albumLV = tkinter.Variable(mainWindow)
albumLV.set(("Choose an artist",))
albumList = DataListBox(mainWindow, conn, "albums", "name", sort_order=("name",))
albumList.requery(12)
albumList.grid(row=1, column=1, sticky='nsew', padx=(30, 0))
albumList.config(border=2, relief='sunken')
albumList.bind('<<ListboxSelect>>', get_songs)
# ===== Songs Listbox =====
songLV = tkinter.Variable(mainWindow)
songLV.set(("Choose an album",))
songList = DataListBox(mainWindow, conn, "songs", "title", ("track", "title"))
songList.requery()
songList.grid(row=1, column=2, sticky='nsew', padx=(30, 0))
songList.config(border=2, relief='sunken')
# ===== Main loop =====
testList = range(0, 100)
albumLV.set(tuple(testList))
mainWindow.mainloop()
print("closing database connection")
conn.close()
```
#### File: udemy_python_complete/Python Scripts/lesson_194_filesearch.py
```python
import fnmatch
import os
def find_albums(root, artist_name):
caps_name = artist_name.upper()
for path, directories, files in os.walk(root):
# for artist in fnmatch.filter(directories, artist_name):
# for artist in fnmatch.filter((d.upper() for d in directories), caps_name):
for artist in (d for d in directories if fnmatch.fnmatch(d.upper(), caps_name)):
subdir = os.path.join(path, artist)
for album_path, albums, _ in os.walk(subdir):
for album in albums:
yield os.path.join(album_path, album), album
def find_songs(albums):
for album in albums:
for song in os.listdir(album[0]): # we want the path, not the name of the album
yield song
album_list = find_albums("lesson_193_music", "Black*")
song_list = find_songs(album_list)
for s in song_list:
print(s)
```
#### File: udemy_python_complete/Python Scripts/lesson_209_timeitchallenge.py
```python
import timeit
from statistics import mean, stdev
def fact(n):
result = 1
if n > 1:
for f in range(2, n + 1):
result *= f
return result
def factorial(n):
# n! can also be defined as n * (n-1)!
if n <= 1:
return 1
else:
return n * factorial(n - 1)
if __name__ == "__main__":
list1 = timeit.repeat("x = fact(130)", setup="from __main__ import fact", number=10000, repeat=6)
list2 = timeit.repeat("x = factorial(130)", setup="from __main__ import factorial", number=10000, repeat=6)
print(mean(list1), stdev(list1))
print(mean(list2), stdev(list2))
```
#### File: udemy_python_complete/Python Scripts/lesson_96_functions.py
```python
def python_food():
width = 80
text = "Spam and eggs"
left_margin = (width - len(text)) // 2
print(" " * left_margin, text)
# def centre_text(*args, sep=' ', end='\n', file=None, flush=False):
# text = ""
# for arg in args:
# text += str(arg) + sep
# left_margin = (80 - len(text)) // 2
# print(" " * left_margin, text, end=end, file=file, flush=flush)
def centre_text(*args, sep=' '):
text = ""
for arg in args:
text += str(arg) + sep
left_margin = (80 - len(text)) // 2
return " " * left_margin + text
# with open("lesson_96_centered", mode='w') as centred_file:
# # call the function
# s1 = centre_text("spam and eggs")
# print(s1)
# s2 = centre_text("spam, spam and eggs")
# print(s2)
# s3 = centre_text(12)
# print(s3)
# s4 = centre_text("spam, spam, spam and spam")
# print(s4)
# s5 = centre_text("first", "second", 3, 4, "spam", sep=":")
# print(s5)
# with open("lesson_96_centered", mode='w') as centred_file:
# # call the function
# centre_text("spam and eggs", file=centred_file)
# centre_text("spam, spam and eggs", file=centred_file)
# centre_text(12, file=centred_file)
# centre_text("spam, spam, spam and spam", file=centred_file)
# centre_text("first", "second", 3, 4, "spam", sep=":", file=centred_file)
with open("lesson_96_menu", "w") as menu:
s1 = centre_text("spam and eggs")
print(s1, file=menu)
s2 = centre_text("spam, spam and eggs")
print(s2, file=menu)
print(centre_text(12), file=menu)
print(centre_text("spam, spam, spam and spam"), file=menu)
s5 = centre_text("first", "second", 3, 4, "spam", sep=":")
print(s5, file=menu)
```
|
{
"source": "jessequinn/udemy_python_megacourse",
"score": 4
}
|
#### File: Python Scripts/oop/acc.py
```python
class Account:
def __init__(self, fp='balance.txt'):
self.fp = fp
self._balance = 0
self._balance_()
def _balance_(self):
with open(self.fp, 'r') as file:
self._balance = int(file.read())
def withdraw(self, amount):
self._balance = self._balance - amount
def deposit(self, amount):
self._balance = self._balance + amount
def commit(self):
with open(self.fp, 'w') as file:
file.write(str(self._balance))
def __str__(self):
return "Current Balance: {} ".format(self._balance)
class Checking(Account):
"""
This class generates checking account objects
"""
type = "checking"
def __init__(self, fee=0):
self.fee = fee
Account.__init__(self, fp='checkings.txt')
def transfer(self, amount, type):
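        # a '-' transfer debits the amount plus the transaction fee;
        # any other type credits the amount, still minus the fee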
if type == '-':
self._balance = self._balance - amount - self.fee
else:
self._balance = self._balance + amount - self.fee
if __name__ == '__main__':
acc = Account()
chk = Checking()
print(acc)
acc.withdraw(100)
acc.commit()
print(acc)
acc.deposit(300)
acc.commit()
print(acc)
print(chk)
chk.transfer(100, '+')
chk.commit()
print(chk)
print(chk.__doc__)
```
|
{
"source": "Jesse-Redford/SolidWorks_Pybullet_Integration",
"score": 3
}
|
#### File: SolidWorks_Pybullet_Integration/Test_Files/test_velosity_controller.py
```python
import pybullet
import pybullet_data
pybullet.connect(pybullet.GUI)
pybullet.resetSimulation()
pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())
def get_joint_info(robot):
print('The system has', pybullet.getNumJoints(robot), 'joints')
num_joints = pybullet.getNumJoints(robot)
for i in range(num_joints):
joint_info = pybullet.getJointInfo(robot, i)
print('Joint number',i)
print('-------------------------------------')
print('Joint Index:',joint_info[0])
print('Joint Name:',joint_info[1])
print('Joint misc:',joint_info[2:])
print('-------------------------------------')
return
def create_joint_velocity_controller(joint_index=0,lower_limit=-10,upper_limit=10,inital_velosity=0):
joint_info = pybullet.getJointInfo(robot, joint_index) # get name of joint, to create on screen label
joint_parameters = pybullet.addUserDebugParameter(paramName=str(joint_info[1])+'VC', rangeMin=lower_limit, rangeMax =upper_limit, startValue=inital_velosity)
    # pass the returned array to activate_velocity_controller in the main loop of your script
return [ joint_index,joint_parameters]
def activate_velocity_controller(joint_parameters):
joint_index = joint_parameters[0]
velosity = joint_parameters[1]
user_velocity = pybullet.readUserDebugParameter(velosity)
pybullet.setJointMotorControl2(robot, joint_index, pybullet.VELOCITY_CONTROL,targetVelocity= user_velocity)
joint_info = pybullet.getJointState(robot,joint_index)
joint_position = joint_info[0]
joint_velosity = joint_info[1]
return joint_position,joint_velosity
# Load robot URDF file created in solidworks
robot = pybullet.loadURDF(r'C:\Users\Jesse\Desktop\PyFluid\Assem_test8\urdf\Assem_test8.urdf',[0,0,0],useFixedBase=1)
# get joint info about robot
get_joint_info(robot)
# Create velocity controller for a particular joint
Joint1_VC = create_joint_velocity_controller(joint_index =0,lower_limit=-3.14,upper_limit=3.14,inital_velosity=0)
# Set up simulation parameters
pybullet.setGravity(0, 0, -9.81)
pybullet.setTimeStep(0.0001)
# Start simulation; activate Joint1 for on-screen control and get position and velocity readings
while True:
joint1_position,joint1_velocity = activate_velocity_controller(Joint1_VC)
print(joint1_position,joint1_velocity )
pybullet.stepSimulation()
pybullet.disconnect()
```
|
{
"source": "Jesse-Redford/SurfaceMetrology",
"score": 2
}
|
#### File: Jesse-Redford/SurfaceMetrology/analysis_app.py
```python
import os
import time
import numpy as np
import pandas as pd
import itertools
import glob
import streamlit as st
from streamlit_metrics import metric_row
# Image processing
import cv2
import PIL
from PIL import Image, ImageOps
PIL.Image.MAX_IMAGE_PIXELS = None
# Classifcation Metrics and Classifiers
import pickle
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
from sklearn.model_selection import learning_curve,cross_val_score,ShuffleSplit,StratifiedKFold
# Plotting
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
mpl.rcParams['font.family'] = "Times New Roman"
plt.rcParams.update({'font.size': 16})
plt.rcParams['xtick.major.pad']='2'
plt.rcParams['ytick.major.pad']='2'
plt.rcParams['axes.labelpad']='2'
# Custum Functions / modules
from src.DataLoader import dataloader
from src.helper_functions import two_parameter_decision_boundary, calculate_dprime_and_acc_matrix, get_dprime_vs_acc_csv
from src.ISO251782 import iso25178
# Check the version
#print('pandas',pd.__version__)
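# SDT (signal detection theory) wraps a pickled tabular dataset of surface descriptors,
# splits it into train/validate/test, and scores each descriptor by its d-prime
# discriminability and by its single-descriptor classification accuracy.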
class SDT:
def __init__(self, tabular_dataset=None,split=(.25,.25,.5)):
self.tabular_dataset = tabular_dataset
self.split = split
dloader = dataloader()
self.dataset = dloader.split_data(pd.read_pickle(self.tabular_dataset), split=self.split)
self.selected_parameters = self.parameters = [ele for ele in self.dataset.columns if ele not in {'label', 'image', 'category'}]
self.selected_labels = self.class_labels = self.dataset.label.unique()
self.training_data = self.dataset.loc[self.dataset['category'].isin(['train', 'validate'])]
self.testing_data = self.dataset.loc[self.dataset['category'].isin(['test'])]
self.data = self.dataset.loc[self.dataset['category'].isin(['train', 'validate','test'])]
def calculate_dprimes(self):
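        # pairwise d' for one descriptor: |mu_i - mu_j| / sqrt((sigma_i^2 + sigma_j^2) / 2),
        # summed over every class pair and then averaged via 1 / C(n, 2) below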
dprime_scores = []
combinations = []
for feature in self.selected_parameters:
dprime = 0
class_dictionary = {}
for i, label_i in enumerate(self.selected_labels[:-1]):
for label_j in self.selected_labels[i + 1:]:
ui = self.training_data[self.training_data['label'] == label_i][feature].mean()
uj = self.training_data[self.training_data['label'] == label_j][feature].mean()
sigmai = self.training_data[self.training_data['label'] == label_i][feature].std()
sigmaj = self.training_data[self.training_data['label'] == label_j][feature].std()
dprime += np.abs((np.max([ui, uj]) - np.min([ui, uj])) / np.sqrt((sigmai ** 2 + sigmaj ** 2) / 2))
class_dictionary[label_i+'_vs_'+label_j] = np.abs((np.max([ui, uj]) - np.min([ui, uj])) / np.sqrt((sigmai ** 2 + sigmaj ** 2) / 2))
combinations.append(class_dictionary)
n = len(self.selected_labels)
coeff = 1 / (np.math.factorial(n) / (np.math.factorial(2) * np.math.factorial(n - 2)))
dprime_scores.append((coeff * dprime))
self.dprime_df = pd.DataFrame(combinations,index = self.selected_parameters)
# Extract best features from dprime matrix (take max along columns then demove duplicates)
self.best_parameters = list(set([self.dprime_df[column].idxmax() for column in self.dprime_df]))
best_parameters = []
for column in self.dprime_df:
best_parameters.append(self.dprime_df[column].idxmax())
#self.best_parameters = best_parameters
#print(best_parameters)
#self.best_parameters = list(set(best_parameters))
#print(self.best_parameters)
#print(self.dprime_df.apply(pd.DataFrame.describe, axis=1))
# choose what to sort dprime by, mean, std, min, max, %25 etc...
df = self.dprime_df.apply(pd.DataFrame.describe, axis=1)['mean']
ds = pd.DataFrame({'parameters':df.index, 'discriminability':df.values})
#print('ds',list(ds.sort_values(by='discriminability', ascending=False)['parameters']))
#dscores = {'parameters': self.selected_parameters, 'discriminability': dprime_scores}
#ds = pd.DataFrame(dscores)
#ds = ds.fillna(0)
return ds.sort_values(by='discriminability', ascending=False)
def calculate_parameters_accuracy(self):
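        # evaluate each descriptor on its own: binary tasks threshold at the overall
        # training mean, multiclass tasks assign each test sample to the nearest class mean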
results = []
for selected_parameter in self.selected_parameters:
threshold = float(self.training_data[selected_parameter].mean())
# compute parmameter average of each class label and store as 2 list
dd = self.training_data.groupby('label').agg({selected_parameter:['mean']}) # print('training sorted',dd[selected_parameter])
train_labels = list(dd[selected_parameter].index)
train_labels_means = list(dd[selected_parameter]['mean'])
# create list of tuples (class_label,parmeter value) = ('class1',.5)...('classN',2.4)
test_labels_and_values = list(zip(self.testing_data.label, self.testing_data[selected_parameter]))
            # Loop through the test labels and values
y_pred = []; y_true = []
for test_label, test_value in test_labels_and_values:
absolute_difference_function = lambda list_value: abs(list_value - test_value)
closest_value = min(train_labels_means, key=absolute_difference_function)
if test_value > threshold and len(self.selected_labels) == 2:
y_pred.append(train_labels[np.argmax(train_labels_means)])
elif test_value < threshold and len(self.selected_labels) == 2:
y_pred.append(train_labels[np.argmin(train_labels_means)])
else:
y_pred.append(train_labels[train_labels_means.index(closest_value)])
y_true.append(test_label)
acc = accuracy_score(y_true, y_pred)
results.append((selected_parameter, acc))
scores = pd.DataFrame(results, columns=['parameters', 'accuracy'])
scores = scores.sort_values(by='accuracy', ascending=False)
return scores.round(decimals=3)
def update_dataset(self):
self.data = self.data[self.data.label.isin(self.selected_labels) == True]
self.training_data = self.training_data[self.training_data.label.isin(self.selected_labels) == True]
self.testing_data = self.testing_data[self.testing_data.label.isin(self.selected_labels) == True]
return None
def dprime_vs_accuracy(self):
self.update_dataset()
return self.calculate_dprimes().merge(self.calculate_parameters_accuracy(), how='inner', left_on='parameters', right_on='parameters')
def plot_matrix(cm, classes, title):
fig,ax = plt.subplots()
ax = sns.heatmap(cm, cmap="Blues", annot=True, xticklabels=classes, yticklabels=classes, cbar=False, fmt='.5g')
ax.set(title=title, xlabel="Predicted Label", ylabel="True label")
return fig
def compute_metrics(confusion_matrix=None,class_labels=None):
#https://stackoverflow.com/questions/31324218/scikit-learn-how-to-obtain-true-positive-true-negative-false-positive-and-fal
cm = pd.DataFrame(confusion_matrix)
# False positives
FP = cm.sum(axis=0) - np.diag(cm)
# False negatives
FN = cm.sum(axis=1) - np.diag(cm)
# True Positives
TP = np.diag(cm)
# True Negatives
TN = cm.values.sum() - (FP + FN + TP)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy
ACC = (TP+TN)/(TP+FP+FN+TN)
metrics ={ 'ACC':ACC,
'FP':FP,
'FN':FN,
'TP':TP,
'TN':TN,
'TPR':TPR,
'TNR':TNR,
'PPV':PPV,
'NPV':FPR,
'FNR':FNR,
'FDR':FDR,}
df_metrics = pd.DataFrame.from_dict(metrics)
df_metrics['label'] =class_labels
return df_metrics
def folder_selector(folder_path='.'):
filenames = glob.glob(r'./Datasets/Tabular/*')
selected_filename = st.selectbox('Select a folder', filenames)
return os.path.join(folder_path, selected_filename)
def app():
st.title('Analysis App')
st.write('Based on: Signal Detection Theory')
st.write('Created by <NAME>')
pkl_data = folder_selector()
if st.button('dprime vs acc plot'):
fig = calculate_dprime_and_acc_matrix(pkl_data)
st.pyplot(fig)
#if st.button('Statstics'):
# dataset = pd.read_pickle(pkl_data)
# dataset.drop(['image'],axis=1,inplace=True)
# dataset = dataset.fillna(0)
# dataset.reset_index()
# print(dataset)
# print(pd.melt(dataset))
# fig,ax = plt.subplots()
# fig = sns.pairplot(dataset, hue="label")
# st.pyplot(fig)
test_percentage = st.number_input('Testing Percentage', min_value=.05, max_value=1.0, value=.5, step=.05)
    val_percentage = train_percentage = (1 - test_percentage) / 2
    sdt = SDT(tabular_dataset=pkl_data, split=(train_percentage, val_percentage, test_percentage))
sdt.selected_labels = st.multiselect('Select Categories To Compare',list(sdt.class_labels),list(sdt.selected_labels))
#for class_label in sdt.selected_labels:
for i,col in enumerate(st.columns(len(sdt.selected_labels))):
label = sdt.selected_labels[i]
train_examples = len(sdt.training_data[sdt.training_data["label"] == label])
test_examples = len(sdt.testing_data[sdt.testing_data["label"] == label])
col.write(f"{label}")
col.write(f"{train_examples}/{test_examples} ")
col.write(f"train/test")
sdt.selected_parameters = st.multiselect('Select a parameters to analyze:',sdt.parameters,sdt.selected_parameters)
if st.checkbox('Get A Complete Summary for Dprime vs Acc vs Feature vs Sub Task'):
st.download_button("Press to Download CSV file", get_dprime_vs_acc_csv(pkl_data),
"task_parameter_dprime_acc.csv", "text/csv", key='download-csv')
if st.checkbox('Single Descriptor Analysis'):
selected_parameter = st.selectbox('Select a parameter to analyze:',sdt.selected_parameters)
test_data, test_labels = map(list, zip(*[[sdt.testing_data.loc[sdt.testing_data['label'] == l][selected_parameter],l] for l in sdt.selected_labels]))
data, labels = map(list, zip(*[[sdt.training_data.loc[sdt.training_data['label'] == l][selected_parameter],l] for l in sdt.selected_labels]))
# Compute unbias threshold based on training data
threshold = st.slider('threshold', min_value=float(sdt.training_data[selected_parameter].min()),
max_value=float(sdt.training_data[selected_parameter].max()),
value=float(sdt.training_data[selected_parameter].mean()))
# Plot Distributions
fig, ax = plt.subplots(figsize=(20, 10))
#fig.set_figheight(3)
palette = itertools.cycle(sns.color_palette())
show_test_hist = st.checkbox('-overlay histogram data (test set)')
show_train_hist = st.checkbox('-overlay histogram data (training set)')
for i,d in enumerate(data):
c = next(palette)
sns.kdeplot(d, shade=True, label=labels[i], ax=ax,color=c)
if show_train_hist:
sns.histplot(data=d, label=labels[i], ax=ax, color=c)
if show_test_hist:
for i, d in enumerate(test_data):
c = next(palette)
sns.histplot(data=d, label=test_labels[i], ax=ax, color=c)
ax.legend(title_fontsize='xx-small',frameon=False)
if st.checkbox('-overlay pdf of test data'):
for i, d in enumerate(test_data):
sns.kdeplot(d, shade=False, label=test_labels[i]+'(test)',linestyle='--', ax=ax)
ax.axvline(threshold, 0, 1, color="k", linestyle="dashed", linewidth=1)
min_ylim, max_ylim = plt.ylim()
if len(sdt.selected_labels)<=2:
            plt.text(threshold, max_ylim * 1.01, "Criterion {:.2f}".format(threshold), fontsize=25)
ax.set(xlabel=selected_parameter+" Value (units)", ylabel="Density Estimation")
st.pyplot(fig)
# compute parmameter average of each class label and store as 2 list
dd = sdt.training_data.groupby('label').agg({selected_parameter: ['mean']}) #print('training sorted',dd[selected_parameter])
train_labels = list(dd[selected_parameter].index)
train_labels_means = list(dd[selected_parameter]['mean'])
# create list of tuples (class_label,parmeter value) = ('class1',.5)...('classN',2.4)
test_labels_and_values = list(zip(sdt.testing_data.label, sdt.testing_data[selected_parameter]))
        # Loop through the test labels and values
y_pred = []
y_true = []
for test_label,test_value in test_labels_and_values:
absolute_difference_function = lambda list_value: abs(list_value - test_value)
closest_value = min(train_labels_means, key=absolute_difference_function)
if test_value > threshold and len(sdt.selected_labels)==2:
y_pred.append(train_labels[np.argmax(train_labels_means)])
elif test_value < threshold and len(sdt.selected_labels)==2:
y_pred.append(train_labels[np.argmin(train_labels_means)])
else:
y_pred.append(train_labels[train_labels_means.index(closest_value)])
y_true.append(test_label)
cm = matrix = confusion_matrix(y_true, y_pred,labels=sdt.selected_labels) #df.label.unique())
df_metrics = compute_metrics(confusion_matrix=cm,class_labels=sdt.selected_labels)
correct = np.sum(np.diag(cm))
if len(sdt.selected_labels) == 2:
metric_row({'Parameter':selected_parameter,
                        'Criterion': "{:.2f}".format(threshold),
#"Accuracry": str(round(sklearn.metrics.accuracy_score(y_true, y_pred) * 100, 2)) ,
"Correct:Total" : str(correct) + ':' + str(np.sum(cm))})
else:
metric_row({'Parameter': selected_parameter,
                        'Criterion': 'NM',
# "Accuracry": str(round(sklearn.metrics.accuracy_score(y_true, y_pred) * 100, 2)) + '%',
"Correct:Total": str(correct) + ':' + str(np.sum(cm))})
if st.checkbox('-single descriptor classifcation performance'):
st.dataframe(df_metrics.set_index('label'))
if st.checkbox('Evaluate D-prime Matrix'):
sdt.calculate_dprimes()
co = st.slider(' d-prime cut off',0,5,0)
df = sdt.dprime_df
df[df < co] = np.nan
fig = plt.figure(figsize=(len(df.columns), len(df)))
sns.heatmap(df ,cmap='coolwarm', linewidths=0.5, annot=True,vmin=co)
st.pyplot(fig)
if st.checkbox('- d-prime vs accuracy single descriptors (based on nearest means for multiclass)'):
# update dataset and calculate dprime vs acc for each parameter
st.dataframe(sdt.dprime_vs_accuracy())
if st.checkbox('-get dprime statistics for each descriptor'):
st.dataframe(sdt.dprime_df.apply(pd.DataFrame.describe,axis=1))
    if st.checkbox('-use & view suggested parameters'):
sdt.dprime_vs_accuracy()
st.subheader(sdt.best_parameters) #sdt.selected_parameters)
sdt.selected_parameters = sdt.best_parameters
if st.checkbox('Run SVM to Convergence'):
svm_iterations = -1
else:
svm_iterations = 10000
if st.button('Optimize Classifiers'):
# get acc vs paratmer vs dprime dataframe
df_metrics = sdt.dprime_vs_accuracy()
sdt.selected_parameters = list(df_metrics.parameters)
#df_metrics.sort_values(by='discriminability', ascending=True, inplace=True)
svm_scores = []
dt_scores = []
nb_scores = []
for i, par in enumerate(sdt.selected_parameters):
#print('selects',sdt.selected_parameters)
#print(sdt.selected_parameters[:i + 1])
y_train = sdt.training_data['label'].to_numpy()
#print('features',sdt.selected_parameters[:i + 1])
x_train = sdt.training_data[sdt.selected_parameters[:i + 1]].to_numpy()
y_test = sdt.testing_data['label'].to_numpy()
x_test = sdt.testing_data[sdt.selected_parameters[:i + 1]].to_numpy()
y_data = sdt.data['label'].to_numpy()
x_data = sdt.data[sdt.selected_parameters[:i + 1]].to_numpy()
# Create classifiers
svm_clf = svm.SVC(kernel='linear', max_iter=svm_iterations,probability=True,random_state=17).fit(x_train, y_train)
dt_clf = DecisionTreeClassifier(criterion='entropy', random_state=77, max_depth=None).fit(x_train, y_train) # max_depth=-1
nb_clf = GaussianNB().fit(x_train, y_train) # MLPClassifier(random_state=1, max_iter=300).fit(x_train, y_train)
# record prediction acc on test dataset for each classifer
svm_scores.append(accuracy_score(y_test, svm_clf.predict(x_test)))
dt_scores.append(accuracy_score(y_test, dt_clf.predict(x_test)))
nb_scores.append(accuracy_score(y_test, nb_clf.predict(x_test)))
#print('decison tree scores',dt_scores)
dt = pd.DataFrame({'parameters': sdt.selected_parameters, 'dt_scores': dt_scores}, columns=['parameters', 'dt_scores'])
nb = pd.DataFrame({'parameters': sdt.selected_parameters, 'nb_scores': nb_scores},columns=['parameters', 'nb_scores'])
dd = pd.DataFrame({'parameters': sdt.selected_parameters, 'scores': svm_scores}, columns=['parameters', 'scores'])
dt.reset_index(drop=True, inplace=True)
nb.reset_index(drop=True, inplace=True)
dd.reset_index(drop=True, inplace=True)
tic_fontsize = 28
label_fontsize = 32
fig = plt.figure(figsize=(25, 15)) # Create matplotlib figure
ax = fig.add_subplot(111) # Create matplotlib axes
ax2 = ax.twinx() # Create another axes that shares the same x-axis as ax.
df_metrics.rename({'accuracy': 'TSD Accuracy'}, axis=1, inplace=True)
df_metrics.rename({'discriminability': 'Parameter Discriminability'}, axis=1, inplace=True)
dd.rename({'scores': 'Recognition Accuracy'}, axis=1, inplace=True)
df_metrics.set_index('parameters', inplace=True)
df_metrics['TSD Accuracy'].plot(kind='bar', color='k', ax=ax, width=.4, position=1)
df_metrics['Parameter Discriminability'].plot(kind='bar', color='gray', width=.4, ax=ax2, position=0)
chance_line = 1.0/float(len(sdt.selected_labels))
ax.set(ylim=(chance_line, 1.0))
ax.set_ylabel('Recognition Accuracy')
ax2.set_ylabel('d\'')
sns.lineplot(data=dd, x=dd.index, y="Recognition Accuracy", label='SVM Accuracy', ax=ax,
linestyle='dotted', linewidth=10, color='r')
sns.lineplot(data=dt, x=dt.index, y="dt_scores", label='Decision Tree Accuracy', ax=ax,
linestyle='dashdot', linewidth=10, color='b')
sns.lineplot(data=nb, x=nb.index, y="nb_scores", label='Naive Bayes Accuracy', ax=ax, linestyle='--',
linewidth=10, color='g')
ax.axhline(.95, color='k', label='95% Recognition Accuracy')
ax.legend().set_visible(False)
fig.legend(loc='upper center', bbox_to_anchor=(.5, 1.1), fancybox=True, shadow=True, ncol=3)
st.pyplot(fig)
"""
fig, ax = plt.subplots(figsize=(25, 15))
df_metrics.rename({'accuracy': 'TSD Accuracy'}, axis=1, inplace=True)
df_metrics.rename({'discriminability': 'Parameter Discriminability'}, axis=1, inplace=True)
dd.rename({'scores': 'Recognition Accuracy'}, axis=1, inplace=True)
chart = sns.lineplot(data=dd, x=dd.index, y="Recognition Accuracy", label='SVM Accuracy', ax=ax, linestyle='dotted',linewidth=7, color='r')
chart = sns.lineplot(data=dt, x=dt.index, y="dt_scores", label='Decision Tree Accuracy', ax=ax, linestyle='dashdot',linewidth=7, color='b')
chart = sns.lineplot(data=nb, x=nb.index, y="nb_scores", label='Naive Bayes Accuracy', ax=ax,linestyle='--',linewidth=7, color='g')
df_metrics.set_index('parameters', inplace=True)
df_metrics.plot(kind='bar', secondary_y='Parameter Discriminability', rot=0, ax=ax)
ax.right_ax.set_ylabel('Discriminability Index', fontsize=label_fontsize)
chart.set_xticklabels(chart.get_xticklabels(), rotation=45, fontsize=label_fontsize)
ax.set_ylabel('Recognition Accuracry')
ax.yaxis.label.set_size(label_fontsize)
ax.tick_params(axis="y", labelsize=tic_fontsize)
ax.right_ax.tick_params(axis="y", labelsize=tic_fontsize)
ax.xaxis.label.set_visible(False)
ax.set(ylim=(0.0, 1.0))
ax.set_yticks(np.arange(0.0, 1.1, .1))
#ax.yaxis.grid(True)
ax.axhline(.95, color='k',label='95% Recognition Accuracy')
#ax.right_ax.set(ylim=(0.0, 4.0))
ax.legend().set_visible(False)
fig.legend(loc='upper center', bbox_to_anchor=(.5, 1.1), fancybox=True, shadow=True, ncol=3,fontsize=label_fontsize)
st.pyplot(fig)
"""
plt.rcParams["font.family"] = "Times New Roman"
fig = plt.figure(figsize=(7,2)) # Create matplotlib figure
ax = fig.add_subplot(111) # Create matplotlib axes
ax2 = ax.twinx() # Create another axes that shares the same x-axis as ax.
width = 0.4
#print(df_metrics)
df_metrics.rename_axis('Surface Descriptors', inplace=True)
df_metrics.rename(columns={'TSD Accuracy': 'Recognition Accuracy (left)'}, inplace=True)
df_metrics.rename(columns={'Parameter Discriminability': 'd\'(right)' }, inplace=True)
df_metrics['Recognition Accuracy (left)'].plot(kind='bar', color='red', ax=ax, width=width, position=1,fontsize=14)
df_metrics['d\'(right)'].plot(kind='bar', color='blue', ax=ax2, width=width, position=0,fontsize=14)
ax.set_ylabel('Recognition Accuracy',fontsize=14)
ax.set_xlabel('Surface Descriptors', fontsize=14)
ax2.set_ylabel('d\'' ,fontsize=14)
ax.set(ylim=(0.5, 1.0))
#ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
fig.legend(loc='upper center', bbox_to_anchor=(.5, 1.15), fancybox=True, shadow=True,fontsize=14,ncol=2)
st.pyplot(fig)
# Save trained Classiferes
classifer_data = {'classifers':['decision_tree_classifer','bayes_classifer','svm_classifer'], #,'knn_classifer'],
'decision_tree_classifer':dt_clf, #DecisionTreeClassifier(random_state=200),
'bayes_classifer':nb_clf, #GaussianNB(),
'svm_classifer':svm_clf, #svm.SVC(kernel='rbf', max_iter=100),
#'knn_classifer': KNeighborsClassifier(n_neighbors=3).fit(x_data,y_data),
'inputs': sdt.selected_parameters,
'suggested_x': sdt.data[sdt.best_parameters].to_numpy(),
'x_test': x_data,
'y_test': y_data
}
with open(r'./Models/test.pkl', 'wb') as f:
pickle.dump(classifer_data, f)
st.title('Decision Boundary Analysis')
col1,col2 = st.columns([.5,.5])
with col1:
x_par = col1.selectbox('Descriptor (X axis)',sdt.selected_parameters)
with col2:
y_par = col2.selectbox('Descriptor (Y axis)',sdt.selected_parameters)
    if st.button('Generate Decision Boundary'):
fig = two_parameter_decision_boundary(pkl_data,
selected_labels = sdt.selected_labels,
selected_features = [x_par, y_par])
st.pyplot(fig)
st.title('Model Performance Comparison')
    if st.button('5-Fold Cross Validation'):
with open(r'./Models/test.pkl', 'rb') as f:
models = pickle.load(f)
print('models',models)
fig, ax = plt.subplots(1,2, figsize=(20, 10))
colors = ['k','b','r','g']
markers = ['o--','x--','d--','s--']
cv = StratifiedKFold(n_splits=5, shuffle=True,random_state=7)
for i,classifer in enumerate(models['classifers']):
clf = models[classifer]
train_sizes, train_scores, test_scores = learning_curve(clf, models['x_test'], models['y_test'], train_sizes = np.linspace(0.1, 1.0, 20), cv = cv,random_state=7)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax[0].plot(train_sizes, test_scores_mean, markers[i], color=colors[i], label=classifer)
ax[0].fill_between(
train_sizes,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color=colors[i],
)
ax[0].set_xlabel("Training examples")
ax[0].set_ylabel('Cross-Validation Score')
ax[0].legend()
ax[0].set_title('Selected Parameters for Deployment', fontsize=20)
ax[0].set_ylim(0,1,.05)
train_sizes, train_scores, test_scores = learning_curve(clf, models['suggested_x'], models['y_test'],
train_sizes=np.linspace(0.1, 1.0, 20), cv=cv,random_state=7)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax[1].plot(train_sizes, test_scores_mean, markers[i], color=colors[i], label=classifer)
ax[1].fill_between(
train_sizes,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color=colors[i],
)
ax[1].set_xlabel("Training examples")
ax[1].set_ylabel('Cross-Validation Score')
ax[1].set_title('Suggested Parameters')
ax[1].legend()
ax[1].set_ylim(0.0, 1.0, .05)
st.pyplot(fig)
"""
#with open('test.pkl', 'rb') as f:
# ml_model = pickle.load(f)
# dt_clf = ml_model['classifer']
# parameters = ml_model['parameters']
# print('classifer inputs',parameters)
# print(dt_clf.classes_)
#cm = confusion_matrix(y_test, dt_clf.predict(x_test))
#confusion_fig = plot_matrix(cm, np.unique(y_test), 'confusion matrix')
#st.pyplot(confusion_fig)
#cm = confusion_matrix(models['y_test'], clf.predict(models['x_test']))
#confusion_fig = plot_matrix(cm, np.unique(models['y_test']), 'confusion matrix')
#st.pyplot(confusion_fig)
from sklearn.metrics import roc_auc_score
with open(r'./Models/test.pkl', 'rb') as f:
models = pickle.load(f)
#print(models)
classifer = st.selectbox("Classifer", list(models.keys()))
clf = models[classifer]
iso = iso25178()
iso.parameters = models['inputs'] #sdt.selected_parameters.tolist()
#dt_clf = pickle.load(open('dt_classifer.sav', 'rb'))
#st.write(roc_auc_score(models['y_test'], clf.predict_proba(models['x_test']), multi_class='ovr'))
st.title('Model Inference')
uploaded_file = st.file_uploader("Upload Files",type=['png','jpeg','jpg','bmp','tif'])
loaded = False
if uploaded_file is not None:
loaded = True
image = Image.open(uploaded_file)
image = ImageOps.grayscale(image)
#image = np.asarray(image)
#print(sdt.selected_parameters)
#print('analysis',iso.compute_features(np.asarray(image)))
x = [[x[1] for x in iso.compute_features(np.asarray(image))]]
yhat = clf.predict(x)
st.subheader(yhat)
st.image(Image.open(uploaded_file))
#file_details = {"FileName":uploaded_file.name,"FileType":uploaded_file.type,"FileSize":uploaded_file.size}
#st.write(file_details)
def pyramid(image, scale=1.5, minSize=(30, 30)):
# yield the original image
yield image
# keep looping over the pyramid
def sliding_window(image, stepSize, windowSize):
# slide a window across the image
for y in range(0, image.shape[0], stepSize):
for x in range(0, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
if loaded:
image = np.asarray(image).astype('uint8')
winW = winH = st.number_input('Window Size',min_value=8, max_value=np.max(image.shape), value=32, step=4)
stepSize = st.number_input('StepSize',min_value=1, max_value=np.max(image.shape), value=16, step=4)
scale = st.number_input('Rescale',min_value=1.0, max_value=3.0, value=1.5, step=.1)
w = int(image.shape[1] / scale)
if st.button('Classify'):
# global classifcaition label
#print('anaylsis features',iso.compute_features(image))
global_yhat = clf.predict([[x[1] for x in iso.compute_features(image)]])[0]
st.subheader(global_yhat)
#sample_image = level_surface(np.asarray(image))
#winW = winH = 25
#stepSize = 16
# loop over the image pyramid
start = time.time()
for resized in pyramid(image, scale=1.5):
# loop over the sliding window for each layer of the pyramid
for (x, y, window) in sliding_window(resized, stepSize=stepSize, windowSize=(winW, winH)):
# print(window)
# if the window does not meet our desired window size, ignore it
if window.shape[0] != winH or window.shape[1] != winW:
continue
sample = image[y:y + winH, x:x + winW]
yhat = clf.predict([[x[1] for x in iso.compute_features(sample)]])[0]
if yhat == global_yhat:
cv2.rectangle(image, (x, y), (x + winW, y + winH), (0), 1) #,8,0)
end = time.time()
# image = imutils.resize(image, width=256)
st.image(image)
st.subheader('FPS: ' +str(round(1/(end-start),2)))
# cv2.waitKey(1)
# time.sleep(0.025)
"""
```
#### File: Jesse-Redford/SurfaceMetrology/compiler_app.py
```python
import os
import cv2
import glob
import time
import h5py
import imageio
import numpy as np
import pandas as pd
from PIL import Image
import streamlit as st
from src.ISO251782 import iso25178
from sklearn.impute import KNNImputer
def external_folder_selector(folder_path):
print(folder_path)
filenames = glob.glob(folder_path)
selected_filename = st.selectbox('Select a folder', filenames)
return os.path.join(folder_path, selected_filename)
def folder_selector(folder_path='.'):
filenames = glob.glob(r'./Datasets/MultiClass/*')
selected_filename = st.selectbox('Select a folder', filenames)
return os.path.join(folder_path, selected_filename)
def file_selector(folder_path='.'):
filenames = [os.path.join(path, name) for path, subdirs, files in os.walk(folder_path) for name in files]
selected_filename = st.selectbox('Select a file', filenames)
return selected_filename
def to_dict(tup):
    return dict(tup)
# %% loader function % https://gist.github.com/g-s-k/ccffb1e84df065a690e554f4b40cfd3a
def datx2py(file_name):
# unpack an h5 group into a dict
def _group2dict(obj):
return {k: _decode_h5(v) for k, v in zip(obj.keys(), obj.values())}
# unpack a numpy structured array into a dict
def _struct2dict(obj):
names = obj.dtype.names
return [dict(zip(names, _decode_h5(record))) for record in obj]
# decode h5py.File object and all of its elements recursively
def _decode_h5(obj):
# group -> dict
if isinstance(obj, h5py.Group):
d = _group2dict(obj)
if len(obj.attrs):
d['attrs'] = _decode_h5(obj.attrs)
return d
# attributes -> dict
elif isinstance(obj, h5py.AttributeManager):
return _group2dict(obj)
# dataset -> numpy array if not empty
elif isinstance(obj, h5py.Dataset):
d = {'attrs': _decode_h5(obj.attrs)}
try:
d['vals'] = obj[()]
except (OSError, TypeError):
pass
return d
# numpy array -> unpack if possible
elif isinstance(obj, np.ndarray):
if np.issubdtype(obj.dtype, np.number) and obj.shape == (1,):
return obj[0]
elif obj.dtype == 'object':
return _decode_h5([_decode_h5(o) for o in obj])
elif np.issubdtype(obj.dtype, np.void):
return _decode_h5(_struct2dict(obj))
else:
return obj
# dimension converter -> dict
elif isinstance(obj, np.void):
return _decode_h5([_decode_h5(o) for o in obj])
# bytes -> str
elif isinstance(obj, bytes):
return obj.decode()
# collection -> unpack if length is 1
elif isinstance(obj, list) or isinstance(obj, tuple):
if len(obj) == 1:
return obj[0]
else:
return obj
# other stuff
else:
return obj
# open the file and decode it
with h5py.File(file_name, 'r') as f:
h5data = _decode_h5(f)
zdata = h5data['Data']['Surface']
zdata = list(zdata.values())[0]
zvals = zdata['vals']
zvals[zvals == zdata['attrs']['No Data']] = np.nan
# get units
zunit = zdata['attrs']['Z Converter']['BaseUnit']
# Fill nans use global mean
#zvals[np.isnan(zvals)] = np.nanmean(zvals)
#return zvals
# Fill nans using KNN imputation
imputer = KNNImputer(n_neighbors=10,missing_values=np.nan)
zzvals = imputer.fit_transform(zvals)
return np.ascontiguousarray(zzvals, dtype=np.float64)
def app():
st.title('Dataset Compiler')
st.write('Created by <NAME>')
if st.checkbox('External Folder'):
path = st.text_input('Enter Path to Dataset')
foldername = external_folder_selector(path)
else:
foldername = folder_selector()
st.write('You selected `%s`' % foldername)
filename = file_selector(folder_path=foldername)
st.write('You selected `%s`' % filename)
if filename:
z = None
loaded_image = None
if filename.endswith('.datx'):
z = datx2py(filename)
loaded_image = cv2.normalize(z, z, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) # for displaying on st.image
elif filename.lower().endswith('.tiff'):
z = np.asarray(imageio.imread(filename))
loaded_image = cv2.normalize(z,z,0,255, cv2.NORM_MINMAX,cv2.CV_8U) # for displaying on st.image
elif filename.lower().endswith(('.bmp','.png', '.jpg', '.jpeg')):
z = np.asarray(Image.open(filename, mode='r').convert('L'))
loaded_image = Image.open(filename, mode='r')
else:
print('File Type Not Accepted')
col1, col2,col3 = st.columns([3,1,3])
with col1:
st.write("Source")
st.image(loaded_image, caption=f"{filename} {str(z.dtype)} {str(z.shape)}", width=250)
with col2:
st.write("")
with col3:
st.write("Converted")
if len(z.shape) > 2: # if RGB apply luma transform
z = (0.2989 * z[:,:,0] + 0.5870 * z[:,:,1] + 0.114 * z[:,:,2])
loaded_image = cv2.normalize(z, z, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
st.image(loaded_image, caption= f'Converted data is displayed in 8bit for preview, actual dtype: {str(z.dtype)} {str(z.shape)}', width=250)
st.write(z.dtype, z.shape)
st.subheader('Select Features To Compute on Dataset')
iso = iso25178()
selected_parameters = []
for par_type in iso.parameter_types:
if st.checkbox(f'{par_type} ({str(len(getattr(iso, par_type)))})'):
selected_parameters += getattr(iso, par_type)
user_pars = st.multiselect('Selected Parameters', selected_parameters, selected_parameters)
# Show options to modify polar plot defaults if any polar plot parameters are selected by user
if any(x in iso.polarplot_parameters for x in user_pars):
st.write('Set PolarPlot Parameters')
c1,c2 = st.columns(2)
if st.checkbox('Use Nan Masking for Polar Plot'):
iso.pp_mask = True
else:
iso.pp_mask = False
iso.theta_step = st.number_input('Enter rotation increment for Polar Plot (default = 1)',min_value=.1,max_value=1.0,value=1.0)
if st.button(f'Compute and Preview The ({str(len(user_pars))}) Selected Parameters'):
iso.parameters = user_pars
start = time.time()
features = iso.compute_features(z)
end = time.time()
col1, col2 = st.columns([3, 3])
col1.image(loaded_image, width=250)
col1.write(f'Processing Time: {round(end-start,3)} seconds')
col2.dataframe(pd.DataFrame(features),height=250)
dataset_name = st.text_input('Input the name of the dataset here:')
if st.button('Generate Dataset'):
iso.parameters = user_pars #iso_pars
rootdir = foldername
folder = []
filename = []
for file in os.listdir(rootdir):
folder_path = os.path.join(rootdir, file)
if os.path.isdir(folder_path):
folder_name = os.path.basename(folder_path)
for img_file in sorted(os.listdir(folder_path)):
folder.append(folder_name)
filename.append(folder_name + '/' + img_file)
df_reff = pd.DataFrame({'label': folder, 'image': filename})
df = pd.DataFrame(columns=iso.parameters)
progress_bar = st.progress(0)
for i, image in enumerate(df_reff.image):
progress_bar.progress(i / len(df_reff.image))
# open the image according to file type
filename = foldername + '/' + image
if filename.endswith('.datx'):
z = datx2py(filename)
elif filename.lower().endswith(('.tiff', '.tif')):
z = np.asarray(imageio.imread(filename))
if len(z.shape) > 2: # if RGB apply luma transform
z = (0.2989 * z[:, :, 0] + 0.5870 * z[:, :, 1] + 0.114 * z[:, :, 2])
elif filename.lower().endswith(('.bmp', '.png', '.jpg', '.jpeg')):
z = np.asarray(Image.open(filename, mode='r').convert('L'))
else:
print('File Type Not Accepted')
# compute selected features on data and add new values to dataframe
features = to_dict(iso.compute_features(z))
df = df.append(features, ignore_index=True)
df = df_reff.join(df)
# save data as pickle file and display on console
df.to_pickle(os.path.join(os.getcwd(), 'Datasets', 'Tabular', dataset_name + ".pkl"))
st.subheader(dataset_name + ".pkl")
st.dataframe(df)
```
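datx2py above marks the instrument's 'No Data' pixels as NaN and fills them with KNN imputation before any features are computed. A small, stand-alone illustration of that fill step (the 3x3 array is made up for demonstration):
```python
import numpy as np
from sklearn.impute import KNNImputer

z = np.array([[1.0, 2.0, 3.0],
              [4.0, np.nan, 6.0],
              [7.0, 8.0, 9.0]])

# each NaN is filled from the nearest rows, measured on the non-missing columns
imputer = KNNImputer(n_neighbors=2, missing_values=np.nan)
z_filled = imputer.fit_transform(z)
print(z_filled)  # the centre value becomes the mean of its two nearest neighbours
```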
#### File: Jesse-Redford/SurfaceMetrology/unet_app.py
```python
import streamlit as st
import time
import glob
import numpy as np
import pandas as pd
import logging
logging.getLogger("tensorflow").setLevel(logging.WARNING) # dont show tensor flow warninging
import tensorflow as tf
import keras.models
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras import backend as K
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import cv2
import skimage
from skimage import util
from skimage.measure import label, regionprops, regionprops_table
from skimage import color
from PIL import Image, ImageOps
from patchify import patchify, unpatchify
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import train_test_split
import numpy as np
import streamlit as st
from sklearn.impute import KNNImputer
import h5py
import cv2
from PIL import Image, ImageOps
Image.MAX_IMAGE_PIXELS = 3000000000
# Batchsize = the number of divisions of the processed image
from keras import backend as K
import imageio
from PIL import Image
import cv2
import numpy as np
from skimage.measure import label, regionprops, regionprops_table
def evaluate(y_true,y_pred):
# Create reference mask(s)
matching = TP = np.where((y_true == y_pred) & (y_true != 0) & (y_pred !=0),1,0)
excess = FP = np.where((y_true != y_pred) & (y_pred != 0),1,0)
background = TN = np.where( (y_true == y_pred) & (y_true ==0) & (y_pred==0),1,0)
missing = FN = np.where((y_true != y_pred) & (y_true != 0),1,0)
# get counts for computing semantic metrics
n_feature = np.count_nonzero(matching)
n_excess = np.count_nonzero(excess)
n_missing = np.count_nonzero(missing)
n_background = np.count_nonzero(background)
n_total = y_true.size
# compute semantic metrics
accuracy = (n_feature + n_background)/n_total
precision = n_feature/(n_feature+n_excess)
recall = n_feature / (n_feature + n_missing)
specificity = n_background / (n_background+n_excess)
balanced_accuracy = np.mean([recall,specificity])
iou = n_feature/ (n_feature+n_missing+n_excess)
dice = 2 * n_feature / (2 * n_feature + n_excess + n_missing)
# compute instance metrics
n_true_instances = np.count_nonzero( np.unique( label(y_true) ) )
n_pred_instances = np.count_nonzero( np.unique( label(y_pred) ) )
identification_performance = (n_true_instances - (n_true_instances - n_pred_instances)) / n_true_instances
identification_error_ratio = (n_true_instances - n_pred_instances)/ n_true_instances
# build metrics dict
metrics = { 'accuracy': accuracy,
'balanced accuracy':balanced_accuracy,
'precision':precision,
'recall':recall,
'specificity':specificity,
'identification performance': identification_performance,
'identification error ratio': identification_error_ratio,
'IOU': iou,
'dice': dice
}
# round all metric values to 2 decimal places
metrics = {k: round(v, 2) for k, v in metrics.items()}
# merge reference masks and create one resulting rgb image
result = np.stack((np.zeros(np.shape(y_true)),)*3, axis=-1)
result[matching==1] = [0,255,0]
result[excess==1] = [255,0,0]
result[missing==1] = [0,0,255]
print(matching)
return result,metrics
from keras.preprocessing.image import ImageDataGenerator
def my_image_mask_generator(image_generator, mask_generator):
train_generator = zip(image_generator, mask_generator)
for (img, mask) in train_generator:
yield (img, mask)
def create_generators(x_train,y_train,x_val,y_val):
seed = 7
img_data_gen_args = dict(#rotation_range=90,
#width_shift_range=0.3,
#height_shift_range=0.3,
#shear_range=0.5,
zoom_range=0.3,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect')
mask_data_gen_args = dict(#rotation_range=90,
#width_shift_range=0.3,
#height_shift_range=0.3,
#shear_range=0.5,
zoom_range=0.3,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect',
preprocessing_function=lambda x: np.where(x > 0, 1, 0).astype(
x.dtype)) # Binarize the output again.
image_data_generator = ImageDataGenerator(**img_data_gen_args)
image_data_generator.fit(x_train, augment=True, seed=seed)
image_generator = image_data_generator.flow(x_train, seed=seed)
valid_img_generator = image_data_generator.flow(x_val, seed=seed)
mask_data_generator = ImageDataGenerator(**mask_data_gen_args)
mask_data_generator.fit(y_train, augment=True, seed=seed)
mask_generator = mask_data_generator.flow(y_train, seed=seed)
valid_mask_generator = mask_data_generator.flow(y_val, seed=seed)
my_generator = my_image_mask_generator(image_generator, mask_generator)
validation_datagen = my_image_mask_generator(valid_img_generator, valid_mask_generator)
return my_generator,validation_datagen
def dice_score(y_true, y_pred, smooth=1e-7):
intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
def dice_loss(y_true, y_pred):
return 1-dice_score(y_true, y_pred)
def multi_unet_model(n_classes=4, IMG_HEIGHT=256, IMG_WIDTH=256, IMG_CHANNELS=1):
# Build the model
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
# normalize input values so we don't need to do this to the dataset ourselves
s = Lambda(lambda x: x / 255)(inputs)
# define dropout rate, .1 used in original paper. If overfitting, increase to .2-.3
drop_out = .1
# Contraction path
c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(s)
c1 = Dropout(drop_out)(c1)
c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)
c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
c2 = Dropout(drop_out)(c2) # Original 0.1
c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
p2 = MaxPooling2D((2, 2))(c2)
c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
c3 = Dropout(drop_out)(c3)
c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
p3 = MaxPooling2D((2, 2))(c3)
c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
c4 = Dropout(drop_out)(c4)
c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
p4 = MaxPooling2D(pool_size=(2, 2))(c4)
c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
c5 = Dropout(drop_out)(c5)
c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
# Expansive path
u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
c6 = Dropout(drop_out)(c6)
c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
c7 = Dropout(drop_out)(c7)
c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
c8 = Dropout(drop_out)(c8) # Original 0.1
c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
c9 = Dropout(drop_out)(c9) # Original 0.1
c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
outputs = Conv2D(n_classes, (1, 1), activation='sigmoid')(c9)
#outputs = Conv2D(n_classes, (1, 1), activation='linear')(c9)
model = Model(inputs=[inputs], outputs=[outputs])
# NOTE: Compile the model in the main program to make it easy to test with various loss functions
# model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# model.summary()
return model
def patch(list_of_images,w=128,h=128,step=128):
patched_images = []
for image in list_of_images:
patches = patchify(image, (h, w), step=step)
c, r, h, w = patches.shape
patches = patches.reshape((c * r, h, w))
patched_images.append(patches)
patched_images = np.asarray(patched_images)
i, p, h, w = patched_images.shape
return patched_images.reshape((i * p, h, w))
def load_data(n=256,step=64):
train_images = './Datasets/Misc/CSI_50x_Stich/*.png'
train_masks = './Datasets/Misc/CSI_50x_Stich/.imageLabelingSession_SessionData/*.png'
images = np.asarray([cv2.imread(file,0) for file in glob.glob(train_images)])
masks = np.asarray([cv2.imread(file,0) for file in glob.glob(train_masks)])
print('Loaded Images')
print('images',images.shape,'masks',masks.shape)
images = patch(images,w=n,h=n,step=step)
masks = patch(masks,w=n,h=n,step=step)
print('After Patching Operation')
print('images',images.shape,'masks',masks.shape)
#masks = to_categorical(masks,num_classes=1) #use this with multiclass softmax
masks = np.expand_dims(masks , axis=3) # use this for single class
images = np.expand_dims(images,axis=3)
print('To categorical')
print('images',images.shape,'masks',masks.shape)
x_train,x_test,y_train,y_test = train_test_split(images,masks,test_size=.2,random_state=np.random.randint(0,20))
print('train/test')
print('images',x_train.shape,'masks',y_train.shape)
print('images',x_test.shape,'masks',y_test.shape)
return x_train,x_test,y_train,y_test
def get_sample_weights(y_train):
sample_weights = np.ones(shape=(len(y_train),))
for i, y in enumerate(y_train):
if np.sum(y) > 0:
sample_weights[i] = 10
else:
sample_weights[i] = 1
return sample_weights
def check_predictions(model,x_train,y_train):
fig, ax = plt.subplots(nrows=3, ncols=3)
for s in range(0, 3, 1):
x = np.expand_dims(x_train[s, :, :], axis=0)
print('x shape',x.shape)
print(np.unique(y_train[s, :, :]))
#y = y_train[s, :, :]
y = np.argmax(y_train[s, :, :], axis=2)
yhat = model.predict(x)[0, :, :]
#yhat[yhat > .5] = 1
#yhat[yhat < .5] = 0
print('unique',np.unique(yhat))
print(x.shape, y.shape, yhat.shape)
x = x[0, :, :]
# yhat = np.argmax(yhat, axis=2)
#yhat = np.argmax(yhat, 2) # get indices back
print(x.shape, y.shape, yhat.shape)
ax[s, 0].imshow(x, cmap='gray')
ax[s, 1].imshow(y, cmap='jet', vmin=0, vmax=4)
im = ax[s, 2].imshow(yhat, cmap='jet', vmin=0, vmax=4)
ax[s, 0].axis('off');
ax[s, 1].axis('off');
ax[s, 2].axis('off');
ax[0, 0].set_title("Source");
ax[0, 1].set_title("Ground Truth");
ax[0, 2].set_title("Predicted")
classes = ['Nominal', 'Spatter', 'Crud', 'Ordered', 'Disordered']
values = [0, 1, 2, 3, 4] # np.unique(yhat)
colors = [im.cmap(im.norm(value)) for value in values]
# create a patch (proxy artist) for every color
patches = [mpatches.Patch(color=colors[i], label=classes[i]) for i in range(len(values))]
# put those patched as legend-handles into the legend
plt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
return None
def area_filter(mask):
lab_image = label(mask)
# table is a dictionary mapping column names to data columns
# (NumPy arrays)
table = regionprops_table(
lab_image,
properties=('label', 'area', 'eccentricity'),
)
condition = (table['area'] > 10) # & (table['eccentricity'] < 0.5)
# zero out labels not meeting condition
input_labels = table['label']
output_labels = input_labels * condition
filtered_lab_image = util.map_array(
lab_image, input_labels, output_labels
)
return filtered_lab_image
def level_surface(Y):
m, m = Y.shape
X1, X2 = np.mgrid[:m, :m]
# Regression
X = np.hstack((np.reshape(X1, (m * m, 1)), np.reshape(X2, (m * m, 1))))
X = np.hstack((np.ones((m * m, 1)), X))
YY = np.reshape(Y, (m * m, 1))
theta = np.dot(np.dot(np.linalg.pinv(np.dot(X.transpose(), X)), X.transpose()), YY)
plane = np.reshape(np.dot(X, theta), (m, m))
# return surface with bestfit plane removed
return Y - plane
# %% loader function % https://gist.github.com/g-s-k/ccffb1e84df065a690e554f4b40cfd3a
def datx2py(file_name):
# unpack an h5 group into a dict
def _group2dict(obj):
return {k: _decode_h5(v) for k, v in zip(obj.keys(), obj.values())}
# unpack a numpy structured array into a dict
def _struct2dict(obj):
names = obj.dtype.names
return [dict(zip(names, _decode_h5(record))) for record in obj]
# decode h5py.File object and all of its elements recursively
def _decode_h5(obj):
# group -> dict
if isinstance(obj, h5py.Group):
d = _group2dict(obj)
if len(obj.attrs):
d['attrs'] = _decode_h5(obj.attrs)
return d
# attributes -> dict
elif isinstance(obj, h5py.AttributeManager):
return _group2dict(obj)
# dataset -> numpy array if not empty
elif isinstance(obj, h5py.Dataset):
d = {'attrs': _decode_h5(obj.attrs)}
try:
d['vals'] = obj[()]
except (OSError, TypeError):
pass
return d
# numpy array -> unpack if possible
elif isinstance(obj, np.ndarray):
if np.issubdtype(obj.dtype, np.number) and obj.shape == (1,):
return obj[0]
elif obj.dtype == 'object':
return _decode_h5([_decode_h5(o) for o in obj])
elif np.issubdtype(obj.dtype, np.void):
return _decode_h5(_struct2dict(obj))
else:
return obj
# dimension converter -> dict
elif isinstance(obj, np.void):
return _decode_h5([_decode_h5(o) for o in obj])
# bytes -> str
elif isinstance(obj, bytes):
return obj.decode()
# collection -> unpack if length is 1
elif isinstance(obj, list) or isinstance(obj, tuple):
if len(obj) == 1:
return obj[0]
else:
return obj
# other stuff
else:
return obj
# open the file and decode it
with h5py.File(file_name, 'r') as f:
h5data = _decode_h5(f)
zdata = h5data['Data']['Surface']
zdata = list(zdata.values())[0]
zvals = zdata['vals']
zvals[zvals == zdata['attrs']['No Data']] = np.nan
# get units
zunit = zdata['attrs']['Z Converter']['BaseUnit']
# Fill nans using KNN imputation
imputer = KNNImputer(n_neighbors=5, missing_values=np.nan)
zzvals = imputer.fit_transform(zvals)
z_imputed = np.ascontiguousarray(zzvals, dtype=np.float64)
# Calculate mean and STD
#mean, STD = cv2.meanStdDev(z_imputed)
# Clip frame to lower and upper STD
#offset = 3
#z_imputed = np.clip(z_imputed, mean - offset * STD, mean + offset * STD) #.astype(np.uint8)
z_processed = level_surface(z_imputed)
return cv2.normalize(z_processed, z_processed, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
def load_image(filename):
if filename.endswith('.datx'):
z = datx2py(filename)
elif filename.lower().endswith('.tiff'):
z = np.asarray(imageio.imread(filename))
elif filename.lower().endswith(('.bmp', '.png', '.jpg', '.jpeg')):
z = np.asarray(Image.open(filename, mode='r').convert('L'))
return z
class Unet:
def __init__(self, train_data=None,
test_data=None,
model=None,
label_dict=None,
window_size=256,
step_size=256,
test_size=.25):
self.train_images = train_data
self.train_masks = test_data
self.label_ids = label_dict
n_classes = len(label_dict)
self.n = window_size
self.step = step_size
if model is None:
self.model = multi_unet_model(n_classes=n_classes, IMG_HEIGHT=self.n, IMG_WIDTH=self.n, IMG_CHANNELS=1)
self.model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy',tf.keras.metrics.MeanIoU(num_classes=2)])
#self.model.compile(optimizer='adam', loss="sparse_categorical_crossentropy", metrics=['accuracy'])
#self.model.compile(optimizer='adam', loss=dice_loss, metrics=['accuracy'])
else:
self.model = model
config = self.model.get_config() # Returns pretty much every information about your model
print(config["layers"][0]["config"]["batch_input_shape"])
def load_data(self, n=256, step=256, test_size=.25):
# issues with loading in wrong order
#images = np.asarray([cv2.imread(file, 0) for file in glob.glob(self.train_images)])
#masks = np.asarray([cv2.imread(file, 0) for file in glob.glob(self.train_masks)])
# images = np.asarray([np.asarray(ImageOps.grayscale(Image.open(self.train_images+'/'+file))).astype('float32') for file in sorted(os.listdir(self.train_images), key=len)])
# masks = np.asarray([np.asarray(ImageOps.grayscale(Image.open(self.train_masks+'/'+file))).astype('float32') for file in sorted(os.listdir(self.train_masks), key=len)])
images = np.asarray([load_image(self.train_images + '/' + filename) for filename in sorted(os.listdir(self.train_images), key=len)])
masks = np.asarray([load_image(self.train_masks + '/' + filename).astype('float32') for filename in sorted(os.listdir(self.train_masks), key=len)])
"""
# check and make sure images and mask match
for image,mask in list(zip(images,masks)):
col1,col2 = st.columns(2)
col1.image(image)
col2.image(mask.astype(float))
"""
print('Loaded Images')
print('images', images.shape, 'masks', masks.shape)
images = patch(images, w=n, h=n, step=step)
masks = patch(masks, w=n, h=n, step=step)
print('After Patching Operation')
print('images', images.shape, 'masks', masks.shape)
#masks = to_categorical(masks,num_classes=1)
images = np.expand_dims(images, axis=3)
masks = np.expand_dims(masks, axis=3)
print('To categorical')
print('images', images.shape, 'masks', masks.shape)
self.x_train, self.x_val, self.y_train, self.y_val = train_test_split(images, masks, test_size=test_size,
random_state=np.random.randint(0, 20))
print('train/test')
print('images', self.x_train.shape, 'masks', self.y_train.shape)
print('images', self.x_val.shape, 'masks', self.y_val.shape)
del images
del masks
def train(self, epochs=2, batchsize=32):
#weights = get_sample_weights(self.y_train)
my_generator,validation_datagen = create_generators(self.x_train, self.y_train, self.x_val, self.y_val)
training_history = self.model.fit(my_generator, validation_data=validation_datagen, steps_per_epoch=len(self.x_train)//batchsize,
validation_steps=len(self.x_val)//batchsize, epochs=epochs)
#training_history = self.model.fit(self.x_train, self.y_train, batch_size=batchsize, verbose=1, epochs=epochs,
#validation_split = .2,
# validation_data=(self.x_val, self.y_val),
# shuffle=True,# sample_weight=weights
# )
self.model.save(r'./Models/unet_model.hdf5')
return training_history
def predict(self, image, threshold=0.0):
n = self.n
step = self.n
mask = np.zeros_like(image).astype(float)
i = 0
progress_bar = st.progress(0)
total_iterations = (image.shape[1] / n) * (image.shape[0] / n)
start = time.time()
for x in range(0, image.shape[1], n):
for y in range(0, image.shape[0], n):
window = image[y:y + n, x:x + n]
if window.shape == (n, n):
input = np.reshape(window, [1, n, n, 1])
yhat = self.model.predict(input)[0, :, :, 0] # single class use mask at end
# yhat = np.argmax(yhat, 2) # multi class
mask[y:y + n, x:x + n] = yhat
i += 1
progress_bar.progress(np.minimum(1, i / total_iterations))
end = time.time()
print('mask',np.unique(mask))
st.subheader('Inference Time: ' + str(round(end - start, 2)) + ' seconds')
mask = np.select([mask <= threshold, mask > threshold], [0, 1], mask)
class_data = []
for class_label in self.label_ids.keys():
class_map = np.where(mask != self.label_ids[class_label], 0, mask)
class_map = np.where(class_map == self.label_ids[class_label], 1, class_map)
labeled_class_map = label(class_map)
region_data = pd.DataFrame(regionprops_table(labeled_class_map, image, properties=['label', 'area']))
class_data.append((class_label, class_map, skimage.color.label2rgb(labeled_class_map, image=image,
colors=['blue', 'cyan', 'red',
'darkorange'], alpha=.5,
bg_label=0), region_data))
return mask,class_data # for single class segmentation
#return mask # for multiclass segmentation
#x_train,x_val,y_train,y_val = load_data(n=256,step=512)
#model = keras.models.load_model(r'unet.hdf5')
#model = multi_unet_model(n_classes=1, IMG_HEIGHT=256, IMG_WIDTH=256, IMG_CHANNELS=1)
#model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
#model.load_weights('unet.hdf5')
#model.fit(x_train,y_train,batch_size=16,verbose=1,epochs=1,validation_data=(x_val,y_val),shuffle=True,sample_weight = sample_weights)
#model.save('unet.hdf5')
from tensorflow.python.keras.saving import hdf5_format
import h5py
def custom_save(model, class_dict):
model_path = r'C:\Users\COE User\PycharmProjects\SurfaceMetrology\Models\unet_withmeta.hdf5'
# Save model
with h5py.File(model_path, mode='w') as f:
hdf5_format.save_model_to_hdf5(model, f)
f.attrs['class_ids'] = class_dict
f.attrs['param2'] = 'meta data'
# Load model
with h5py.File(model_path, mode='r') as f:
param1 = f.attrs['class_ids']
my_keras_model = hdf5_format.load_model_from_hdf5(f)
return my_keras_model,param1
import os
def file_selector(folder_path='.'):
filenames = [os.path.join(path, name) for path, subdirs, files in os.walk(folder_path) for name in files] #filter(lambda x: os.path.isfile(x), os.listdir(folder_path))
selected_filename = st.selectbox('Select a file', filenames)
return selected_filename
#class_dict = st.text_input('Enter Pixel ID Dictionary, ex: {"Spatter":1,"Crud":2, ... }')
#print(class_dict)
#model = keras.models.load_model(r'./Models/unet_withmeta.hdf5') #multi_unet_model(n_classes=4, IMG_HEIGHT=256, IMG_WIDTH=256, IMG_CHANNELS=1)
#my_keras_model, class_ids = custom_save(model, class_dict)
#print('meta data',class_ids)
#if st.checkbox('load existing model'):
# filename = file_selector(folder_path=r'C:\Users\COE User\PycharmProjects\SurfaceMetrology\Models')
# st.write('You selected `%s`' % filename)
import json
import ast
st.title('Unet - Semantic Segmentation')
st.subheader('Author: <NAME>')
col1, col2, col3 = st.columns([.5, .5, .5])
with col1:
class_dict = st.text_input('Enter Pixel ID Dictionary, ex: {"Spatter":1,"Crud":2, ... }', '{"Spatter": 1}')
class_dict = ast.literal_eval(class_dict)
input_size = st.number_input('Input size', 256)
with col2:
train_images = st.text_input('Training Images',r'G:\AM-Aarush\Met and Props\Data\Annotation_metandprops_Dataset\Train-Test Dataset\Height\Train\images')
train_mask = st.text_input('Training Mask',r'G:\AM-Aarush\Met and Props\Data\Annotation_metandprops_Dataset\Train-Test Dataset\Height\Train\masks')
with col3:
test_images = st.text_input('Test Images', r'G:\AM-Aarush\Met and Props\Data\Annotation_metandprops_Dataset\Train-Test Dataset\Height\Test\images')
test_masks = st.text_input('Test Mask',r'G:\AM-Aarush\Met and Props\Data\Annotation_metandprops_Dataset\Train-Test Dataset\Height\Test\masks')
col1, col2, col3 = st.columns([.5, .5, .5])
with col1:
val_frac = st.text_input('% withheld for validation', '0.2')
with col2:
n_epochs = st.number_input('n_epochs',1)
with col3:
batchsize = st.number_input('batchsize',1)
if st.button('Train Model'):
unet = Unet(train_data=f'{train_images}/', #*.png', #./Datasets/Segmentation/Sample9_CSI_50x_Spatter_Segmentation/train/images/*.png',
test_data=f'{train_mask}/', #*.png', #'./Datasets/Segmentation/Sample9_CSI_50x_Spatter_Segmentation/train/masks/*.png',
label_dict=class_dict, #{'Spatter': 1}, # {'Spatter': 1} #, 'Crud': 2, 'Order': 3, 'Disorder': 4}
#model = keras.models.load_model(r'C:\Users\COE User\PycharmProjects\SurfaceMetrology\Models\unet_model_25epoch_16bs_heightdata.hdf5'), # keras.models.load_model(r'./Models/unet_model.hdf5', custom_objects={'dice_loss':dice_loss}), # None, #my_keras_model, #
model = None, #keras.models.load_model(r'./Models/unet_model.hdf5'), #r'C:\Users\COE User\PycharmProjects\SurfaceMetrology\Models\unet_model_30epoch_16bs_val2_bce.hdf5'),
window_size=input_size, #256,
step_size=input_size, #256,
test_size=val_frac, #.25
)
unet.load_data(n=input_size, step=input_size // 2, test_size=float(val_frac))
history = unet.train(epochs=n_epochs,batchsize=batchsize)
print(history.history.keys())
fig, ax = plt.subplots(ncols=2)
ax[0].plot(history.history['loss'], label='train')
#ax[0].plot(history.history['val_loss'], label = 'validation')
ax[0].set_title('loss')
ax[0].set_ylabel('loss')
ax[0].set_xlabel('epoch')
ax[0].legend()
ax[1].plot(history.history['mean_io_u'], label='train')
#ax[1].plot(history.history['val_mean_io_u'], label='validation')
ax[1].set_title('IOU')
ax[1].set_ylabel('IOU')
ax[1].set_xlabel('epoch')
ax[1].legend()
plt.tight_layout()
st.pyplot(fig)
# loop through images in folder
testing_images = []
testing_masks = []
for img_file in sorted(os.listdir(test_images), key=len):
print(img_file)
img_name = test_images + '/' + img_file
testing_images.append(load_image(img_name)) #np.asarray(ImageOps.grayscale(Image.open(img_name))))
for mask_file in sorted(os.listdir(test_masks), key=len):
print(mask_file)
mask_name = test_masks + '/' + mask_file
testing_masks.append(load_image(mask_name)) #np.asarray(ImageOps.grayscale(Image.open(mask_name))))
for x,y_true in list(zip(testing_images, testing_masks)):
y_pred, class_data = unet.predict(x, threshold=0.1)
col1, col2, col3, col4 = st.columns(4)
col1.image(x, 'Input')
col2.image(y_true.astype(float), 'Ground Truth',clamp=False)
print(np.unique(y_pred))
col3.image(y_pred, 'Predicted')
result,metrics = evaluate(y_true, y_pred)
col4.image(result,clamp=[0,255],channels="RGB")
st.table(pd.DataFrame.from_dict(metrics, orient='index'))
"""
ground_truth = st.file_uploader("Choose ground truth image...", type=["jpg",'png','tif'])
predicted = st.file_uploader("Choose predicted image...", type=["jpg",'png','tif'])
if ground_truth and predicted is not None:
ground_truth = np.array(Image.open(ground_truth))
predicted = np.array(ImageOps.grayscale(Image.open(predicted)))
predicted[predicted != 0] = 1
print(np.unique(predicted))
result,metrics = evaluate(ground_truth,predicted)
col1,col2,col3 = st.columns(3)
col1.image(ground_truth.astype('float32'),'Ground Truth')
col2.image(predicted.astype('float32'),'Predicted')
col3.image(result,'Result',clamp=[0,255],channels="RGB")
st.table(pd.DataFrame.from_dict(metrics, orient='index'))
"""
"""
st.title('Decision Boundary Analysis')
uploaded_file = st.file_uploader("Choose an image...", type=["jpg",'png','tif'])
if uploaded_file is not None:
gray_image = np.asarray(ImageOps.grayscale(Image.open(uploaded_file)))
mask, class_data = unet.predict(gray_image,threshold=.45); #print('map unqie',np.unique(map))
col1,col2 = st.columns(2)
col1.image(mask)
col2.image(skimage.color.label2rgb(mask, image=gray_image,colors=['blue','cyan','red','darkorange'],alpha=.5, bg_label=0))
for i,col in enumerate(st.columns(len(class_data))):
with col:
percent_area = np.sum(class_data[i][1])/class_data[i][1].shape[0]**2
col.subheader(class_data[i][0] + " {:.0%}".format(percent_area))
col.image(class_data[i][2],width=150)
col.image(class_data[i][1],width=150)
st.dataframe(class_data[i][3])
"""
```
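The evaluate helper at the top of unet_app.py reduces everything to four boolean masks (matching, excess, missing, background) and counts them. The same bookkeeping on two tiny hand-written masks, just to make the counting concrete (the masks are invented for illustration):
```python
import numpy as np

y_true = np.array([[0, 1], [1, 0]])
y_pred = np.array([[0, 1], [0, 1]])

tp = np.count_nonzero((y_true == y_pred) & (y_true != 0))  # matching foreground
fp = np.count_nonzero((y_true != y_pred) & (y_pred != 0))  # excess prediction
fn = np.count_nonzero((y_true != y_pred) & (y_true != 0))  # missing foreground
tn = np.count_nonzero((y_true == y_pred) & (y_true == 0))  # background agreement

iou = tp / (tp + fp + fn)           # 1/3 for these masks
dice = 2 * tp / (2 * tp + fp + fn)  # 0.5 for these masks
print(tp, fp, fn, tn, iou, dice)
```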
|
{
"source": "jesserem/pokemon-rl-showdown",
"score": 2
}
|
#### File: pokemon-rl-showdown/pokemon-agent/pokeTrainer.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
import numpy as np
from pokenet import Pokenet
from collections import deque
import random, datetime, os, copy
class Trainer:
def __init__(self, state_dim, hidden_dim, action_dim, save_dir):
self.state_dim = state_dim
self.action_dim = action_dim
self.save_dir = save_dir
self.hidden_dim = hidden_dim
self.use_cuda = torch.cuda.is_available()
# The agent's DQN to predict the most optimal action - we implement this in the Learn section
self.net = Pokenet(self.state_dim, self.action_dim, self.hidden_dim).float()
if self.use_cuda:
self.net = self.net.to(device="cuda")
self.exploration_rate = 1
self.exploration_rate_decay = 0.99999975
self.exploration_rate_min = 0.1
self.curr_step = 0
self.save_every = 5e5 # no. of experiences between saving the Pokenet
self.memory = deque(maxlen=100000)
self.batch_size = 32
self.gamma = 0.9
self.optimizer = torch.optim.Adam(self.net.parameters(), lr=0.00025)
self.loss_fn = torch.nn.SmoothL1Loss()
self.burnin = 40 # min. experiences before training
self.learn_every = 3 # no. of experiences between updates to Q_online
self.sync_every = 9 # no. of experiences between Q_target & Q_online sync
def act(self, state):
"""
Given a state, choose an epsilon-greedy action and update value of step.
Inputs:
state(LazyFrame): A single observation of the current state, dimension is (state_dim)
Outputs:
action_idx (int): An integer representing which action the agent will perform
"""
# EXPLORE
if np.random.rand() < self.exploration_rate:
action_idx = np.random.randint(self.action_dim)
# EXPLOIT
else:
state = state.__array__()
if self.use_cuda:
state = torch.tensor(state).cuda()
else:
state = torch.tensor(state)
state = state.unsqueeze(0)
action_values = self.net(state.float(), model="online")
action_idx = torch.argmax(action_values, axis=1).item()
# decrease exploration_rate
self.exploration_rate *= self.exploration_rate_decay
self.exploration_rate = max(self.exploration_rate_min, self.exploration_rate)
# increment step
self.curr_step += 1
return action_idx
def cache(self, state, next_state, action, reward, done):
"""
Store the experience to self.memory (replay buffer)
Inputs:
state (LazyFrame),
next_state (LazyFrame),
action (int),
reward (float),
done(bool))
"""
state = state.__array__()
next_state = next_state.__array__()
if self.use_cuda:
state = torch.tensor(state).cuda()
next_state = torch.tensor(next_state).cuda()
action = torch.tensor([action]).cuda()
reward = torch.tensor([reward]).cuda()
done = torch.tensor([done]).cuda()
else:
state = torch.tensor(state)
next_state = torch.tensor(next_state)
action = torch.tensor([action])
reward = torch.tensor([reward])
done = torch.tensor([done])
self.memory.append((state, next_state, action, reward, done,))
def recall(self):
"""
Retrieve a batch of experiences from memory
"""
batch = random.sample(self.memory, self.batch_size)
state, next_state, action, reward, done = map(torch.stack, zip(*batch))
return state, next_state, action.squeeze(), reward.squeeze(), done.squeeze()
def td_estimate(self, state, action):
current_Q = self.net(state.float(), model="online")[
np.arange(0, self.batch_size), action
] # Q_online(s,a)
return current_Q
@torch.no_grad()
def td_target(self, reward, next_state, done):
next_state_Q = self.net(next_state.float(), model="online")
best_action = torch.argmax(next_state_Q, axis=1)
next_Q = self.net(next_state.float(), model="target")[
np.arange(0, self.batch_size), best_action
]
return (reward + (1 - done.float()) * self.gamma * next_Q).float()
def update_Q_online(self, td_estimate, td_target):
loss = self.loss_fn(td_estimate, td_target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.item()
def sync_Q_target(self):
self.net.target.load_state_dict(self.net.online.state_dict())
def save(self):
save_path = (
self.save_dir / f"poke_net_{int(self.curr_step // self.save_every)}.chkpt"
)
torch.save(
dict(model=self.net.state_dict(), exploration_rate=self.exploration_rate),
save_path,
)
print(f"pokenet saved to {save_path} at step {self.curr_step}")
def learn(self):
if self.curr_step % self.sync_every == 0:
self.sync_Q_target()
if self.curr_step % self.save_every == 0:
self.save()
if self.curr_step < self.burnin:
return None, None
if self.curr_step % self.learn_every != 0:
return None, None
# Sample from memory
state, next_state, action, reward, done = self.recall()
# Get TD Estimate
td_est = self.td_estimate(state, action)
# Get TD Target
td_tgt = self.td_target(reward, next_state, done)
# Backpropagate loss through Q_online
loss = self.update_Q_online(td_est, td_tgt)
return (td_est.mean().item(), loss)
```
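td_estimate and td_target above follow the Double-DQN pattern: the online head picks the greedy action for the next state, while the target head scores it. A compact sketch of that target calculation with stand-in tensors (the Pokenet itself and its two heads are not reproduced here):
```python
import torch

batch, n_actions, gamma = 4, 3, 0.9
reward = torch.rand(batch)
done = torch.zeros(batch)

# pretend outputs of the online and target heads for the next state
q_online_next = torch.rand(batch, n_actions)
q_target_next = torch.rand(batch, n_actions)

best_action = torch.argmax(q_online_next, dim=1)          # chosen by the online net
next_q = q_target_next[torch.arange(batch), best_action]  # scored by the target net
td_target = reward + (1 - done) * gamma * next_q
print(td_target.shape)  # torch.Size([4])
```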
#### File: pokemon-rl-showdown/pokemon-agent/server_login.py
```python
from poke_env.player_configuration import PlayerConfiguration
def login_showdown(account_name, password=<PASSWORD>):
my_player_config = PlayerConfiguration(account_name, password)
return my_player_config
```
|
{
"source": "JesseRhoads/salt",
"score": 3
}
|
#### File: salt/modules/hashutil.py
```python
import base64
import hashlib
import hmac
def base64_encodestring(instr):
'''
Encode a string as base64
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_encodestring 'get salted'
'''
return base64.encodestring(instr)
def base64_decodestring(instr):
'''
Decode a base64-encoded string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.base64_decodestring 'Z2V0IHNhbHRlZA==\\n'
'''
return base64.decodestring(instr)
def md5_digest(instr):
'''
Generate an md5 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.md5_digest 'get salted'
'''
return hashlib.md5(instr).hexdigest()
def sha256_digest(instr):
'''
Generate an sha256 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha256_digest 'get salted'
'''
return hashlib.sha256(instr).hexdigest()
def sha512_digest(instr):
'''
Generate an sha512 hash of a given string
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' hashutil.sha512_digest 'get salted'
'''
return hashlib.sha512(instr).hexdigest()
def hmac_signature(string, shared_secret, challenge_hmac):
'''
Verify a challenging hmac signature against a string / shared-secret
.. versionadded:: 2014.7.0
Returns a boolean if the verification succeeded or failed.
CLI Example:
.. code-block:: bash
salt '*' hashutil.hmac_signature 'get salted' 'shared secret' '<KEY>'
'''
hmac_hash = hmac.new(string, shared_secret, hashlib.sha256)
valid_hmac = base64.b64encode(hmac_hash.digest())
return valid_hmac == challenge_hmac
```
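For hmac_signature to return True, the challenge has to be constructed the same way the function builds its own comparison value. A short sketch of producing a matching challenge in plain Python, mirroring the argument order used in the module above:
```python
import base64
import hashlib
import hmac

message = b'get salted'
secret = b'shared secret'

# mirror the construction inside hashutil.hmac_signature
challenge = base64.b64encode(hmac.new(message, secret, hashlib.sha256).digest())
# hashutil.hmac_signature(message, secret, challenge) would then compare equal
```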
|
{
"source": "jesserobertson/automapper",
"score": 3
}
|
#### File: jesserobertson/automapper/waveletize.py
```python
from __future__ import print_function, division
from wavelets import dgauss, pad_array, fft_frequencies, rotate
import h5py, os
from pyfftw.interfaces.scipy_fftpack import fftn, ifftn
import pyprind
from numpy import pi, linspace, float64, nanmean, isnan
import click
@click.command()
@click.argument('filename', type=click.Path(exists=True), required=1)
@click.option('--order', type=int, default=1,
help="The order of the derivative to calculate")
@click.option('--nangles', type=int, default=1,
help="The number of angles to calculate transforms for")
@click.option('--nscales', type=int, default=20,
help="The number of scales to resolve in the transform")
@click.option('--sym', is_flag=True, default=False,
help="Whether to use a symmetric or asymmetric wavelet"
" (default: asymmetric)")
def make_wavelets(filename, order, nangles, nscales, sym):
""" Make a wavelet transform from an HDF5 file
"""
# Set wavelet type
if sym:
wav = dgauss.dgauss_nd_sym
else:
wav = dgauss.dgauss_nd
# Get info from input signal
with h5py.File(filename) as src:
spacing = (
abs(src['Longitude'][1] - src['Longitude'][0]),
abs(src['Latitude'][1] - src['Latitude'][0]))
nxs, nys, _ = src['Raster'].shape
shape = (nxs, nys)
# Generate axes for transform
scales = dgauss.generate_scales(nscales, shape, spacing, order)
angles = linspace(
0, pi * (1 - 1 / nangles), nangles)
axes = [
(0, 'Angle', angles),
(1, 'Scale', scales),
(2, 'Longitude', src['Longitude'][...]),
(3, 'Latitude', src['Latitude'][...]),
]
# Remove NaNs and pad array...
raster = src['Raster'][..., 0]
mean = nanmean(nanmean(raster))
raster[isnan(raster)] = mean
pad_raster, pad_mask = pad_array(raster)
pad_shape = pad_raster.shape
fft_data = fftn(pad_raster)
# Generate sink file
sink_fname = os.path.splitext(filename)[0] \
+ '_deriv_order{0}.hdf5'.format(order)
with h5py.File(sink_fname) as sink:
sink_shape = angles.shape + scales.shape + shape
sink.require_dataset('Raster', shape=sink_shape, dtype=float64)
# Attach dimension labels to raster, write to sink
for idx, label, dim in axes:
sink.require_dataset(name=label,
shape=dim.shape,
dtype=float64,
exact=True,
data=dim)
sink['Raster'].dims.create_scale(dset=sink[label], name=label)
sink['Raster'].dims[idx].attach_scale(sink[label])
# Evaluate transforms
progbar = pyprind.ProgBar(len(angles) * len(scales) + 1)
freqs = fft_frequencies(pad_shape, spacing)
for aidx, angle in enumerate(angles):
rfreqs = rotate(freqs, (angle,))
for sidx, scale in enumerate(scales):
item = 'Angle: {0:0.2f} deg, Scale: {1:0.2f} deg'.format(
angle * 180 / pi, scale)
progbar.update(item_id=item)
filtered = ifftn(
fft_data * wav(rfreqs, order=order, scale=scale))
sink['Raster'][aidx, sidx, ...] = filtered[pad_mask].real
if __name__ == '__main__':
make_wavelets()
```
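make_wavelets is built around FFT-domain filtering: transform the padded raster once, multiply by a filter evaluated on the grid frequencies, and inverse-transform per scale and angle. The dgauss wavelets are specific to the wavelets package, but the pattern is the same as this Gaussian low-pass sketch (the filter and scale below are illustrative stand-ins):
```python
import numpy as np
from numpy.fft import fftn, ifftn, fftfreq

data = np.random.rand(64, 64)
spacing = (1.0, 1.0)

# frequency grid matching the array shape
fx = fftfreq(data.shape[0], d=spacing[0])
fy = fftfreq(data.shape[1], d=spacing[1])
kx, ky = np.meshgrid(fx, fy, indexing='ij')

scale = 4.0
gaussian = np.exp(-0.5 * (scale ** 2) * (kx ** 2 + ky ** 2))  # stand-in for the dgauss filter

filtered = ifftn(fftn(data) * gaussian).real
print(filtered.shape)
```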
|
{
"source": "jesserobertson/cogj",
"score": 2
}
|
#### File: cogj/cli/info.py
```python
import click
@click.command('info')
def info_command():
"""
Return information about this file
"""
click.echo('Not implemented yet')
```
#### File: cogj/cli/main.py
```python
import logging
import click
import click_log
from .info import info_command
from .convert import convert_command
# Logger options
LOGGER = logging.getLogger('cogj')
click_log.basic_config(LOGGER)
@click.group('cogj')
@click_log.simple_verbosity_option(LOGGER)
def cogj():
"""\b
_|_|_| _|_| _|_|_| _|_|_|_| | Cloud-optimized GeoJSON tools
_| _| _| _| _| | WFS without the WTF
_| _| _| _| _|_| _| |
_| _| _| _| _| _| _| | Skunkworks by @jesserobertson & @keithamoss
_|_|_| _|_| _|_|_| _|_| | some time in January 2019
For more info, try `cogj COMMAND --help`
"""
LOGGER.debug('COGJ up and running!')
cogj.add_command(info_command)
cogj.add_command(convert_command)
if __name__ == '__main__':
cogj()
```
#### File: cogj/cogj/filter.py
```python
from collections.abc import Iterable
from voluptuous import Schema, ALLOW_EXTRA, MultipleInvalid
from toolz.curried import take, compose
from .feature import Feature
from .logging import LoggerMixin
from .utilities import grouper
class FeatureFilter(LoggerMixin):
"""
Handles vector geospatial data with some lazy filtering of features
Parameters:
limit - the number of features to keep
schema - a voluptuous.Schema or dictionary to use to filter on
feature properties
keep_properties - if True, all feature properties are kept, if
False, no feature properties are kept, if an iterable of
feature keys, only those features are kept
chunk - if not False, returns the features in chunks of n
"""
def __init__(self, limit=None, schema=None, keep_properties=True, chunk=False):
self.schema = schema
self.limit = limit
self.chunk = chunk or False
self.set_property_filter(keep_properties)
# Set up pipeline, in reverse order
steps = [self.validate, self.process]
if self.limit is not None:
self.logger.debug('Loading %s features only', self.limit)
steps.append(take(self.limit))
if self.chunk:
self.logger.debug('Features will arrive in batches of %s', self.chunk)
steps.append(lambda it: grouper(self.chunk, it))
self.pipeline = compose(*reversed(steps))
def __call__(self, features):
"Filter features using the schema"
yield from self.pipeline(features)
def set_property_filter(self, keep_properties=False):
"Set keep_properties attribute"
if isinstance(keep_properties, str):
self._process_properties = lambda f: \
self._filter_properties({keep_properties,}, f)
elif isinstance(keep_properties, Iterable):
self._process_properties = lambda f: \
self._filter_properties(set(keep_properties), f)
elif keep_properties:
self._process_properties = self._allow_properties
else:
self._process_properties = self._remove_properties
@property
def schema(self):
"Get current schema"
return self._schema
@schema.setter
def schema(self, new_schema):
"Set new schema. If None then schema is removed"
if new_schema is not None and not isinstance(new_schema, Schema):
new_schema = Schema(new_schema, extra=ALLOW_EXTRA)
self.logger.debug('Updating schema to %s', new_schema)
self._schema = new_schema # pylint: disable=W0201
def validate(self, features):
"""
Lazily evaulate an iterator against a given schema. Objects which fail
to validate are skipped.
Parameters:
features - an iterator of features to validate
Returns:
an iterator of valid values
"""
if self._schema is None:
self.logger.info('No schema, returning all features')
yield from features
return
self.logger.info('Validating features against schema')
for feature in features:
try: # validation
self._schema(feature.properties)
yield feature
except MultipleInvalid as err:
self.logger.debug('Skipping invalid object %s', feature)
self.logger.debug('Errors: %s', err.errors)
continue
def process(self, features):
"""
Process properties dictionaries
Sometimes we don't want to keep all the properties in a vector format in the final
dataset (especially if we've already filtered out what we don't care about).
This method filters on the self.keep_properties attribute.
Parameters:
features - an iterator of feature features
Returns:
an iterator over filtered features with properties handled
"""
for feature in features:
yield self._process_properties(feature)
def _allow_properties(self, feature):
"Allow all features"
self.logger.debug("Keeping all properties from features")
return feature
def _remove_properties(self, feature):
"Remove properties from feature"
self.logger.debug("Removing all properties from features")
return Feature(geometry=feature.geometry)
def _filter_properties(self, keep, feature):
"Process removing some features"
self.logger.debug("Keeping a subset of features: %s", keep)
return Feature(
geometry=feature.geometry,
properties={
k: v for k, v in feature.properties.items()
if k in keep
})
```
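FeatureFilter assembles its pipeline by composing generator stages, so nothing is pulled through until the caller iterates. A stripped-down version of the same compose/reverse idiom with toolz (the stages here are placeholders, not the real validate and process steps):
```python
from toolz.curried import compose, take

def keep_even(items):
    # generator stage: drop anything odd
    for item in items:
        if item % 2 == 0:
            yield item

# compose applies right-to-left, so reversing keeps the reading order of the steps
steps = [keep_even, take(3)]
pipeline = compose(*reversed(steps))

print(list(pipeline(range(20))))  # [0, 2, 4]
```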
#### File: cogj/cogj/reproject.py
```python
from functools import partial
from shapely.geometry import Polygon, MultiPolygon, MultiLineString, \
LineString, LinearRing, Point, MultiPoint
import pyproj
import numpy as np
from .feature import Feature, FeatureCollection
GEOJSON_PROJ = 'EPSG:4326' # Default/only projection used by GeoJSON
def get_projector(from_crs, to_crs=None):
"""
Return a function to reproject something from one
coordinate reference system (CRS) to another.
Coordinate references can be specified as PROJ strings
(e.g. '+datum=WGS84 +ellps=WGS84 +no_defs +proj=longlat'
see fiona.crs.to_string for more on this) or using EPSG
codes (e.g. 'epsg:3857').
Parameters:
from_crs - the source coordinate reference system
to_crs - the destination coordinate reference system.
Optional, defaults to 'epsg:4326', which is WGS84
longitude/latitude (the GeoJSON default, so it is
easy to pass to Leaflet maps)
"""
# Generate pyproj objects from our CRSes
prjs = [from_crs, to_crs or GEOJSON_PROJ]
for idx, prj in enumerate(prjs):
if isinstance(prj, str) and prj.lower().startswith('epsg'):
prjs[idx] = pyproj.Proj(init=prj)
else:
prjs[idx] = pyproj.Proj(prj)
# Generate the function to actually carry out the transforms
if prjs[0] == prjs[1]:
_project = lambda *p: p
else:
_project = lambda *p: \
np.asarray(list(partial(pyproj.transform, *prjs)(*p))) # pylint: disable=E1120
return _project
def reproject(geom, from_crs=None, to_crs=None, projector=None):
"""
Reproject a shapely {Multi,}Polygon or {Multi,}LineString
using a given projector (see `get_projector`).
Parameters:
geom - the geometry to reproject
from_crs - the source coordinate reference system
to_crs - the destination coordinate reference system.
Optional, defaults to 'epsg:4326', which is WGS84
longitude/latitude (the GeoJSON default, so it is
easy to pass to Leaflet maps)
Returns:
the reprojected geometries
"""
# Handle inputs
from_crs = from_crs or GEOJSON_PROJ
to_crs = to_crs or GEOJSON_PROJ
if projector is None:
projector = get_projector(from_crs, to_crs)
# Handle different geometry types
mapping = {
'Polygon': _polygon,
'LineString': _linestring,
'MultiPolygon': _multipolygon,
'LinearRing': _linearring,
'MultiLineString': _multilinestring,
'Point': _point,
'MultiPoint': _multipoint,
'Feature': _feature,
'FeatureCollection': _featurecollection
}
try:
return mapping[geom.geom_type](geom, projector=projector)
except KeyError:
msg = "Don't know how to reproject a {}".format(geom.geom_type)
raise ValueError(msg)
# Reprojection helpers
def _featurecollection(geom, projector):
return FeatureCollection([
reproject(f, projector=projector)
for f in geom
])
def _feature(geom, projector):
return Feature(
geometry=reproject(geom.geometry, projector=projector),
properties=geom.properties
)
def _point(geom, projector):
return Point(projector(*geom.xy))
def _multipoint(geom, projector):
return MultiPoint([_point(p, projector) for p in geom])
def _polygon(geom, projector):
if geom.interiors:
reproj = Polygon(
shell=_linearring(geom.exterior, projector),
holes=[_linearring(i, projector) for i in geom.interiors]
)
else:
reproj = Polygon(reproject(geom.exterior, projector=projector))
return reproj
def _linestring(geom, projector):
return LineString(projector(*geom.xy).T)
def _linearring(geom, projector):
return LinearRing(projector(*geom.coords.xy).T)
def _multipolygon(geom, projector):
return MultiPolygon([_polygon(p, projector) for p in geom])
def _multilinestring(geom, projector):
return MultiLineString([_linestring(p, projector) for p in geom])
```
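get_projector wraps pyproj so whole shapely coordinate arrays can be pushed through one transform function. With the newer Transformer API (pyproj 2 and later) the equivalent single-point round trip looks roughly like this; the EPSG codes and coordinates are only examples:
```python
from pyproj import Transformer
from shapely.geometry import Point

transformer = Transformer.from_crs("EPSG:4326", "EPSG:3857", always_xy=True)

lon, lat = 151.2093, -33.8688
x, y = transformer.transform(lon, lat)
print(Point(x, y))  # the same location in Web Mercator metres
```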
#### File: cogj/cogj/resample.py
```python
import numpy as np
from shapely.geometry import LineString, Polygon
class LinestringSampler:
"""
Manages resampling of linestrings
Essentially a utility class which lets us parameterise resamplings
by specifying positions along the linestring without worrying
about reprojecting this along the line.
Parameters:
linestring - a shapely LineString defining the path
"""
norm_tolerance = 1e-10 # eps for managing zero vs nonzero norms
def __init__(self, linestring):
# Map the vectors between points
self.points = np.asarray(linestring.xy).transpose()
self.vectors = np.diff(self.points, axis=0)
self.norms = np.linalg.norm(self.vectors, axis=1)
# Find unit vectors for each segment
nonzero = self.norms > self.norm_tolerance
self.unit_vectors = self.vectors.copy()
self.unit_vectors[nonzero] /= self.norms[nonzero].reshape((-1, 1))
# Total path distance
self.length = self.norms.sum()
self.cumulative_norm = np.cumsum(self.norms)
def __call__(self, distances):
"""
Sample distances along our boundary
Parameters:
distances - points given as distances along the boundary
"""
return self.sample(distances)
def sample(self, distances):
"""
Sample distances along our boundary
Parameters:
distances - points given as distances along the boundary
"""
# Return the indices in cumulative norm that each sample
# would need to be inserted at to maintain the sorted property
positions = np.searchsorted(self.cumulative_norm, distances)
positions = np.clip(positions, 0, len(self.unit_vectors) - 1)
offsets = np.append(0, self.cumulative_norm)[positions]
# new points, parameterized as a projection length, direction from
# an origin vertex
projection = distances - offsets
direction = self.unit_vectors[positions]
origin = self.points[positions]
return origin + (direction * projection.reshape((-1, 1)))
def resample_linestring_count(linestring, count=None, step=None,
step_round=True):
"""
Given a path along (n, d) points, resample them such that the
distance traversed along the path is constant in between each of
the resampled points.
Note that this will likely clip the corners of the original path,
and the original vertices are NOT guaranteed to be in the new
resampled path.
Only one of count or step can be specified. Specify count for
uniformly distributed samples (e.g. np.linspace(0, 1, count)), or
step for a specified step length (e.g. np.arange(0, 1, step))
Parameters:
linestring - the linestring containing the path
count, step - the number of points or a specified step length
(see above for details)
step_round - if True and step is specified, adjusts the step
length so that an integer number of steps is used closest
to the specified step length.
Returns:
an (m, d) set of resampled points on the path
"""
# Check inputs
if (count is None and step is None) or (count is not None and step is not None):
raise ValueError("Only one of count or step can be specified")
# Generate steps and sampler instance
sampler = LinestringSampler(linestring)
if step is not None:
if step >= sampler.length and not step_round:
raise ValueError('Step length is longer than the boundary length')
else:
# Set a step count so we use an integer number of equally-spaced
# steps
count = int(np.ceil(sampler.length / step))
if count is not None:
samples = np.linspace(0, sampler.length, count)
else:
samples = np.arange(0, sampler.length, step)
return LineString(sampler.sample(samples))
# Default clipping for resampling
DEFAULT_CLIP = [4, np.inf]
def resample(geom, resolution, clip=None, return_points=False):
"""
Resample a boundary based on some resolution rather than by number of points or step
Parameters:
linestring - the shapely.geometry.LineString instance to resample
resolution - the resolution to resample at
clip - If None, defaults to DEFAULT_CLIP ([4, np.inf])
return_points - if True, returns a list of points located on the boundary
ready for medial line analysis. If False, returns a shapely LineString
instance with the resampled data
Returns:
depending on return_poly - either a list of points on the boundary or
a new shapely LineString instance.
"""
mapping = {
'Polygon': resample_polygon,
'LineString': resample_linestring
}
try:
return mapping[geom.geom_type](
geom,
resolution=resolution,
clip=clip,
return_points=return_points
)
except KeyError:
raise ValueError("Don't know how to resample a {} instance".format(geom.geom_type))
def resample_linestring(linestring, resolution, clip=None, return_points=True):
"""
Resample a boundary based on some resolution rather than by number of points or step
Parameters:
linestring - the shapely.geometry.LineString instance to resample
resolution - the resolution to resample at
clip - If None, defaults to DEFAULT_CLIP ([4, np.inf])
return_points - if True, returns a list of points located on the boundary
ready for medial line analysis. If False, returns a shapely LineString
instance with the resampled data
Returns:
depending on return_poly - either a list of points on the boundary or
a new shapely LineString instance.
"""
clip = clip or DEFAULT_CLIP
sampler = LinestringSampler(linestring)
count = sampler.length / resolution
count = int(np.clip(count, *clip))
samples = np.linspace(0, sampler.length, count)
return sampler.sample(samples) if return_points \
else LineString(sampler.sample(samples))
def resample_polygon(polygon, resolution=0.01, clip=None, return_points=True):
"""
Resample the boundaries of a polygon using `resample_linestring`, including
holes, if any
Parameters:
polygon - the shapely.geometry.Polygon instance to resample
resolution - the resolution to resample at
clip - If None, defaults to DEFAULT_CLIP ([4, np.inf])
return_points - if True, returns a list of points located on the boundary
ready for medial line analysis. If False, returns a shapely Polygon
instance with the resampled data
Returns:
depending on return_points - either a list of points on the boundary or
a new shapely Polygon instance.
"""
# Helper for resampling
_rsamp = lambda ls: \
resample_linestring(ls, resolution, clip or DEFAULT_CLIP)
# Actually do resampling
if polygon.interiors:
result = {
'shell': _rsamp(polygon.exterior),
'holes': [_rsamp(i) for i in polygon.interiors]
}
else:
result = {'shell': _rsamp(polygon.exterior)}
# Work out what we're returning here
if not return_points:
result = Polygon(**result)
elif polygon.interiors:
result = np.vstack([
result['shell'],
np.vstack(result['holes'])
])
else:
result = result['shell']
return result
```
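As a quick illustration of how the dispatching `resample()` entry point behaves, here is a minimal usage sketch. It assumes the functions above (and the `LinestringSampler` helper defined earlier in this module) are available in the current namespace; the square polygon is invented for illustration.
```python
# Minimal usage sketch; assumes resample() and its helpers above are in scope.
from shapely.geometry import Polygon

square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])

# geom_type == 'Polygon', so this dispatches to resample_polygon
points = resample(square, resolution=0.1, return_points=True)
print(len(points))  # number of resampled boundary points

# Ask for a shapely geometry back instead of raw points
poly = resample(square, resolution=0.1, return_points=False)
print(poly.geom_type)  # 'Polygon'
```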
#### File: flask/api/app.py
```python
from flask import Flask, Response, abort
from wfsserver import WFSServer
from geo_serverless import Geo_Serverless
from utils import get_request_arg_casei
app = Flask(__name__)
@app.route("/")
def wfs():
r = get_request_arg_casei("REQUEST")
COGJ_URL = get_request_arg_casei("COGJ_URL")
wfs_server = WFSServer({
"SERVICE_TITLE": "COGJ Web Feature Service",
"COGJ_URL": COGJ_URL
})
geo_serverless = Geo_Serverless(COGJ_URL)
if r.upper() == "GETCAPABILITIES":
return get_capabilities(wfs_server, geo_serverless)
elif r.upper() == "DESCRIBEFEATURETYPE":
return describe_feature_type(wfs_server, geo_serverless)
elif r.upper() == "GETFEATURE":
return get_feature(wfs_server, geo_serverless)
abort(404)
def get_capabilities(wfs_server, geo_serverless):
return Response(wfs_server.get_capabilities(geo_serverless.get_metadata(), geo_serverless.get_bbox()), mimetype="text/xml")
def describe_feature_type(wfs_server, geo_serverless):
return Response(wfs_server.describe_feature_type(get_request_arg_casei("TYPENAME"), geo_serverless), mimetype="text/xml")
def get_feature(wfs_server, geo_serverless):
bbox = get_request_arg_casei("BBOX")
feature_collections = geo_serverless.read_feature_collections(geo_serverless.get_collections_for_bbox(bbox))
gml_path = wfs_server.get_feature(get_request_arg_casei("TYPENAME"), feature_collections, bbox)
with open(gml_path) as f:
return Response(f.read(), mimetype="text/xml; subtype=gml/3.1.1")
# @app.route("/ol_wfs_demo.html")
# def ol_wfs_demo():
# return send_file("ol_wfs_demo.html")
```
#### File: flask/lambda/lambda_function.py
```python
import os
import sys
import logging
# set up logger
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
# commented out to avoid duplicate logs in lambda
# logger.addHandler(logging.StreamHandler())
# imports used for the example code below
from osgeo import gdal, ogr
import json
import subprocess
def lambda_handler(event, context):
""" Lambda handler """
logger.debug(event)
# print(event)
# process event payload and do something like this
# fname = event['filename']
# fname = fname.replace('s3://', '/vsis3/')
# # open and return metadata
# ds = gdal.Open(fname)
# band = ds.GetRasterBand(1)
# stats = band.GetStatistics(0, 1)
# geojsonFile = "./sample_data/Cadastral.firstcollection.geojson"
geojsonFile = "./sample_data/Casatral.noformatting.geojson"
# driver = ogr.GetDriverByName("GeoJSON")
# dataSource = driver.Open(geojsonFile, 0)
# layer = dataSource.GetLayer()
# with open(geojsonFile) as f:
# featureCollection = json.loads(f.read())
# for feature in featureCollection["features"][:1]:
# feature = ogr.CreateGeometryFromJson(json.dumps(feature["geometry"]))
basePath = "./"
command = ["ogr2ogr", "--version"]
# https://gis.stackexchange.com/a/154008
foo = subprocess.check_call(command)
# ogr2ogr -f "GML" -nlt "GEOMETRYCOLLECTION" ./sample_data/Cadastral.gml ./sample_data/Casatral.noformatting.geojson
return None
if __name__ == "__main__":
event = []
context = []
lambda_handler(event, context)
```
#### File: cogj/tests/test_version.py
```python
import unittest
import cogj
class TestVersion(unittest.TestCase):
"Check version is set"
def test_version(self):
"Check version is set"
self.assertTrue(cogj.__version__ is not None)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jesserobertson/earthchem-pyclient",
"score": 3
}
|
#### File: earthchem-pyclient/earthchem/pagination.py
```python
from itertools import takewhile, count
def make_pages(max_items, items_per_page=50):
""" Get a list of page bounds for submitting to the REST endpoint
Parameters:
max_items - the total number of items to get
items_per_page - the size of each page (defaults to 50 which is
the Earthchem default)
Returns:
a list of tuples with (start_row, end_row) for each page
"""
# Make a list of page bounds until we get bigger than the maximum number of items
page_bounds = lambda n: (n * items_per_page, (n + 1) * items_per_page - 1)
pages = list(takewhile(lambda x: x[0] < max_items,
map(page_bounds, count())))
# Replace last value with maximum row number
pages[-1] = (pages[-1][0], max_items)
return pages
```
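For reference, a short sketch of the row ranges that `make_pages` produces; the values below mirror the pagination tests further down in this repository.
```python
from earthchem.pagination import make_pages

print(make_pages(200))
# [(0, 49), (50, 99), (100, 149), (150, 200)]

print(make_pages(149, items_per_page=30))
# [(0, 29), (30, 59), (60, 89), (90, 119), (120, 149)]
```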
#### File: earthchem/plot/geochron.py
```python
from matplotlib import pyplot as plt
import pandas as pd
def geochron(df):
"""
Make a geochron plot
"""
raise NotImplementedError("Sorry - this is a work in progress!")
```
#### File: earthchem/plot/spider.py
```python
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import warnings
from ..geochem import common_elements
def spiderplot(df, ax=None, components:list=None, plot=True, fill=False, **kwargs):
"""
Plots spidergrams for trace elements data.
By using separate lines and scatterplots, values between two null-valued
items are still presented. Might be able to speed up the lines
with a matplotlib.collections.LineCollection
Parameters
----------
df: pandas DataFrame
Dataframe from which to draw data.
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
components: list, None
Elements or compositional components to plot.
plot: boolean, True
Whether to plot lines and markers.
fill: boolean, False
Whether to add a patch representing the full range.
**kwargs
Styling keyword arguments to pass to matplotlib.
"""
try:
assert plot or fill
except:
raise AssertionError('Please select to either plot values or fill between ranges.')
sty = {}
# Some default values
sty['marker'] = kwargs.get('marker') or 'D'
sty['color'] = kwargs.get('color') or kwargs.get('c') or None
sty['alpha'] = kwargs.get('alpha') or kwargs.get('a') or 1.
if sty['color'] is None:
del sty['color']
components = components or [el for el in common_elements(output='str')
if el in df.columns]
assert len(components) != 0
c_indexes = np.arange(len(components))
ax = ax or plt.subplots(1, figsize=(len(components)*0.25, 4))[1]
if plot:
ls = ax.plot(c_indexes,
df[components].T.values.astype(float),
**sty)
sty['s'] = kwargs.get('markersize') or kwargs.get('s') or 5.
if sty.get('color') is None:
sty['color'] = ls[0].get_color()
sc = ax.scatter(np.tile(c_indexes, (df[components].index.size,1)).T,
df[components].T.values.astype(float), **sty)
for s_item in ['marker', 's']:
if s_item in sty:
del sty[s_item]
if fill:
mins, maxs = df[components].min(axis=0), df[components].max(axis=0)
ax.fill_between(c_indexes, mins, maxs, **sty)
ax.set_xticks(c_indexes)
ax.set_xticklabels(components, rotation=60)
ax.set_yscale('log')
ax.set_xlabel('Element')
unused_keys = [i for i in kwargs if i not in list(sty.keys()) + \
['alpha', 'a', 'c', 'color', 'marker']]
if len(unused_keys):
warnings.warn('Styling not yet implemented for:{}'.format(unused_keys))
```
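A minimal sketch of calling `spiderplot` on a small, hand-made dataframe; the element columns and concentration values are invented purely for illustration, and matplotlib is assumed to be importable.
```python
import pandas as pd
from matplotlib import pyplot as plt
# Assumes spiderplot is importable, e.g. from earthchem.plot.spider import spiderplot

# Invented trace-element concentrations for two samples
df = pd.DataFrame({'La': [30.0, 25.0], 'Ce': [60.0, 48.0], 'Nd': [28.0, 22.0]})

spiderplot(df, components=['La', 'Ce', 'Nd'], plot=True, fill=True)
plt.show()
```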
#### File: earthchem-pyclient/earthchem/query.py
```python
from .documentation import get_documentation
from .pagination import make_pages
import requests
import tqdm
import pandas
from io import StringIO
import textwrap
def make_query_docstring():
""" Constructs a docstring from the documentation dictionary
"""
wrapper = textwrap.TextWrapper(width=80, subsequent_indent=' ')
docstr = textwrap.dedent("""
Holds a query for the EarthChem REST API
Initialize by providing key-value pairs to build into a query URL. The
URL is available in the `url` attribute, and the results from the
`results` attribute.
Providing a keyword not in the list below will raise a KeyError.
Allowed keywords are:
""")
docdict = get_documentation()
for item in docdict.items():
docstr += '\n' + wrapper.fill('{0} - {1}'.format(*item))
return docstr
class Query(dict):
__doc__ = make_query_docstring()
docdict = get_documentation()
def __init__(self, **kwargs):
super().__init__()
# Add everything to dictionary
for key, value in kwargs.items():
# Add to dictionary
self[key] = str(value)
def __repr__(self):
kwargs = ', '.join('{0}={1}'.format(*it) for it in self.items())
return 'Query({})'.format(kwargs)
def __setitem__(self, key, value):
""" Sets a particular query term, making sure that the values
are ok etc.
Parameters:
key - the query key to set
value - the value to set for that search.
"""
# Check that items are ok to query - we exempt startrow and endrow since
# they are special
allowed = list(self.docdict.keys()) + ['startrow', 'endrow']
if key not in allowed:
raise KeyError('Unknown key {0}'.format(key))
if value is None:
del self[key]
else:
super().__setitem__(key, value)
def count(self):
""" Get the total number of items returned by the query
"""
self['searchtype'] = 'count'
resp = requests.get(self.url)
self['searchtype'] = None
# Return the result
if resp.ok:
try:
return int(resp.json()['Count'])
except (KeyError, ValueError):
raise IOError("Couldn't parse data in response")
else:
raise IOError("Couldn't get data from network")
def dataframe(self, max_rows=None, standarditems=True, drop_empty=True):
""" Get the actual data in a dataframe
Note that this doesn't do pagination yet...
Parameters:
max_rows - the maximum number of rows to get. If None,
defaults to Query.count() (i.e. give me everything)
standarditems - if True, returns the Earthchem
standard items in the table
drop_empty - if True, drops columns for which there
is no data
"""
# Check that we actually have some data to fetch
if self.count() == 0:
print("Didn't find any records for this query, returning None")
return None
# Get the list of pages we're going to use, use to set up tqdm and query
if max_rows is None:
max_rows = self.count()
pages = make_pages(max_rows - 1)
tqdm_kwargs = {
'desc': 'Downloading pages',
'total': len(pages)
}
# Accumulate pages as we go
accumulator = None
for page in tqdm.tqdm(pages, **tqdm_kwargs):
# Add the proper search type keys to the query
self.update(
searchtype='rowdata',
standarditems='yes' if standarditems else 'no',
startrow=page[0],
endrow=page[1]
)
# Get data
resp = requests.get(self.url)
if resp.ok:
try:
# Create a new dataframe to add to the old one
df = pandas.read_json(StringIO(resp.text))
if accumulator is None:
accumulator = df
else:
accumulator = pandas.concat([accumulator, df])
except ValueError:
if resp.text == 'no results found':
print("Didn't find any records, continuing")
continue
else:
raise IOError("Couldn't parse data in response")
# We'll keep the accumulated data thank you
df = accumulator
# Reset the query
for key in ('searchtype', 'standarditems', 'startrow', 'endrow'):
self[key] = None
# Convert numerical values
string_values = { # things to keep as strings
'sample_id', 'source', 'url', 'title', 'author', 'journal',
'method', 'material', 'type', 'composition', 'rock_name'
}
for key in df.keys():
if key not in string_values:
df[key] = pandas.to_numeric(df[key])
# Drop empty columns
if drop_empty:
df.dropna(axis='columns', how='all', inplace=True)
# Return the result
return df
@property
def url(self):
query_string = ('http://ecp.iedadata.org/restsearchservice?'
'outputtype=json')
for item in self.items():
query_string += '&{0}={1}'.format(*item)
return query_string
def info(self, key, pprint=True):
""" Return info about a search key
Parameters:
key - the key to get information about
pprint - whether to print the information or return
a dictionary with the contents
Returns:
if pprint=True, None, otherwise a dictionary with a
'doc' string and a 'valid_values'
"""
pass
```
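A usage sketch mirroring the package's own tests; it needs network access to the EarthChem REST service, and the author value is simply the one used in those tests.
```python
from earthchem import Query

query = Query(author='barnes')
print(query.url)      # REST URL with outputtype=json and author=barnes
print(query.count())  # total number of matching records

df = query.dataframe(max_rows=49)  # paginated download into a pandas DataFrame
print(df.columns)
```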
#### File: earthchem/transform/isometric.py
```python
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from .utilities import basis_matrix, closure
class IsometricLogTransform(BaseEstimator, TransformerMixin):
""" A custom sklearn transformer which implements centered-log scaling
for compositional data
"""
def fit(self, X, y=None):
"""
Fit does nothing
"""
return self
def transform(self, X):
"""
Returns the isometric log ratio transform of the given composition vectors X
Parameters:
X - an array of composition vectors. An M x N array where M
is the number of samples, and N is the number of
compositional species.
Returns:
the isometric log ratio-transformed data L
"""
X = np.asarray(X)
psi = basis_matrix(X.shape[1])
return np.dot(np.log(X), psi.T)
def inverse_transform(self, L):
"""
Returns the inverse isometric log ratio transformed data
Parameters:
L - an array of ILR-transformed composition vectors.
Returns:
the inverted data X
"""
L = np.asarray(L)
psi = basis_matrix(L.shape[1] + 1)
return closure(np.exp(np.dot(L, psi)))
```
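A small round-trip sketch for the ILR transformer above; it assumes `basis_matrix` builds the usual orthonormal ILR basis, that the import path follows the file layout shown here, and that the input rows are closed compositions (they sum to one). The numbers are invented.
```python
import numpy as np
from earthchem.transform.isometric import IsometricLogTransform

X = np.array([
    [0.2, 0.3, 0.5],
    [0.1, 0.6, 0.3],
])

ilr = IsometricLogTransform()
L = ilr.fit_transform(X)           # one fewer column than X: shape (2, 2)
X_back = ilr.inverse_transform(L)  # recovers the closed compositions
assert np.allclose(X, X_back)
```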
#### File: earthchem-pyclient/tests/test_client_data.py
```python
from earthchem import Query
import unittest
class TestRESTClientData(unittest.TestCase):
def setUp(self):
self.query = Query(author='barnes')
self.count = self.query.count()
self.df = self.query.dataframe(max_rows=49) # <50 for test speed
def test_data_type(self):
"Check the right columns are numeric"
# Expected dtypes for columns here
expected = {
'float64': [
'al2o3', 'cao', 'cl', 'feot', 'k', 'k2o', 'latitude', 'longitude', 'mgo', 'mno', 'na2o', 'p2o5', 'sio2', 'tio2'
],
'object': [
'author', 'composition', 'journal', 'material', 'method', 'rock_name', 'sample_id', 'source', 'title', 'type'
]
}
for dtype, keys in expected.items():
for key in keys:
try:
self.assertEqual(str(self.df[key].dtype), dtype)
except KeyError:
pass
if __name__ == '__main__':
unittest.main()
```
#### File: earthchem-pyclient/tests/test_integration.py
```python
from earthchem import Query
from matplotlib import pyplot as plt
import unittest
class IntegrationTestRESTClientQuery(unittest.TestCase):
"Some integration tests to check that things are working"
def setUp(self):
self.query = Query(author='barnes')
# <50 for test speed, pagination is checked elsewhere
self.df = self.query.dataframe(max_rows=49)
def test_plot_latlon(self):
"Check that plotting works without any issues"
self.df.plot('longitude', 'latitude', 'scatter')
plt.close()
def test_plot_data(self):
"Check that plotting works with data inputs"
self.df.plot('al2o3', 'sio2', 'scatter')
plt.close()
if __name__ == '__main__':
unittest.main()
```
#### File: earthchem-pyclient/tests/test_pagination.py
```python
from earthchem import pagination
import unittest
TEST_DATA = [
# input, expected output
([200], [(0, 49), (50, 99), (100, 149), (150, 200)]),
([159], [(0, 49), (50, 99), (100, 149), (150, 159)]),
([149, 30], [(0, 29), (30, 59), (60, 89), (90, 119), (120, 149)]),
([1], [(0, 1)])
]
class TestPagination(unittest.TestCase):
def test_pagination_function(self):
for ipt, expected in TEST_DATA:
self.assertEqual(expected, pagination.make_pages(*ipt))
if __name__ == '__main__':
unittest.main()
```
#### File: earthchem-pyclient/tests/test_validation.py
```python
from earthchem.validation import *
import unittest
ELEMENTS_IN_SCHEMA = (
'Reference',
'SampleType',
'SampleID',
'Keyword',
'CruiseID',
'Location',
'Age',
'Material'
)
TEST_EXAMPLES = {
'Reference': {
'works': [
{'author': 'barnes', 'journal': 'nature'},
{'author': 'barnes', 'journal': 'nature', 'doi': 'foo'},
{'author': 'klump'},
{'exactpubyear': '2005'},
{'minpubyear': '2000', 'maxpubyear': '2017'}
],
'fails': [
(ValueError, ['foo', 'bar']),
(ValueError, 'a string'),
(KeyError, {'a':'dict', 'with': 'random', 'keys': 'fails'})
]
},
# 'SampleType',
# 'SampleID',
# 'Keyword',
# 'CruiseID',
# 'Location',
# 'Age',
# 'Material'
}
class TestValidators(unittest.TestCase):
"Test validators"
def test_integration(self):
"Elements should build for all elements in the schema"
for element in ELEMENTS_IN_SCHEMA:
elem = ElementValidator(element)
self.assertTrue(elem is not None)
self.assertTrue(elem.tree is not None)
del elem
def test_validator_creation_1(self):
"Elements should create their own validators"
for element in ELEMENTS_IN_SCHEMA:
elem = ElementValidator(element)
self.assertTrue(elem._validator is not None)
del elem
def test_validator_creation_2(self):
"Check that complex validators work ok"
for name, cases in TEST_EXAMPLES.items():
v = ElementValidator(name)
for case in cases['works']:
self.assertTrue(v.validate(case))
for errtype, case in cases['fails']:
with self.assertRaises(errtype):
v.validate(case)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jesserobertson/maxr",
"score": 2
}
|
#### File: jesserobertson/maxr/setup_extensions.py
```python
from os import path, listdir
from logging import getLogger
from multiprocessing import cpu_count
import numpy
# Here we try to import Cython - if it's here then we can generate new c sources
# directly from the pyx files using their build_ext class.
# If not then we just use the default setuptools version
try:
from Cython.Distutils import build_ext
HAVE_CYTHON = True
except ImportError:
from setuptools.command.build_ext import build_ext
HAVE_CYTHON = False
from setuptools import Extension
from setuptools.command.sdist import sdist
LOGGER = getLogger()
# Where are our extensions located?
EXTENSIONS_MODULE = ['maxr', 'ext']
PATH_TO_EXTENSIONS = path.join(*(
[path.abspath(path.dirname(__file__))]
+ EXTENSIONS_MODULE
))
def update_thread_count():
""" Update the thread count for OpenMP extensions
Uses one thread per core, with the estimate of the number of cores from
multiprocessing.cpu_count.
"""
LOGGER.info('Updating thread count for cython code to %d', cpu_count())
num_threads = cpu_count() # We're just going for 1 thread/CPU here
fname = path.join(PATH_TO_EXTENSIONS, 'common.pxd')
with open(fname, 'r') as src:
content = src.readlines() # this is short, just slurp it
with open(fname, 'w') as sink:
for line in content:
if line.startswith('cdef int NUM_THREADS'):
sink.write('cdef int NUM_THREADS = {0}'.format(num_threads))
else:
sink.write(line)
def get_extensions():
""" Find our extensions to build.
Also updates the thread count for OpenMP extensions to the number of CPUs
available on the current machine.
Returns:
a list of Extension objects to pass to setup
"""
update_thread_count()
# Get the extensions
if HAVE_CYTHON:
files = [f for f in listdir(PATH_TO_EXTENSIONS) if f.endswith('.pyx')]
else:
files = [f for f in listdir(PATH_TO_EXTENSIONS) if f.endswith('.c')]
# Construct keyword arguments for all extensions
kwargs = dict(
extra_compile_args=['-fopenmp'],
extra_link_args=['-fopenmp'],
include_dirs=[numpy.get_include(), PATH_TO_EXTENSIONS]
)
# Construct all the extension objects and return them
extensions = []
for fname in files:
module_name = path.splitext(path.split(fname)[1])[0]
extension_name = '.'.join(EXTENSIONS_MODULE + [module_name])
source = path.join(PATH_TO_EXTENSIONS, fname)
extensions.append(Extension(extension_name, sources=[source], **kwargs))
return extensions
# Update source distribution - we always require Cython for this...
class cython_sdist(sdist):
def run(self):
# Make sure the compiled Cython files in the distribution are up-to-date
from Cython.Build import cythonize
update_thread_count()
cythonize([path.join(PATH_TO_EXTENSIONS, f)
for f in listdir(PATH_TO_EXTENSIONS)
if f.endswith('.pyx')])
super().run()
def get_cmdclass():
""" Return a command class which builds cython extensions automatically
"""
cmdclass = {
'build_ext': build_ext,
'sdist': cython_sdist
}
return cmdclass
```
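For context, a sketch of how these helpers would typically be wired into the project's setup.py; the package metadata here is assumed rather than taken from the repository.
```python
from setuptools import setup
from setup_extensions import get_extensions, get_cmdclass

setup(
    name='maxr',
    packages=['maxr', 'maxr.ext'],
    ext_modules=get_extensions(),  # Cython/OpenMP extensions found under maxr/ext
    cmdclass=get_cmdclass(),       # cythonize .pyx sources on sdist and build_ext
)
```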
#### File: maxr/tests/test_flow.py
```python
from __future__ import print_function, division
import unittest
import numpy
import matplotlib.pyplot as plt
import os
from maxr import flow
from maxr.flow.blink import blink, tick, tock
class TestBlink(unittest.TestCase):
""" Tests for Blink functions
"""
def setUp(self):
self.times = numpy.linspace(0, 1, 1000)
self.period = 0.1
def test_ticktock(self):
""" Check that tick and tock work ok
"""
plt.plot(self.times, tick(self.times, self.period), label='tick')
plt.plot(self.times, tock(self.times, self.period), label='tock')
plt.legend(loc='best')
def test_blink(self):
""" Check that blink works ok
"""
_flow = blink(gamma=1, period=0.5)
uus, vus = _flow(0.25, 0.5, self.times)
plt.plot(self.times, uus, label='u')
plt.plot(self.times, vus, label='v')
plt.legend(loc='best')
class TestFlow(unittest.TestCase):
""" Tests for Flow class
"""
def setUp(self):
self.fname = 'blink_test.hdf5'
flow.from_function(blink(gamma=1, period=0.5), self.fname)
self.flow = flow.Flow(self.fname)
def tearDown(self):
if os.path.exists(self.fname):
os.remove(self.fname)
def test_makefile(self):
"Flow file should be created"
self.assertTrue(os.path.exists(self.fname))
def test_readifle(self):
"Flow variables should be accessible"
for key in 'tuvxy':
self.assertTrue(self.flow.data[key] is not None)
def test_interp(self):
"Flow interpolations should be accessible"
for key in ('u', 'v', 'du/dx', 'dv/dy', 'du/dt'):
self.assertTrue(self.flow(key) is not None)
def test_flow_info(self):
"Flow info should be accessible"
self.flow.info()
def test_plot_data(self):
"Flow data should be accessible to plot"
xps, yps = self.flow.grid()
_, axes = plt.subplots(2, 3)
for iidx in (0, 1):
dfunc = 'dv' if iidx else 'du'
for jidx in (0, 1):
dps = 'dy' if jidx else 'dx'
axis = axes[iidx, jidx]
key = dfunc + '/' + dps
axis.contourf(xps, yps, self.flow.data[key][..., 5])
axis.set_aspect('equal')
axis.set_axis_off()
axis.set_title(key)
axis = axes[iidx, 2]
key = dfunc + '/dt'
axis.contourf(xps, yps, self.flow.data[key][..., 5])
axis.set_axis_off()
axis.set_title(key)
def test_snapshots(self):
"Plotting snapshots should work ok"
self.flow.plot_snapshots(plot_every=1)
```
#### File: maxr/tests/test_history.py
```python
from __future__ import print_function, division
import unittest
from collections import defaultdict
from numpy import array, sqrt, pi, linspace, sin, cos, arange, median
from scipy.special import fresnel
from maxr.integrator import history
def solution(time):
""" Solution to sinusoidal history term
"""
ssc, csc = fresnel(sqrt(2 * time / pi))
return sqrt(2 * pi) * (
csc * sin(time) - ssc * cos(time))
def evaluate_history_integral(func, times, order=1):
""" Evaluate the history integral for a given driving function func
"""
return array([0] + [
history.integrator(func(times[:idx+1]), times[:idx+1],
order=order)
for idx in range(1, len(times))])
class TestHistory(unittest.TestCase):
r""" Tests to test integrator.history module
Check accuracy of convergence. We use a sinusoidal forcing and plot the response
$$
\int_0^{t} \frac{\sin{(\tau)}}{\sqrt{t - \tau}}d\tau =
\sqrt{2 \pi}\left[
C{\left(\sqrt{\frac{2t}{\pi}}\right)}\sin{t}
- S{\left(\sqrt{\frac{2t}{\pi}}\right)}\cos{t}
\right]
$$
where $C$ is the Fresnel C (cos) integral, and $S$ is the Fresnel
$S$ (sin) integral. Note the solution in the paper is **WRONG**
"""
tmin, tmax = 0, 30
def test_integrator(self):
""" Integrator should function ok
"""
expected = -1.2492166377597749
times = linspace(self.tmin, self.tmax, 1000)
self.assertTrue(
abs(history.integrator(sin(times), times) - expected) < 1e-5)
def test_integrator_solution(self):
""" Integrator should work over a range of values
"""
nsteps = 1280
order = 3
for tmax in (10, 20, 50):
times = linspace(self.tmin, tmax, nsteps)
numeric = evaluate_history_integral(sin, times, order=order)
exact = solution(times)
self.assertTrue(
median(abs(numeric - exact)) < 1e-5)
def test_range_steps(self):
""" Integrators should work for all orders at all steps
"""
# Set up steps
nstepstep = 25
nsteps = arange(nstepstep, 500, nstepstep)
# Calculate error
error = defaultdict(list)
for order in (1, 2, 3):
for num in nsteps:
times = linspace(self.tmin, self.tmax, num)
err = evaluate_history_integral(sin, times, order=order) - solution(times)
error[order].append(abs(err).max())
```
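The convergence check exercised by these tests can be reproduced standalone; a sketch that reuses the helper functions defined in the test module above.
```python
from numpy import linspace, sin, median

times = linspace(0, 30, 1280)
numeric = evaluate_history_integral(sin, times, order=3)
exact = solution(times)
print(median(abs(numeric - exact)))  # expected to be well below 1e-5
```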
#### File: maxr/tests/test_parameters.py
```python
from __future__ import print_function, division
import unittest
from maxr import Parameters
class TestParameters(unittest.TestCase):
""" Check that parameters get updated ok
"""
def setUp(self):
self.p = Parameters()
def test_setting(self):
"Equivalent parameters should be updated at the same time"
for p1, p2 in self.p.equivalent:
self.p[p1] = 1
self.assertEqual(1, self.p[p2])
def test_defaults(self):
"Defaults should remain set when not set already"
for par, val in self.p.default_parameters.items():
self.assertEqual(self.p[par], val)
def test_attributes(self):
"Attributes should be copied to the dictionary"
for par, val in self.p.items():
self.assertEqual(getattr(self.p, par), val)
def test_unknown(self):
"Adding an unknown key raises a KeyError"
self.assertRaises(KeyError, lambda *a: self.p.__setitem__(*a),
'foo', 'bar')
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jesserobertson/uncover-ml",
"score": 2
}
|
#### File: uncover-ml/preprocessing/resample.py
```python
import click
import logging
import uncoverml as ls
from uncoverml.resampling import resample_shapefile
from uncoverml import config
import uncoverml.mllog
log = logging.getLogger(__name__)
@click.command()
@click.argument('pipeline_file')
@click.option('-o', '--outfile', type=click.Path(exists=False), default=None,
help="Sampled output shapefile name, "
"if not specified a random name is used and the file "
"is saved in the outdir specified in config file")
@click.option('-s', '--validation_file', type=click.Path(exists=False),
default=None,
help="Validation shapefile name, "
"if specified a validation shapefile is produced which "
"can be used for model validation")
@click.option('-n', '--validation_points', type=int,
default=100,
help="approximate number of validation points")
@click.option('-v', '--verbosity',
type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR']),
default='INFO', help='Level of logging')
def cli(pipeline_file, outfile, validation_file, validation_points,
verbosity):
uncoverml.mllog.configure(verbosity)
config = ls.config.Config(pipeline_file)
resample_shapefile(config, outfile, validation_file,
validation_points=validation_points)
```
#### File: jesserobertson/uncover-ml/setup.py
```python
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
# If testing in python 2, use subprocess32 instead of built in subprocess
if os.name == 'posix' and sys.version_info[0] < 3:
exta_test_deps = ['subprocess32']
else:
exta_test_deps = []
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
super(PyTest, self).initialize_options()
self.pytest_args = []
def finalize_options(self):
super(PyTest, self).finalize_options()
self.test_suite = True
self.test_args = []
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
exit(pytest.main(self.pytest_args))
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation is at http://GeoscienceAustralia.github.io/uncover-ml/."""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='uncover-ml',
version='0.1.0',
description='Machine learning tools for the Geoscience Australia uncover '
'project',
long_description=readme + '\n\n' + doclink + '\n\n' + history,
author='Geoscience Australia Mineral Systems Group, NICTA Spatial '
'Inference Systems Team',
author_email='<EMAIL>',
url='https://github.com/GeoscienceAustralia/uncover-ml',
packages=['uncoverml', 'uncoverml.scripts', 'uncoverml.transforms',
'preprocessing', 'uncoverml.optimise'],
package_dir={'uncover-ml': 'uncoverml'},
include_package_data=True,
entry_points={
'console_scripts': [
'uncoverml = uncoverml.scripts.uncoverml:cli',
'gammasensor = uncoverml.scripts.gammasensor:cli',
'tiff2kmz = uncoverml.scripts.tiff2kmz:main',
'subsampletargets = uncoverml.scripts.subsampletargets:cli',
'geoinfo = preprocessing.geoinfo:cli',
'resample = preprocessing.resample:cli',
'rasteraverage = preprocessing.raster_average:cli',
'gridsearch = uncoverml.scripts.gridsearch:cli'
]
},
install_requires=[
'numpy >= 1.9.2',
'pycontracts == 1.7.9',
'tables >= 3.2.2',
'rasterio == 0.36.0',
'affine == 2.0.0.post1',
'pyshp == 1.2.3',
'click == 6.6',
'revrand >= 0.9.10',
'mpi4py == 2.0.0',
'scipy >= 0.15.1',
'scikit-learn == 0.18.1',
'scikit-image >= 0.12.3',
'wheel >= 0.29.0',
'PyYAML >= 3.11',
'GDAL >= 2.0.0',
'pandas >= 0.18.1',
'geopandas == 0.2.1',
'matplotlib == 1.5.1',
'PyKrige == 1.3.0',
],
extras_require={
'demos': [
'matplotlib'
],
'kmz': [
'simplekml',
'pillow'
],
'dev': [
'sphinx',
'ghp-import',
'sphinxcontrib-programoutput'
]
},
cmdclass={
'test': PyTest
},
tests_require=[
'pytest',
'pytest-cov',
'coverage',
'codecov',
'tox',
] + exta_test_deps,
license="Apache Software License 2.0",
zip_safe=False,
keywords='uncover-ml',
classifiers=[
'Development Status :: 4 - Beta',
"Operating System :: POSIX",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3.4",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Information Analysis"
],
)
```
#### File: uncoverml/scripts/subsampletargets.py
```python
from random import sample
import os.path
import shapefile
import click
import uncoverml.mllog
@click.command()
@click.argument('filename')
@click.option('-n', '--npoints', type=int, default=1000,
help='Number of points to keep')
@click.option('-v', '--verbosity',
type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR']),
default='INFO', help='Level of logging')
@click.option('-o', '--outputdir', default='.', help='Location to output file')
def cli(outputdir, npoints, verbosity, filename):
# Setup the logging
uncoverml.mllog.configure(verbosity)
name = os.path.basename(filename).rsplit(".", 1)[0]
# Read the shapefile
file = shapefile.Reader(filename)
shapes = file.shapes()
records = file.records()
items = list(zip(shapes, records))
# Randomly sample the shapefile to keep n points
remaining_items = sample(items, npoints)
# Create a new shapefile using the data saved
w = shapefile.Writer(shapefile.POINT)
w.fields = list(file.fields)
keep_shapes, _ = zip(*remaining_items)
for shape, record in remaining_items:
w.records.append(record)
w._shapes.extend(keep_shapes)
# Write out the subsampled shapefile
outfile = os.path.join(outputdir, name + '_' + str(npoints))
w.save(outfile)
```
#### File: uncover-ml/uncoverml/validate.py
```python
from __future__ import division
import logging
import copy
import matplotlib.pyplot as pl
import numpy as np
from sklearn.metrics import explained_variance_score, r2_score
from revrand.metrics import lins_ccc, mll, msll, smse
from uncoverml.models import apply_multiple_masked
from uncoverml import mpiops
from uncoverml import predict
from uncoverml import features as feat
from uncoverml import targets as targ
from uncoverml.learn import all_modelmaps as modelmaps
log = logging.getLogger(__name__)
metrics = {'r2_score': r2_score,
'expvar': explained_variance_score,
'smse': smse,
'lins_ccc': lins_ccc,
'mll': mll,
'msll': msll}
def split_cfold(nsamples, k=5, seed=None):
"""
Function that returns indices for splitting data into random folds.
Parameters
----------
nsamples: int
the number of samples in the dataset
k: int, optional
the number of folds
seed: int, optional
random seed to provide to numpy
Returns
-------
cvinds: list
list of k arrays of indices, each with approximate shape (nsamples /
k,). Together they form a random permutation (without replacement) of
the sample indices, assigning each sample to a fold.
cvassigns: ndarray
array of shape (nsamples,) with each element in [0, k), that can be
used to assign data to a fold. This corresponds to the indices of
cvinds.
"""
np.random.seed(seed)
pindeces = np.random.permutation(nsamples)
cvinds = np.array_split(pindeces, k)
cvassigns = np.zeros(nsamples, dtype=int)
for n, inds in enumerate(cvinds):
cvassigns[inds] = n
return cvinds, cvassigns
def get_first_dim(y):
return y[:, 0] if y.ndim > 1 else y
# Decorator to deal with probabilistic output for non-probabilistic scores
def score_first_dim(func):
def newscore(y_true, y_pred, *args, **kwargs):
return func(y_true.flatten(), get_first_dim(y_pred), *args, **kwargs)
return newscore
def calculate_validation_scores(ys, yt, eys):
""" Calculates the validation scores for a prediction
Given the test and training data, as well as the outputs from every model,
this function calculates all of the applicable metrics in the following
list, and returns a dictionary with the following (possible) keys:
+ r2_score
+ expvar
+ smse
+ lins_ccc
+ mll
+ msll
Parameters
----------
ys: numpy.array
The test data outputs
yt: numpy.array
The training data's target values (used to standardise scores such as msll)
eys: numpy.array
The predictions made by the trained model on test data
Returns
-------
scores: dict
A dictionary containing all of the evaluated scores.
"""
probscores = ['msll', 'mll']
scores = {}
# cubist can predict nan when a categorical variable is not
# present in the training data
# TODO: Can be removed except for cubist
nans = ~np.isnan(eys[:, 0])
ys = ys[nans]
eys = eys[:, 0][nans]
for m in metrics:
if m not in probscores:
score = apply_multiple_masked(score_first_dim(metrics[m]),
(ys, eys))
elif eys.ndim == 2:
if m == 'mll' and eys.shape[1] > 1:
score = apply_multiple_masked(mll, (ys, eys[:, 0], eys[:, 1]))
elif m == 'msll' and eys.shape[1] > 1:
score = apply_multiple_masked(msll, (ys, eys[:, 0], eys[:, 1]),
(yt,))
else:
continue
else:
continue
scores[m] = score
return scores
def y_y_plot(y1, y2, y_label=None, y_exp_label=None, title=None,
outfile=None, display=None):
""" Makes a y-y plot from two corresponding vectors
This function makes a y-y plot given two y vectors (y1, y2). This plot can
be used to evaluate the performance of the machine learning models.
Parameters
----------
y1: numpy.array
The first input vector
y2: numpy.array
The second input vector, of the same size as y1
y_label: string
The axis label for the first vector
y_exp_label: string
The axis label for the second vector
title: string
The plot title
outfile: string
The location to save an image of the plot
display: boolean
If true, a matplotlib graph will display in a window, note that this
pauses the execution of the main program until this window is closed.
"""
fig = pl.figure()
maxy = max(y1.max(), get_first_dim(y2).max())
miny = min(y1.min(), get_first_dim(y2).min())
apply_multiple_masked(pl.plot, (y1, get_first_dim(y2)), ('k.',))
pl.plot([miny, maxy], [miny, maxy], 'r')
pl.grid(True)
pl.xlabel(y_label)
pl.ylabel(y_exp_label)
pl.title(title)
if outfile is not None:
fig.savefig(outfile + ".png")
if display:
pl.show()
class CrossvalInfo:
def __init__(self, scores, y_true, y_pred):
self.scores = scores
self.y_true = y_true
self.y_pred = y_pred
def local_rank_features(image_chunk_sets, transform_sets, targets, config):
""" Ranks the importance of the features based on their performance.
This function trains and cross-validates a model with each individual
feature removed and then measures the performance of the model with that
feature removed. The most important feature is the one which, when removed,
causes the greatest degradation in the performance of the model.
Parameters
----------
image_chunk_sets: dict
A dictionary used to get the set of images to test on.
transform_sets: list
A dictionary containing the applied transformations
targets: instance of geoio.Targets class
The targets used in the cross validation
config: config class instance
The global config file
"""
feature_scores = {}
# Get all the images
all_names = []
for c in image_chunk_sets:
all_names.extend(list(c.keys()))
all_names = sorted(list(set(all_names))) # make unique
if len(all_names) <= 1:
raise ValueError("Cannot perform feature ranking with only one "
"feature! Try turning off the 'feature_rank' option.")
for name in all_names:
transform_sets_leaveout = copy.deepcopy(transform_sets)
final_transform_leaveout = copy.deepcopy(config.final_transform)
image_chunks_leaveout = [copy.copy(k) for k in image_chunk_sets]
for i, c in enumerate(image_chunks_leaveout):
if name in c:
c.pop(name)
# if only one covariate of a feature type, delete
# this feature type, and transformset
if not c:
image_chunks_leaveout.pop(i)
transform_sets_leaveout.pop(i)
fname = name.rstrip(".tif")
log.info("Computing {} feature importance of {}"
.format(config.algorithm, fname))
x, keep = feat.transform_features(image_chunks_leaveout,
transform_sets_leaveout,
final_transform_leaveout,
config)
x_all = feat.gather_features(x[keep], node=0)
targets_all = targ.gather_targets_main(targets, keep, node=0)
results = local_crossval(x_all, targets_all, config)
feature_scores[fname] = results
# Get the different types of score from one of the outputs
if mpiops.chunk_index == 0:
measures = list(next(feature_scores.values().__iter__()).scores.keys())
features = sorted(feature_scores.keys())
scores = np.empty((len(measures), len(features)))
for m, measure in enumerate(measures):
for f, feature in enumerate(features):
scores[m, f] = feature_scores[feature].scores[measure]
return measures, features, scores
else:
return None, None, None
def _join_dicts(dicts):
if dicts is None:
return
d = {k: v for D in dicts for k, v in D.items()}
return d
def local_crossval(x_all, targets_all, config):
""" Performs K-fold cross validation to test the applicability of a model.
Given a set of inputs and outputs, this function will evaluate the
effectiveness of a model at predicting the targets, by splitting all of
the known data. A model is trained on a subset of the total data, and then
this model is used to predict all of the unseen targets, its performance
can provide a benchmark to evaluate the effectiveness of a model.
Parameters
----------
x_all: numpy.array
A 2D array containing all of the training inputs
targets_all: numpy.array
A 1D vector containing all of the training outputs
config: dict
The global config object, which is used to choose the model to train.
Return
------
result: dict
A dictionary containing all of the cross validation metrics, evaluated
on the unseen data subset.
"""
# run cross validation in parallel, but one thread for each fold
if config.multicubist or config.multirandomforest:
config.algorithm_args['parallel'] = False
if (mpiops.chunk_index != 0) and (not config.parallel_validate):
return
log.info("Validating with {} folds".format(config.folds))
model = modelmaps[config.algorithm](**config.algorithm_args)
y = targets_all.observations
lon_lat = targets_all.positions
_, cv_indices = split_cfold(y.shape[0], config.folds, config.crossval_seed)
# Split folds over workers
fold_list = np.arange(config.folds)
if config.parallel_validate:
fold_node = np.array_split(fold_list, mpiops.chunks)[mpiops.chunk_index]
else:
fold_node = fold_list
y_pred = {}
y_true = {}
fold_scores = {}
# Train and score on each fold
for fold in fold_node:
print("Training fold {} of {} using process {}".format(
fold + 1, config.folds, mpiops.chunk_index))
train_mask = cv_indices != fold
test_mask = ~ train_mask
y_k_train = y[train_mask]
lon_lat_train = lon_lat[train_mask]
lon_lat_test = lon_lat[test_mask]
# Extra fields
fields_train = {f: v[train_mask]
for f, v in targets_all.fields.items()}
fields_pred = {f: v[test_mask] for f, v in targets_all.fields.items()}
# Train on this fold
apply_multiple_masked(model.fit, data=(x_all[train_mask], y_k_train),
kwargs={'fields': fields_train,
'lon_lat': lon_lat_train})
# Testing
y_k_pred = predict.predict(x_all[test_mask], model,
fields=fields_pred,
lon_lat=lon_lat_test
)
y_k_test = y[test_mask]
y_pred[fold] = y_k_pred
y_true[fold] = y_k_test
fold_scores[fold] = calculate_validation_scores(y_k_test, y_k_train,
y_k_pred)
if config.parallel_validate:
y_pred = _join_dicts(mpiops.comm.gather(y_pred, root=0))
y_true = _join_dicts(mpiops.comm.gather(y_true, root=0))
scores = _join_dicts(mpiops.comm.gather(fold_scores, root=0))
else:
scores = fold_scores
result = None
if mpiops.chunk_index == 0:
y_true = np.concatenate([y_true[i] for i in range(config.folds)])
y_pred = np.concatenate([y_pred[i] for i in range(config.folds)])
valid_metrics = scores[0].keys()
scores = {m: np.mean([d[m] for d in scores.values()])
for m in valid_metrics}
score_string = "Validation complete:\n"
for metric, score in scores.items():
score_string += "{}\t= {}\n".format(metric, score)
log.info(score_string)
result_tags = model.get_predict_tags()
y_pred_dict = dict(zip(result_tags, y_pred.T))
result = CrossvalInfo(scores, y_true, y_pred_dict)
# change back to parallel
if config.multicubist or config.multirandomforest:
config.algorithm_args['parallel'] = True
return result
```
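A short sketch of what `split_cfold` returns for a toy case: ten samples split into five folds of two, with fold assignments in the range [0, 5).
```python
from uncoverml.validate import split_cfold

cvinds, cvassigns = split_cfold(10, k=5, seed=1)
print([len(fold) for fold in cvinds])                     # [2, 2, 2, 2, 2]
print(cvassigns.shape, cvassigns.min(), cvassigns.max())  # (10,) 0 4
```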
|
{
"source": "jesserockz/forecast_solar",
"score": 3
}
|
#### File: forecast_solar/forecast_solar/models.py
```python
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime, timedelta, date
from enum import Enum
from typing import Any
import sys
if sys.version_info[:2] >= (3, 9):
import zoneinfo
else:
from backports import zoneinfo
from aiohttp import ClientResponse
def _timed_value(at: datetime, data: dict[datetime, int]) -> int | None:
"""Return the value for a specific time."""
value = None
for timestamp, cur_value in data.items():
if timestamp > at:
return value
value = cur_value
return None
class AccountType(str, Enum):
"""Enumeration representing the Forecast.Solar account type."""
PUBLIC = "public"
PERSONAL = "personal"
PROFESSIONAL = "professional"
@dataclass
class Estimate:
"""Object holding estimate forecast results from Forecast.Solar.
Attributes:
wh_days: Estimated solar energy production per day.
wh_hours: Estimated solar energy production per hour.
watts: Estimated solar power output per hour.
"""
wh_days: dict[datetime, int]
wh_hours: dict[datetime, int]
watts: dict[datetime, int]
api_rate_limit: int
api_timezone: str
@property
def timezone(self) -> str:
"""Return API timezone information."""
return self.api_timezone
@property
def account_type(self) -> AccountType:
"""Return API account_type information."""
if self.api_rate_limit == 60:
return AccountType.PERSONAL
if self.api_rate_limit == 5:
return AccountType.PROFESSIONAL
return AccountType.PUBLIC
@property
def energy_production_today(self) -> int:
"""Return estimated energy produced today."""
return self.day_production(self.now().date())
@property
def energy_production_tomorrow(self) -> int:
"""Return estimated energy produced today."""
return self.day_production(self.now().date() + timedelta(days=1))
@property
def power_production_now(self) -> int:
"""Return estimated power production right now."""
return self.power_production_at_time(self.now())
@property
def power_highest_peak_time_today(self) -> datetime:
"""Return datetime with highest power production moment today."""
return self.peak_production_time(self.now().date())
@property
def power_highest_peak_time_tomorrow(self) -> datetime:
"""Return datetime with highest power production moment tomorrow."""
return self.peak_production_time(self.now().date() + timedelta(days=1))
@property
def energy_current_hour(self) -> int:
"""Return the estimated energy production for the current hour."""
return _timed_value(self.now(), self.wh_hours) or 0
def day_production(self, specific_date: date) -> int:
"""Return the day production."""
for timestamp, production in self.wh_days.items():
if timestamp.date() == specific_date:
return production
return 0
def now(self) -> datetime:
"""Return the current timestamp in the API timezone."""
return datetime.now(tz=zoneinfo.ZoneInfo(self.api_timezone))
def peak_production_time(self, specific_date: date) -> datetime:
"""Return the peak time on a specific date."""
value = max(
(watt for date, watt in self.watts.items() if date.date() == specific_date),
default=None,
)
for (
timestamp,
watt,
) in self.watts.items():
if watt == value:
return timestamp
def power_production_at_time(self, time: datetime) -> int:
"""Return estimated power production at a specific time."""
return _timed_value(time, self.watts) or 0
def sum_energy_production(self, period_hours: int) -> int:
"""Return the sum of the energy production."""
now = self.now().replace(minute=59, second=59)
until = now + timedelta(hours=period_hours)
total = 0
for timestamp, wh in self.wh_hours.items():
# Skip all dates until this hour
if timestamp < now:
continue
if timestamp > until:
break
total += wh
return total
@classmethod
def from_dict(cls, data: dict[str, Any]) -> Estimate:
"""Return a Estimate object from a Forecast.Solar API response.
Converts a dictionary, obtained from the Forecast.Solar API into
a Estimate object.
Args:
data: The estimate response from the Forecast.Solar API.
Returns:
An Estimate object.
"""
previous_value = 0
wh_hours = {}
for timestamp, energy in data["result"]["watt_hours"].items():
timestamp = datetime.fromisoformat(timestamp)
# If we get a reset
if energy < previous_value:
previous_value = 0
wh_hours[timestamp] = energy - previous_value
previous_value = energy
return cls(
wh_days={
datetime.fromisoformat(d): e
for d, e in data["result"]["watt_hours_day"].items()
},
wh_hours=wh_hours,
watts={
datetime.fromisoformat(d): w for d, w in data["result"]["watts"].items()
},
api_rate_limit=data["message"]["ratelimit"]["limit"],
api_timezone=data["message"]["info"]["timezone"],
)
@dataclass
class Ratelimit:
"""Information about the current rate limit."""
call_limit: int
remaining_calls: int
period: int
retry_at: datetime | None
@classmethod
def from_response(cls, response: ClientResponse) -> Ratelimit:
"""Initialize rate limit object from response."""
# The documented headers do not match the returned headers
# https://doc.forecast.solar/doku.php?id=api#headers
limit = int(response.headers["X-Ratelimit-Limit"])
period = int(response.headers["X-Ratelimit-Period"])
# Remaining is not there if we exceeded limit
remaining = int(response.headers.get("X-Ratelimit-Remaining", 0))
if "X-Ratelimit-Retry-At" in response.headers:
retry_at = datetime.fromisoformat(response.headers["X-Ratelimit-Retry-At"])
else:
retry_at = None
return cls(limit, remaining, period, retry_at)
```
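To show which fields `Estimate.from_dict` actually reads, here is a hand-built minimal payload; the timestamps and watt values are invented, and a real Forecast.Solar response carries many more entries.
```python
from datetime import date
from forecast_solar import models

payload = {
    "result": {
        "watts": {"2021-07-21 10:00:00": 1000, "2021-07-21 11:00:00": 1500},
        "watt_hours": {"2021-07-21 10:00:00": 1000, "2021-07-21 11:00:00": 2500},
        "watt_hours_day": {"2021-07-21": 2500},
    },
    "message": {
        "ratelimit": {"limit": 60},
        "info": {"timezone": "Europe/Amsterdam"},
    },
}

estimate = models.Estimate.from_dict(payload)
print(estimate.account_type)                       # AccountType.PERSONAL (rate limit of 60)
print(estimate.day_production(date(2021, 7, 21)))  # 2500
```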
#### File: forecast_solar/tests/test_models.py
```python
from datetime import datetime
from forecast_solar import models
from . import PAYLOAD, patch_now, patch_previous_day, patch_near_end_today
def test_estimate_previous_day(patch_previous_day):
"""Test estimate."""
estimate = models.Estimate.from_dict(PAYLOAD)
assert estimate.timezone == "Europe/Amsterdam"
assert estimate.now().date().isoformat() == "2021-07-20"
assert estimate.energy_production_today == 12984
assert estimate.energy_production_tomorrow == 14679
assert estimate.power_production_now == 0
assert estimate.energy_current_hour == 0
assert estimate.power_highest_peak_time_today == datetime.fromisoformat(
"2021-07-20T15:00:00+02:00"
)
assert estimate.power_highest_peak_time_tomorrow == datetime.fromisoformat(
"2021-07-21T14:00:00+02:00"
)
assert estimate.sum_energy_production(1) == 0
assert estimate.sum_energy_production(6) == 0
assert estimate.sum_energy_production(12) == 3631
assert estimate.sum_energy_production(24) == 14679
def test_estimate_now(patch_now):
"""Test estimate."""
estimate = models.Estimate.from_dict(PAYLOAD)
assert estimate.timezone == "Europe/Amsterdam"
assert estimate.now().date().isoformat() == "2021-07-21"
assert estimate.energy_production_today == 14679
assert estimate.energy_production_tomorrow == 0
assert estimate.power_production_now == 724
assert estimate.energy_current_hour == 724
assert estimate.power_highest_peak_time_today == datetime.fromisoformat(
"2021-07-21T14:00:00+02:00"
)
assert estimate.power_highest_peak_time_tomorrow is None
assert estimate.sum_energy_production(1) == 1060
assert estimate.sum_energy_production(6) == 9044
assert estimate.sum_energy_production(12) == 13454
assert estimate.sum_energy_production(24) == 13454
def test_estimate_near_end(patch_near_end_today):
"""Test estimate."""
estimate = models.Estimate.from_dict(PAYLOAD)
assert estimate.timezone == "Europe/Amsterdam"
assert estimate.now().date().isoformat() == "2021-07-21"
assert estimate.energy_production_today == 14679
assert estimate.energy_production_tomorrow == 0
assert estimate.power_production_now == 888
assert estimate.energy_current_hour == 888
assert estimate.power_highest_peak_time_today == datetime.fromisoformat(
"2021-07-21T14:00:00+02:00"
)
assert estimate.power_highest_peak_time_tomorrow is None
assert estimate.sum_energy_production(1) == 548
assert estimate.sum_energy_production(6) == 846
assert estimate.sum_energy_production(12) == 846
assert estimate.sum_energy_production(24) == 846
```
|
{
"source": "jesserockz/python-clickandcollectnz",
"score": 3
}
|
#### File: python-clickandcollectnz/clickandcollectnz/__main__.py
```python
import sys
import json
import argparse
from clickandcollectnz.countdown import Countdown
from clickandcollectnz.foodstuffs import NewWorld, PakNSave
classes = ['Countdown', 'NewWorld', 'PakNSave']
def print_usage():
print("Usage: python -m clickandcollectnz [chain] [store_id]")
print()
print(" chain: Countdown | PakNSave | NewWorld")
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='python -m clickandcollectnz', description='NZ Click and Collect time slots.')
parser.add_argument('chain', nargs="?", choices=classes)
parser.add_argument('store_id', nargs="?")
parser.add_argument('--json', dest='json', action='store_const',
const=True, default=False,
help='output in JSON format')
args = parser.parse_args()
if not args.chain:
parser.print_help()
sys.exit(0)
if args.chain and not args.store_id:
cls = eval(args.chain)
stores = cls.get_store_list()
if args.json:
print(json.dumps(stores, default=lambda o: o.to_json()))
else:
print("ID - Store Name")
for store in stores:
print(store)
sys.exit(0)
if args.chain and args.store_id:
cls = eval(args.chain)
stores = cls.get_store_list()
store = next((x for x in stores if x.id == args.store_id), None)
store.get_slots()
if args.json:
print(json.dumps(store, default=lambda o: o.to_json()))
else:
print("Not Implemented")
```
|
{
"source": "jesseschalken/rules_proto_grpc",
"score": 2
}
|
#### File: rules_proto_grpc/internal/compile.bzl
```python
load("@rules_proto//proto:defs.bzl", "ProtoInfo")
load(
"//internal:common.bzl",
"copy_file",
"descriptor_proto_path",
"get_output_filename",
"get_package_root",
"strip_path_prefix",
)
load("//internal:providers.bzl", "ProtoCompileInfo", "ProtoPluginInfo")
load("//internal:protoc.bzl", "build_protoc_args")
proto_compile_attrs = {
"protos": attr.label_list(
mandatory = True,
providers = [ProtoInfo],
doc = "List of labels that provide the ProtoInfo provider (such as proto_library from rules_proto)",
),
"options": attr.string_list_dict(
doc = "Extra options to pass to plugins, as a dict of plugin label -> list of strings. The key * can be used exclusively to apply to all plugins",
),
"verbose": attr.int(
doc = "The verbosity level. Supported values and results are 0: Show nothing, 1: Show command, 2: Show command and sandbox after running protoc, 3: Show command and sandbox before and after running protoc, 4. Show env, command, expected outputs and sandbox before and after running protoc",
),
"prefix_path": attr.string(
doc = "Path to prefix to the generated files in the output directory",
),
"extra_protoc_args": attr.string_list(
doc = "A list of extra args to pass directly to protoc, not as plugin options",
),
"extra_protoc_files": attr.label_list(
allow_files = True,
doc = "List of labels that provide extra files to be available during protoc execution",
),
"output_mode": attr.string(
default = "PREFIXED",
values = ["PREFIXED", "NO_PREFIX"],
doc = "The output mode for the target. PREFIXED (the default) will output to a directory named by the target within the current package root, NO_PREFIX will output directly to the current package. Using NO_PREFIX may lead to conflicting writes",
),
}
def proto_compile_impl(ctx):
"""
Common implementation function for lang_*_compile rules.
Args:
ctx: The Bazel rule execution context object.
Returns:
Providers:
- ProtoCompileInfo
- DefaultInfo
"""
# Load attrs that we pass as args
# This is done to allow writing rules that can call proto_compile with mutable attributes,
# such as in doc_template_compile
options = ctx.attr.options
extra_protoc_args = getattr(ctx.attr, "extra_protoc_args", [])
extra_protoc_files = ctx.files.extra_protoc_files
# Execute with extracted attrs
return proto_compile(ctx, options, extra_protoc_args, extra_protoc_files)
def proto_compile(ctx, options, extra_protoc_args, extra_protoc_files):
"""
Common implementation function for lang_*_compile rules.
Args:
ctx: The Bazel rule execution context object.
options: The mutable options dict.
extra_protoc_args: The mutable extra_protoc_args list.
extra_protoc_files: The mutable extra_protoc_files list.
Returns:
Providers:
- ProtoCompileInfo
- DefaultInfo
"""
# Load attrs
proto_infos = [dep[ProtoInfo] for dep in ctx.attr.protos]
plugins = [plugin[ProtoPluginInfo] for plugin in ctx.attr._plugins]
verbose = ctx.attr.verbose
# Load toolchain and tools
protoc_toolchain_info = ctx.toolchains[str(Label("//protobuf:toolchain_type"))]
protoc = protoc_toolchain_info.protoc_executable
fixer = protoc_toolchain_info.fixer_executable
# The directory where the outputs will be generated, relative to the package.
# A temporary dir is used here to allow output directories that may need to be merged later
rel_premerge_root = "_rpg_premerge_" + ctx.label.name
# The full path to the pre-merge output root, relative to the workspace
premerge_root = get_package_root(ctx) + "/" + rel_premerge_root
# The lists of generated files and directories that we expect to be produced, in their pre-merge
# locations
premerge_files = []
premerge_dirs = []
# Convert options dict to label keys
plugin_labels = [plugin.label for plugin in plugins]
per_plugin_options = {
# Dict of plugin label to options string list
Label(plugin_label): opts
for plugin_label, opts in options.items()
if plugin_label != "*"
}
# Only allow '*' by itself
all_plugin_options = [] # Options applied to all plugins, from the '*' key
if "*" in options:
if len(options) > 1:
fail("The options attr on target {} cannot contain '*' and other labels. Use either '*' or labels".format(ctx.label))
all_plugin_options = options["*"]
# Check all labels match a plugin in use
for plugin_label in per_plugin_options:
if plugin_label not in plugin_labels:
fail("The options attr on target {} contains a plugin label {} for a plugin that does not exist on this rule. The available plugins are {} ".format(ctx.label, plugin_label, plugin_labels))
###
### Setup plugins
###
# Each plugin is isolated to its own execution of protoc, as plugins may have differing
# exclusions that cannot be expressed in a single protoc execution for all plugins.
for plugin in plugins:
###
### Check plugin
###
# Check plugin outputs
if plugin.output_directory and (plugin.out or plugin.outputs or plugin.empty_template):
fail("Proto plugin {} cannot use output_directory in conjunction with outputs, out or empty_template".format(plugin.name))
###
### Gather proto files and filter by exclusions
###
protos = [] # The filtered set of .proto files to compile
plugin_outputs = []
proto_paths = [] # The paths passed to protoc
for proto_info in proto_infos:
for proto in proto_info.direct_sources:
# Check for exclusion
if any([
proto.dirname.endswith(exclusion) or proto.path.endswith(exclusion)
for exclusion in plugin.exclusions
]) or proto in protos:
# When using import_prefix, the ProtoInfo.direct_sources list appears to contain
# duplicate records, the final check 'proto in protos' removes these. See
# https://github.com/bazelbuild/bazel/issues/9127
continue
# Proto not excluded
protos.append(proto)
# Add per-proto outputs
for pattern in plugin.outputs:
plugin_outputs.append(ctx.actions.declare_file("{}/{}".format(
rel_premerge_root,
get_output_filename(proto, pattern, proto_info),
)))
# Get proto path for protoc
proto_paths.append(descriptor_proto_path(proto, proto_info))
# Skip plugin if all proto files have now been excluded
if len(protos) == 0:
if verbose > 2:
print(
'Skipping plugin "{}" for "{}" as all proto files have been excluded'.format(
plugin.name,
ctx.label,
),
) # buildifier: disable=print
continue
# Append current plugin outputs to global outputs before looking at per-plugin outputs;
# these are manually added globally as there may be srcjar outputs.
premerge_files.extend(plugin_outputs)
###
### Declare per-plugin outputs
###
# Some protoc plugins generate a set of output files (like python) while others generate a
# single 'archive' file that contains the individual outputs (like java). Jar outputs are
# gathered as a special case as we need to post-process them to have a 'srcjar' extension
# (java_library rules don't accept source jars with a 'jar' extension).
out_file = None
if plugin.out:
# Define out file
out_file = ctx.actions.declare_file("{}/{}".format(
rel_premerge_root,
plugin.out.replace("{name}", ctx.label.name),
))
plugin_outputs.append(out_file)
if not out_file.path.endswith(".jar"):
# Add output direct to global outputs
premerge_files.append(out_file)
else:
# Create .srcjar from .jar for global outputs
premerge_files.append(copy_file(
ctx,
out_file,
"{}.srcjar".format(out_file.basename.rpartition(".")[0]),
sibling = out_file,
))
###
### Declare plugin output directory if required
###
        # Some plugins output a structure that cannot be predicted from the input file paths alone.
# For these plugins, we simply declare the directory.
if plugin.output_directory:
out_file = ctx.actions.declare_directory(rel_premerge_root + "/" + "_plugin_" + plugin.name)
plugin_outputs.append(out_file)
premerge_dirs.append(out_file)
###
### Build command
###
# Determine the outputs expected by protoc.
# When plugin.empty_template is not set, protoc will output directly to the final targets.
# When set, we will direct the plugin outputs to a temporary folder, then use the fixer
# executable to write to the final targets.
if plugin.empty_template:
# Create path list for fixer
fixer_paths_file = ctx.actions.declare_file(rel_premerge_root + "/" + "_plugin_fixer_manifest_" + plugin.name + ".txt")
ctx.actions.write(fixer_paths_file, "\n".join([
file.path.partition(premerge_root + "/")[2] # Path of the file relative to the output root
for file in plugin_outputs
]))
# Create output directory for protoc to write into
fixer_dir = ctx.actions.declare_directory(
rel_premerge_root + "/" + "_plugin_fixed_" + plugin.name,
)
out_arg = fixer_dir.path
plugin_protoc_outputs = [fixer_dir]
# Apply fixer
ctx.actions.run(
inputs = [fixer_paths_file, fixer_dir, plugin.empty_template],
outputs = plugin_outputs,
arguments = [
fixer_paths_file.path,
plugin.empty_template.path,
fixer_dir.path,
premerge_root,
],
progress_message = "Applying fixer for {} plugin on target {}".format(
plugin.name,
ctx.label,
),
executable = fixer,
)
else:
# No fixer, protoc writes files directly
if out_file and "QUIRK_OUT_PASS_ROOT" not in plugin.quirks:
# Single output file, pass the full file name to out arg, unless QUIRK_OUT_PASS_ROOT
# quirk is in use
out_arg = out_file.path
else:
# No single output (or QUIRK_OUT_PASS_ROOT enabled), pass root dir
out_arg = premerge_root
plugin_protoc_outputs = plugin_outputs
# Build argument list for protoc execution
args_list, cmd_inputs, cmd_input_manifests = build_protoc_args(
ctx,
plugin,
proto_infos,
out_arg,
extra_options = all_plugin_options + per_plugin_options.get(plugin.label, []),
extra_protoc_args = extra_protoc_args,
)
args = ctx.actions.args()
args.add_all(args_list)
# Add import roots and files if required by plugin
# By default we pass just the descriptors and the proto paths, but these may not contain
# all of the comments etc from the source files
if "QUIRK_DIRECT_MODE" in plugin.quirks:
args.add_all([
"--proto_path=" + proto_info.proto_source_root
for proto_info in proto_infos
])
cmd_inputs += protos
# Add source proto files as descriptor paths
for proto_path in proto_paths:
args.add(proto_path)
###
### Specify protoc action
###
mnemonic = "ProtoCompile"
command = ("mkdir -p '{}' && ".format(premerge_root)) + protoc.path + " $@" # $@ is replaced with args list
cmd_inputs += extra_protoc_files
tools = [protoc] + ([plugin.tool_executable] if plugin.tool_executable else [])
# Amend command with debug options
if verbose > 0:
print("{}:".format(mnemonic), protoc.path, args) # buildifier: disable=print
if verbose > 1:
command += " && echo '\n##### SANDBOX AFTER RUNNING PROTOC' && find . -type f "
if verbose > 2:
command = "echo '\n##### SANDBOX BEFORE RUNNING PROTOC' && find . -type l && " + command
if verbose > 3:
command = "env && " + command
for f in cmd_inputs:
print("INPUT:", f.path) # buildifier: disable=print
for f in protos:
print("TARGET PROTO:", f.path) # buildifier: disable=print
for f in tools:
print("TOOL:", f.path) # buildifier: disable=print
for f in plugin_outputs:
print("EXPECTED OUTPUT:", f.path) # buildifier: disable=print
# Run protoc
ctx.actions.run_shell(
mnemonic = mnemonic,
command = command,
arguments = [args],
inputs = cmd_inputs,
tools = tools,
outputs = plugin_protoc_outputs,
use_default_shell_env = plugin.use_built_in_shell_environment,
input_manifests = cmd_input_manifests,
progress_message = "Compiling protoc outputs for {} plugin on target {}".format(
plugin.name,
ctx.label,
),
)
# Build final output defaults for merged locations
output_root = get_package_root(ctx) + "/" + ctx.label.name
output_files = depset()
output_dirs = depset()
prefix_path = ctx.attr.prefix_path
# Merge outputs
if premerge_dirs:
# If we have any output dirs specified, we declare a single output directory and merge all
# files in one go. This is necessary to prevent path prefix conflicts
if ctx.attr.output_mode != "PREFIXED":
fail("Cannot use output_mode = {} when using plugins with directory outputs")
# Declare single output directory
dir_name = ctx.label.name
if prefix_path:
dir_name += "/" + prefix_path
new_dir = ctx.actions.declare_directory(dir_name)
output_dirs = depset(direct = [new_dir])
# Build copy command for directory outputs
# Use cp {}/. rather than {}/* to allow for empty output directories from a plugin (e.g when
# no service exists, so no files generated)
command_parts = ["mkdir -p {} && cp -r {} '{}'".format(
# We need to be sure that the dirs exist, see:
# https://github.com/bazelbuild/bazel/issues/6393
" ".join(["'" + d.path + "'" for d in premerge_dirs]),
" ".join(["'" + d.path + "/.'" for d in premerge_dirs]),
new_dir.path,
)]
# Extend copy command with file outputs
command_input_files = premerge_dirs
for file in premerge_files:
# Strip pre-merge root from file path
path = strip_path_prefix(file.path, premerge_root)
# Prefix path is contained in new_dir.path created above and
# used below
# Add command to copy file to output
command_input_files.append(file)
command_parts.append("cp '{}' '{}'".format(
file.path,
"{}/{}".format(new_dir.path, path),
))
# Add debug options
if verbose > 1:
command_parts = command_parts + [
"echo '\n##### SANDBOX AFTER MERGING DIRECTORIES'",
"find . -type l",
]
if verbose > 2:
command_parts = [
"echo '\n##### SANDBOX BEFORE MERGING DIRECTORIES'",
"find . -type l",
] + command_parts
if verbose > 0:
print(
"Directory merge command: {}".format(" && ".join(command_parts)),
) # buildifier: disable=print
# Copy directories and files to shared output directory in one action
ctx.actions.run_shell(
mnemonic = "CopyDirs",
inputs = command_input_files,
outputs = [new_dir],
command = " && ".join(command_parts),
progress_message = "copying directories and files to {}".format(new_dir.path),
)
else:
# Otherwise, if we only have output files, build the output tree by
# aggregating files into one directory
output_files = []
for file in premerge_files:
# Strip pre-merge root from file path
path = strip_path_prefix(file.path, premerge_root)
# Prepend prefix path if given
if prefix_path:
path = prefix_path + "/" + path
# Select output location based on output mode
# In PREFIXED mode we output to a directory named by the target label
# In NO_PREFIX mode, we output directly to the package root
if ctx.attr.output_mode == "PREFIXED":
path = ctx.label.name + "/" + path
# Copy file to output
output_files.append(copy_file(
ctx,
file,
path,
))
output_files = depset(direct = output_files)
# Create depset containing all outputs
all_outputs = depset(direct = output_files.to_list() + output_dirs.to_list())
# Create default and proto compile providers
return [
ProtoCompileInfo(
label = ctx.label,
output_root = output_root,
output_files = output_files,
output_dirs = output_dirs,
),
DefaultInfo(
files = all_outputs,
runfiles = ctx.runfiles(transitive_files = all_outputs),
),
]
```
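The merging step above maps each pre-merge file path to a final location based on `prefix_path` and `output_mode`. Below is a minimal pure-Python sketch of that path mapping; the function and argument names are illustrative only, since the real rule uses `strip_path_prefix`, `copy_file` and the Bazel `ctx` APIs.
```python
# Conceptual sketch only; not part of the rule above.
def final_output_path(file_path, premerge_root, prefix_path, label_name, output_mode):
    # Strip the pre-merge root from the generated file's path
    path = file_path[len(premerge_root):].lstrip("/")
    # Prepend the optional prefix path
    if prefix_path:
        path = prefix_path + "/" + path
    # In PREFIXED mode, outputs are nested under a directory named after the target
    if output_mode == "PREFIXED":
        path = label_name + "/" + path
    return path
# final_output_path("pkg/_rpg_premerge_t/foo_pb2.py", "pkg/_rpg_premerge_t", "", "t", "PREFIXED")
# -> "t/foo_pb2.py"
```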
#### File: rules_proto_grpc/rust/rust_grpc_library.bzl
```python
load("//rust:rust_grpc_compile.bzl", "rust_grpc_compile")
load("//internal:compile.bzl", "proto_compile_attrs")
load("//rust:rust_proto_lib.bzl", "rust_proto_lib")
load("@rules_rust//rust:defs.bzl", "rust_library")
def rust_grpc_library(name, **kwargs): # buildifier: disable=function-docstring
# Compile protos
name_pb = name + "_pb"
name_lib = name + "_lib"
rust_grpc_compile(
name = name_pb,
**{
k: v
for (k, v) in kwargs.items()
if k in proto_compile_attrs.keys()
} # Forward args
)
# Create lib file
rust_proto_lib(
name = name_lib,
compilation = name_pb,
externs = ["protobuf", "grpc", "grpc_protobuf"],
)
# Create rust library
rust_library(
name = name,
srcs = [name_pb, name_lib],
deps = GRPC_DEPS + kwargs.get("deps", []),
visibility = kwargs.get("visibility"),
tags = kwargs.get("tags"),
)
GRPC_DEPS = [
Label("//rust/raze:futures"),
Label("//rust/raze:grpc"),
Label("//rust/raze:grpc_protobuf"),
Label("//rust/raze:protobuf"),
]
```
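A hypothetical BUILD usage of the macro above; the load path and the proto attribute name are assumptions, so check the rules_proto_grpc documentation for the exact attribute set.
```python
# BUILD.bazel (illustrative labels)
load("@rules_proto_grpc//rust:defs.bzl", "rust_grpc_library")  # load path assumed
rust_grpc_library(
    name = "greeter_rust_grpc",
    protos = ["//proto:greeter_proto"],  # attribute name assumed (forwarded via proto_compile_attrs)
)
```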
|
{
"source": "jessescn/laguinho",
"score": 4
}
|
#### File: laguinho/utils/__init__.py
```python
import os
import json
import click
def load_token():
"""Carrega o token do github
A função verifica se existe o arquivo
laguinho.config na raiz do usuário, caso
exista, ele lê o token e retorna, caso não
exista, é criado e requsitado ao usuário que
informe um token do github
"""
config_path = os.path.expanduser('~') + '/laguinho.config'
if os.path.exists(config_path):
with open(config_path, 'r') as file:
token = json.load(file)['GITHUB_TOKEN']
else:
        click.echo('To download data from GitHub, you need a GitHub permission TOKEN. If you do not know how to generate one, follow the instructions described in the README.')
        token = click.prompt("Please enter a valid GitHub TOKEN").strip()
file_content = {'GITHUB_TOKEN': token}
with open(config_path, 'w') as file:
json.dump(file_content, file, indent=2)
return token
os.environ['GITHUB_TOKEN'] = load_token()
```
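For reference, the `laguinho.config` file created above is a small JSON document; the snippet below is a minimal sketch of reading it back directly (the token value is a placeholder).
```python
import json
import os
config_path = os.path.expanduser('~') + '/laguinho.config'
# Expected file contents: {"GITHUB_TOKEN": "<your-token>"}
with open(config_path, 'r') as file:
    token = json.load(file)['GITHUB_TOKEN']
print(token)
```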
|
{
"source": "jessesheidlower/tweepy",
"score": 3
}
|
#### File: tweepy/tweepy/client.py
```python
from collections import namedtuple
import datetime
import logging
from platform import python_version
import time
import requests
import tweepy
from tweepy.auth import OAuthHandler
from tweepy.errors import (
BadRequest, Forbidden, HTTPException, TooManyRequests, TwitterServerError,
Unauthorized
)
from tweepy.media import Media
from tweepy.place import Place
from tweepy.poll import Poll
from tweepy.tweet import Tweet
from tweepy.user import User
log = logging.getLogger(__name__)
Response = namedtuple("Response", ("data", "includes", "errors", "meta"))
class Client:
"""Twitter API v2 Client
Parameters
----------
bearer_token : Optional[str]
Twitter API Bearer Token
consumer_key : Optional[str]
Twitter API Consumer Key
consumer_secret : Optional[str]
Twitter API Consumer Secret
access_token : Optional[str]
Twitter API Access Token
access_token_secret : Optional[str]
Twitter API Access Token Secret
wait_on_rate_limit : bool
Whether to wait when rate limit is reached
Attributes
----------
session : requests.Session
Requests Session used to make requests to the API
user_agent : str
User agent used when making requests to the API
"""
def __init__(self, bearer_token=None, consumer_key=None,
consumer_secret=None, access_token=None,
access_token_secret=None, *, wait_on_rate_limit=False):
self.bearer_token = bearer_token
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = access_token
self.access_token_secret = access_token_secret
self.wait_on_rate_limit = wait_on_rate_limit
self.session = requests.Session()
self.user_agent = (
f"Python/{python_version()} "
f"Requests/{requests.__version__} "
f"Tweepy/{tweepy.__version__}"
)
def request(self, method, route, params=None, json=None, user_auth=False):
host = "https://api.twitter.com"
headers = {"User-Agent": self.user_agent}
auth = None
if user_auth:
auth = OAuthHandler(self.consumer_key, self.consumer_secret)
auth.set_access_token(self.access_token, self.access_token_secret)
auth = auth.apply_auth()
else:
headers["Authorization"] = f"Bearer {self.bearer_token}"
log.debug(
f"Making API request: {method} {host + route}\n"
f"Parameters: {params}\n"
f"Headers: {headers}\n"
f"Body: {json}"
)
with self.session.request(
method, host + route, params=params, json=json, headers=headers,
auth=auth
) as response:
log.debug(
"Received API response: "
f"{response.status_code} {response.reason}\n"
f"Headers: {response.headers}\n"
f"Content: {response.content}"
)
if response.status_code == 400:
raise BadRequest(response)
if response.status_code == 401:
raise Unauthorized(response)
if response.status_code == 403:
raise Forbidden(response)
# Handle 404?
if response.status_code == 429:
if self.wait_on_rate_limit:
reset_time = int(response.headers["x-rate-limit-reset"])
sleep_time = reset_time - int(time.time()) + 1
if sleep_time > 0:
log.warning(
"Rate limit exceeded. "
f"Sleeping for {sleep_time} seconds."
)
time.sleep(sleep_time)
return self.request(method, route, params, json, user_auth)
else:
raise TooManyRequests(response)
if response.status_code >= 500:
raise TwitterServerError(response)
if not 200 <= response.status_code < 300:
raise HTTPException(response)
return response.json()
def _make_request(self, method, route, params={}, endpoint_parameters=None,
json=None, data_type=None, user_auth=False):
request_params = {}
for param_name, param_value in params.items():
if param_name in endpoint_parameters:
if isinstance(param_value, list):
request_params[param_name] = ','.join(map(str, param_value))
elif param_name in ("start_time", "end_time") and isinstance(param_value, datetime.datetime):
if param_value.tzinfo is not None:
param_value = param_value.astimezone(datetime.timezone.utc)
request_params[param_name] = param_value.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
# TODO: Constant datetime format string?
else:
request_params[param_name] = param_value
elif param_name.replace('_', '.') in endpoint_parameters:
# Use := when support for Python 3.7 is dropped
request_params[param_name.replace('_', '.')] = ','.join(param_value)
else:
log.warn(f"Unexpected parameter: {param_name}")
response = self.request(method, route, params=request_params,
json=json, user_auth=user_auth)
data = response.get("data")
if data_type is not None:
if isinstance(data, list):
data = [data_type(result) for result in data]
elif data is not None:
data = data_type(data)
includes = response.get("includes", {})
if "media" in includes:
includes["media"] = [Media(media) for media in includes["media"]]
if "places" in includes:
includes["places"] = [Place(place) for place in includes["places"]]
if "poll" in includes:
includes["polls"] = [Poll(poll) for poll in includes["polls"]]
if "tweets" in includes:
includes["tweets"] = [Tweet(tweet) for tweet in includes["tweets"]]
if "users" in includes:
includes["users"] = [User(user) for user in includes["users"]]
errors = response.get("errors", [])
meta = response.get("meta", {})
return Response(data, includes, errors, meta)
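    # Example of the parameter normalisation performed by _make_request above:
    #   tweet_fields=["created_at", "lang"]  ->  request param "tweet.fields": "created_at,lang"
    #   start_time=datetime.datetime(2021, 6, 1, tzinfo=datetime.timezone.utc)
    #     -> request param "start_time": "2021-06-01T00:00:00.000000Z"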
# Hide replies
def hide_reply(self, id):
"""Hides a reply to a Tweet.
Parameters
----------
id : Union[int, str]
Unique identifier of the Tweet to hide. The Tweet must belong to a
conversation initiated by the authenticating user.
Returns
-------
bool
Indicates if the Tweet was successfully hidden.
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/hide-replies/api-reference/put-tweets-id-hidden
"""
return self._make_request(
"PUT", f"/2/tweets/{id}/hidden", json={"hidden": True},
user_auth=True
)[0]["hidden"]
def unhide_reply(self, id):
"""Unhides a reply to a Tweet.
Parameters
----------
id : Union[int, str]
Unique identifier of the Tweet to unhide. The Tweet must belong to
a conversation initiated by the authenticating user.
Returns
-------
bool
Indicates if the Tweet was successfully unhidden.
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/hide-replies/api-reference/put-tweets-id-hidden
"""
return self._make_request(
"PUT", f"/2/tweets/{id}/hidden", json={"hidden": False},
user_auth=True
)[0]["hidden"]
# Likes
def unlike(self, tweet_id):
"""Unlike a Tweet.
        The request succeeds with no action when the user sends a request for
        a Tweet they are not currently liking or have already unliked.
Parameters
----------
tweet_id : Union[int, str]
The ID of the Tweet that you would like to unlike.
Returns
-------
bool
Indicates whether the user is unliking the specified Tweet as a
result of this request. The returned value is ``False`` for a
successful unlike request.
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/likes/api-reference/delete-users-user_id-likes
"""
id = self.access_token.partition('-')[0]
route = f"/2/users/{id}/likes/{tweet_id}"
return self._make_request("DELETE", route, user_auth=True)[0]["liked"]
def like(self, tweet_id):
"""Like a Tweet.
Parameters
----------
tweet_id : Union[int, str]
The ID of the Tweet that you would like to Like.
Returns
-------
bool
Indicates whether the user likes the specified Tweet as a result of
this request.
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/likes/api-reference/post-users-user_id-likes
"""
id = self.access_token.partition('-')[0]
route = f"/2/users/{id}/likes"
return self._make_request(
"POST", route, json={"tweet_id": str(tweet_id)}, user_auth=True
)[0]["liked"]
# Search Tweets
def search_all_tweets(self, query, **params):
"""search_all_tweets( \
query, *, end_time, expansions, max_results, media_fields, \
next_token, place_fields, poll_fields, since_id, start_time, \
tweet_fields, until_id, user_fields \
)
This endpoint is only available to those users who have been approved
for the `Academic Research product track`_.
The full-archive search endpoint returns the complete history of public
Tweets matching a search query; since the first Tweet was created March
26, 2006.
The Tweets returned by this endpoint count towards the Project-level
`Tweet cap`_.
Parameters
----------
query : str
One query for matching Tweets. Up to 1024 characters.
end_time : Union[datetime.datetime, str]
YYYY-MM-DDTHH:mm:ssZ (ISO 8601/RFC 3339). Used with ``start_time``.
The newest, most recent UTC timestamp to which the Tweets will be
provided. Timestamp is in second granularity and is exclusive (for
example, 12:00:01 excludes the first second of the minute). If used
without ``start_time``, Tweets from 30 days before ``end_time``
will be returned by default. If not specified, ``end_time`` will
default to [now - 30 seconds].
expansions : Union[List[str], str]
:ref:`expansions_parameter`
max_results : int
The maximum number of search results to be returned by a request. A
number between 10 and the system limit (currently 500). By default,
a request response will return 10 results.
media_fields : Union[List[str], str]
:ref:`media_fields_parameter`
next_token : str
This parameter is used to get the next 'page' of results. The value
used with the parameter is pulled directly from the response
provided by the API, and should not be modified. You can learn more
by visiting our page on `pagination`_.
place_fields : Union[List[str], str]
:ref:`place_fields_parameter`
poll_fields : Union[List[str], str]
:ref:`poll_fields_parameter`
since_id : Union[int, str]
Returns results with a Tweet ID greater than (for example, more
recent than) the specified ID. The ID specified is exclusive and
responses will not include it. If included with the same request as
a ``start_time`` parameter, only ``since_id`` will be used.
start_time : Union[datetime.datetime, str]
YYYY-MM-DDTHH:mm:ssZ (ISO 8601/RFC 3339). The oldest UTC timestamp
from which the Tweets will be provided. Timestamp is in second
granularity and is inclusive (for example, 12:00:01 includes the
first second of the minute). By default, a request will return
Tweets from up to 30 days ago if you do not include this parameter.
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
until_id : Union[int, str]
Returns results with a Tweet ID less than (that is, older than) the
specified ID. Used with ``since_id``. The ID specified is exclusive
and responses will not include it.
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/search/api-reference/get-tweets-search-all
.. _Academic Research product track: https://developer.twitter.com/en/docs/projects/overview#product-track
.. _Tweet cap: https://developer.twitter.com/en/docs/projects/overview#tweet-cap
.. _pagination: https://developer.twitter.com/en/docs/twitter-api/tweets/search/integrate/paginate
"""
params["query"] = query
return self._make_request(
"GET", "/2/tweets/search/all", params=params,
endpoint_parameters=(
"end_time", "expansions", "max_results", "media.fields",
"next_token", "place.fields", "poll.fields", "query",
"since_id", "start_time", "tweet.fields", "until_id",
"user.fields"
), data_type=Tweet
)
def search_recent_tweets(self, query, *, user_auth=False, **params):
"""search_recent_tweets( \
query, *, user_auth=False, end_time, expansions, max_results, \
media_fields, next_token, place_fields, poll_fields, since_id, \
start_time, tweet_fields, until_id, user_fields \
)
The recent search endpoint returns Tweets from the last seven days that
match a search query.
The Tweets returned by this endpoint count towards the Project-level
`Tweet cap`_.
Parameters
----------
query : str
One rule for matching Tweets. If you are using a
`Standard Project`_ at the Basic `access level`_, you can use the
basic set of `operators`_ and can make queries up to 512 characters
long. If you are using an `Academic Research Project`_ at the Basic
access level, you can use all available operators and can make
queries up to 1,024 characters long.
end_time : Union[datetime.datetime, str]
YYYY-MM-DDTHH:mm:ssZ (ISO 8601/RFC 3339). The newest, most recent
UTC timestamp to which the Tweets will be provided. Timestamp is in
second granularity and is exclusive (for example, 12:00:01 excludes
the first second of the minute). By default, a request will return
Tweets from as recent as 30 seconds ago if you do not include this
parameter.
expansions : Union[List[str], str]
:ref:`expansions_parameter`
max_results : int
The maximum number of search results to be returned by a request. A
number between 10 and 100. By default, a request response will
return 10 results.
media_fields : Union[List[str], str]
:ref:`media_fields_parameter`
next_token : str
This parameter is used to get the next 'page' of results. The value
used with the parameter is pulled directly from the response
provided by the API, and should not be modified.
place_fields : Union[List[str], str]
:ref:`place_fields_parameter`
poll_fields : Union[List[str], str]
:ref:`poll_fields_parameter`
since_id : Union[int, str]
Returns results with a Tweet ID greater than (that is, more recent
than) the specified ID. The ID specified is exclusive and responses
will not include it. If included with the same request as a
``start_time`` parameter, only ``since_id`` will be used.
start_time : Union[datetime.datetime, str]
YYYY-MM-DDTHH:mm:ssZ (ISO 8601/RFC 3339). The oldest UTC timestamp
(from most recent seven days) from which the Tweets will be
provided. Timestamp is in second granularity and is inclusive (for
example, 12:00:01 includes the first second of the minute). If
included with the same request as a ``since_id`` parameter, only
``since_id`` will be used. By default, a request will return Tweets
from up to seven days ago if you do not include this parameter.
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
until_id : Union[int, str]
Returns results with a Tweet ID less than (that is, older than) the
specified ID. The ID specified is exclusive and responses will not
include it.
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/search/api-reference/get-tweets-search-recent
.. _Tweet cap: https://developer.twitter.com/en/docs/projects/overview#tweet-cap
.. _Standard Project: https://developer.twitter.com/en/docs/projects
.. _access level: https://developer.twitter.com/en/products/twitter-api/early-access/guide.html#na_1
.. _operators: https://developer.twitter.com/en/docs/twitter-api/tweets/search/integrate/build-a-query
.. _Academic Research Project: https://developer.twitter.com/en/docs/projects
"""
params["query"] = query
return self._make_request(
"GET", "/2/tweets/search/recent", params=params,
endpoint_parameters=(
"end_time", "expansions", "max_results", "media.fields",
"next_token", "place.fields", "poll.fields", "query",
"since_id", "start_time", "tweet.fields", "until_id",
"user.fields"
), data_type=Tweet, user_auth=user_auth
)
# Timelines
def get_users_mentions(self, id, *, user_auth=False, **params):
"""get_users_mentions( \
id, *, user_auth=False, end_time, expansions, max_results, \
media_fields, pagination_token, place_fields, poll_fields, \
since_id, start_time, tweet_fields, until_id, user_fields \
)
Returns Tweets mentioning a single user specified by the requested user
ID. By default, the most recent ten Tweets are returned per request.
Using pagination, up to the most recent 800 Tweets can be retrieved.
The Tweets returned by this endpoint count towards the Project-level
`Tweet cap`_.
Parameters
----------
id : Union[int, str]
Unique identifier of the user for whom to return Tweets mentioning
the user. User ID can be referenced using the `user/lookup`_
endpoint. More information on Twitter IDs is `here`_.
user_auth : bool
Whether or not to use OAuth 1.0a User context
end_time : Union[datetime.datetime, str]
YYYY-MM-DDTHH:mm:ssZ (ISO 8601/RFC 3339). The new UTC timestamp
from which the Tweets will be provided. Timestamp is in second
granularity and is inclusive (for example, 12:00:01 includes the
first second of the minute).
Please note that this parameter does not support a millisecond
value.
expansions : Union[List[str], str]
:ref:`expansions_parameter`
max_results : int
Specifies the number of Tweets to try and retrieve, up to a maximum
of 100 per distinct request. By default, 10 results are returned if
this parameter is not supplied. The minimum permitted value is 5.
It is possible to receive less than the ``max_results`` per request
throughout the pagination process.
media_fields : Union[List[str], str]
:ref:`media_fields_parameter`
pagination_token : str
This parameter is used to move forwards or backwards through
'pages' of results, based on the value of the ``next_token`` or
``previous_token`` in the response. The value used with the
parameter is pulled directly from the response provided by the API,
and should not be modified.
place_fields : Union[List[str], str]
:ref:`place_fields_parameter`
poll_fields : Union[List[str], str]
:ref:`poll_fields_parameter`
since_id : Union[int, str]
Returns results with a Tweet ID greater than (that is, more recent
than) the specified 'since' Tweet ID. There are limits to the
number of Tweets that can be accessed through the API. If the limit
of Tweets has occurred since the ``since_id``, the ``since_id``
will be forced to the oldest ID available. More information on
Twitter IDs is `here`_.
start_time : Union[datetime.datetime, str]
YYYY-MM-DDTHH:mm:ssZ (ISO 8601/RFC 3339). The oldest UTC timestamp
from which the Tweets will be provided. Timestamp is in second
granularity and is inclusive (for example, 12:00:01 includes the
first second of the minute).
Please note that this parameter does not support a millisecond
value.
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
until_id : Union[int, str]
            Returns results with a Tweet ID less than (that is, older
than) the specified 'until' Tweet ID. There are limits to the
number of Tweets that can be accessed through the API. If the limit
of Tweets has occurred since the ``until_id``, the ``until_id``
will be forced to the most recent ID available. More information on
Twitter IDs is `here`_.
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-mentions
.. _Tweet cap: https://developer.twitter.com/en/docs/projects/overview#tweet-cap
.. _user/lookup: https://developer.twitter.com/en/docs/twitter-api/users/lookup/introduction
.. _here: https://developer.twitter.com/en/docs/twitter-ids
"""
return self._make_request(
"GET", f"/2/users/{id}/mentions", params=params,
endpoint_parameters=(
"end_time", "expansions", "max_results", "media.fields",
"pagination_token", "place.fields", "poll.fields", "since_id",
"start_time", "tweet.fields", "until_id", "user.fields"
), data_type=Tweet, user_auth=user_auth
)
def get_users_tweets(self, id, *, user_auth=False, **params):
"""get_users_tweets( \
id, *, user_auth=False, end_time, exclude, expansions, \
max_results, media_fields, pagination_token, place_fields, \
poll_fields, since_id, start_time, tweet_fields, until_id, \
user_fields \
)
Returns Tweets composed by a single user, specified by the requested
user ID. By default, the most recent ten Tweets are returned per
request. Using pagination, the most recent 3,200 Tweets can be
retrieved.
The Tweets returned by this endpoint count towards the Project-level
`Tweet cap`_.
Parameters
----------
id : Union[int, str]
Unique identifier of the Twitter account (user ID) for whom to
return results. User ID can be referenced using the `user/lookup`_
endpoint. More information on Twitter IDs is `here`_.
user_auth : bool
Whether or not to use OAuth 1.0a User context
end_time : Union[datetime.datetime, str]
YYYY-MM-DDTHH:mm:ssZ (ISO 8601/RFC 3339). The newest or most recent
UTC timestamp from which the Tweets will be provided. Only the 3200
most recent Tweets are available. Timestamp is in second
granularity and is inclusive (for example, 12:00:01 includes the
first second of the minute). Minimum allowable time is
2010-11-06T00:00:01Z
Please note that this parameter does not support a millisecond
value.
exclude : Union[List[str], str]
Comma-separated list of the types of Tweets to exclude from the
response. When ``exclude=retweets`` is used, the maximum historical
Tweets returned is still 3200. When the ``exclude=replies``
parameter is used for any value, only the most recent 800 Tweets
are available.
expansions : Union[List[str], str]
:ref:`expansions_parameter`
max_results : int
Specifies the number of Tweets to try and retrieve, up to a maximum
of 100 per distinct request. By default, 10 results are returned if
this parameter is not supplied. The minimum permitted value is 5.
It is possible to receive less than the ``max_results`` per request
throughout the pagination process.
media_fields : Union[List[str], str]
:ref:`media_fields_parameter`
pagination_token : str
This parameter is used to move forwards or backwards through
'pages' of results, based on the value of the ``next_token`` or
``previous_token`` in the response. The value used with the
parameter is pulled directly from the response provided by the API,
and should not be modified.
place_fields : Union[List[str], str]
:ref:`place_fields_parameter`
poll_fields : Union[List[str], str]
:ref:`poll_fields_parameter`
since_id : Union[int, str]
Returns results with a Tweet ID greater than (that is, more recent
than) the specified 'since' Tweet ID. Only the 3200 most recent
Tweets are available. The result will exclude the ``since_id``. If
the limit of Tweets has occurred since the ``since_id``, the
``since_id`` will be forced to the oldest ID available.
start_time : Union[datetime.datetime, str]
YYYY-MM-DDTHH:mm:ssZ (ISO 8601/RFC 3339). The oldest or earliest
UTC timestamp from which the Tweets will be provided. Only the 3200
most recent Tweets are available. Timestamp is in second
granularity and is inclusive (for example, 12:00:01 includes the
first second of the minute). Minimum allowable time is
2010-11-06T00:00:00Z
Please note that this parameter does not support a millisecond
value.
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
until_id : Union[int, str]
            Returns results with a Tweet ID less than (that is, older
than) the specified 'until' Tweet ID. Only the 3200 most recent
Tweets are available. The result will exclude the ``until_id``. If
the limit of Tweets has occurred since the ``until_id``, the
``until_id`` will be forced to the most recent ID available.
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/timelines/api-reference/get-users-id-tweets
.. _Tweet cap: https://developer.twitter.com/en/docs/projects/overview#tweet-cap
.. _user/lookup: https://developer.twitter.com/en/docs/twitter-api/users/lookup/introduction
.. _here: https://developer.twitter.com/en/docs/twitter-ids
"""
return self._make_request(
"GET", f"/2/users/{id}/tweets", params=params,
endpoint_parameters=(
"end_time", "exclude", "expansions", "max_results",
"media.fields", "pagination_token", "place.fields",
"poll.fields", "since_id", "start_time", "tweet.fields",
"until_id", "user.fields"
), data_type=Tweet, user_auth=user_auth
)
# Tweet lookup
def get_tweet(self, id, *, user_auth=False, **params):
"""get_tweet(id, *, user_auth=False, expansions, media_fields, \
            place_fields, poll_fields, tweet_fields, user_fields)
Returns a variety of information about a single Tweet specified by
the requested ID.
Parameters
----------
id : Union[int, str]
Unique identifier of the Tweet to request
user_auth : bool
Whether or not to use OAuth 1.0a User context
expansions : Union[List[str], str]
:ref:`expansions_parameter`
media_fields : Union[List[str], str]
:ref:`media_fields_parameter`
place_fields : Union[List[str], str]
:ref:`place_fields_parameter`
poll_fields : Union[List[str], str]
:ref:`poll_fields_parameter`
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/lookup/api-reference/get-tweets-id
"""
return self._make_request(
"GET", f"/2/tweets/{id}", params=params,
endpoint_parameters=(
"expansions", "media.fields", "place.fields", "poll.fields",
"tweet.fields", "user.fields"
), data_type=Tweet, user_auth=user_auth
)
def get_tweets(self, ids, *, user_auth=False, **params):
"""get_tweets(ids, *, user_auth=False, expansions, media_fields, \
            place_fields, poll_fields, tweet_fields, user_fields)
Returns a variety of information about the Tweet specified by the
requested ID or list of IDs.
Parameters
----------
ids : Union[List[int, str], str]
A comma separated list of Tweet IDs. Up to 100 are allowed in a
single request. Make sure to not include a space between commas and
fields.
user_auth : bool
Whether or not to use OAuth 1.0a User context
expansions : Union[List[str], str]
:ref:`expansions_parameter`
media_fields : Union[List[str], str]
:ref:`media_fields_parameter`
place_fields : Union[List[str], str]
:ref:`place_fields_parameter`
poll_fields : Union[List[str], str]
:ref:`poll_fields_parameter`
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/tweets/lookup/api-reference/get-tweets
"""
params["ids"] = ids
return self._make_request(
"GET", "/2/tweets", params=params,
endpoint_parameters=(
"ids", "expansions", "media.fields", "place.fields",
"poll.fields", "tweet.fields", "user.fields"
), data_type=Tweet, user_auth=user_auth
)
# Blocks
def unblock(self, target_user_id):
"""Unblock another user.
The request succeeds with no action when the user sends a request to a
user they're not blocking or have already unblocked.
Parameters
----------
target_user_id : Union[int, str]
The user ID of the user that you would like to unblock.
Returns
-------
bool
Indicates whether the user is blocking the specified user as a
result of this request. The returned value is ``False`` for a
successful unblock request.
References
----------
https://developer.twitter.com/en/docs/twitter-api/users/blocks/api-reference/delete-users-user_id-blocking
"""
source_user_id = self.access_token.partition('-')[0]
route = f"/2/users/{source_user_id}/blocking/{target_user_id}"
return self._make_request(
"DELETE", route, user_auth=True
)[0]["blocking"]
def block(self, target_user_id):
"""Block another user.
Parameters
----------
target_user_id : Union[int, str]
The user ID of the user that you would like to block.
Returns
-------
bool
Indicates whether the user is blocking the specified user as a
result of this request.
References
----------
https://developer.twitter.com/en/docs/twitter-api/users/blocks/api-reference/post-users-user_id-blocking
"""
id = self.access_token.partition('-')[0]
route = f"/2/users/{id}/blocking"
return self._make_request(
"POST", route, json={"target_user_id": str(target_user_id)},
user_auth=True
)[0]["blocking"]
# Follows
def unfollow(self, target_user_id):
"""Allows a user ID to unfollow another user.
The request succeeds with no action when the authenticated user sends a
request to a user they're not following or have already unfollowed.
Parameters
----------
target_user_id : Union[int, str]
The user ID of the user that you would like to unfollow.
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/users/follows/api-reference/delete-users-source_id-following
"""
source_user_id = self.access_token.partition('-')[0]
route = f"/2/users/{source_user_id}/following/{target_user_id}"
return self._make_request("DELETE", route, user_auth=True)
def get_users_followers(self, id, *, user_auth=False, **params):
"""get_users_followers( \
id, *, user_auth=False, expansions, max_results, \
pagination_token, tweet_fields, user_fields \
)
Returns a list of users who are followers of the specified user ID.
Parameters
----------
id : Union[int, str]
The user ID whose followers you would like to retrieve.
user_auth : bool
Whether or not to use OAuth 1.0a User context
expansions : Union[List[str], str]
:ref:`expansions_parameter`
max_results : int
The maximum number of results to be returned per page. This can be
            a number between 1 and 1000. By default, each page will return
100 results.
pagination_token : str
Used to request the next page of results if all results weren't
returned with the latest request, or to go back to the previous
page of results. To return the next page, pass the ``next_token``
returned in your previous response. To go back one page, pass the
``previous_token`` returned in your previous response.
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/users/follows/api-reference/get-users-id-followers
"""
return self._make_request(
"GET", f"/2/users/{id}/followers", params=params,
endpoint_parameters=(
"expansions", "max_results", "pagination_token",
"tweet.fields", "user.fields"
),
data_type=User, user_auth=user_auth
)
def get_users_following(self, id, *, user_auth=False, **params):
"""get_users_following( \
id, *, user_auth=False, expansions, max_results, \
pagination_token, tweet_fields, user_fields \
)
Returns a list of users the specified user ID is following.
Parameters
----------
id : Union[int, str]
The user ID whose following you would like to retrieve.
user_auth : bool
Whether or not to use OAuth 1.0a User context
expansions : Union[List[str], str]
:ref:`expansions_parameter`
max_results : int
The maximum number of results to be returned per page. This can be
            a number between 1 and 1000. By default, each page will return
100 results.
pagination_token : str
Used to request the next page of results if all results weren't
returned with the latest request, or to go back to the previous
page of results. To return the next page, pass the ``next_token``
returned in your previous response. To go back one page, pass the
``previous_token`` returned in your previous response.
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/users/follows/api-reference/get-users-id-following
"""
return self._make_request(
"GET", f"/2/users/{id}/following", params=params,
endpoint_parameters=(
"expansions", "max_results", "pagination_token",
"tweet.fields", "user.fields"
), data_type=User, user_auth=user_auth
)
def follow(self, target_user_id):
"""Allows a user ID to follow another user.
If the target user does not have public Tweets, this endpoint will send
a follow request.
The request succeeds with no action when the authenticated user sends a
request to a user they're already following, or if they're sending a
follower request to a user that does not have public Tweets.
Parameters
----------
target_user_id : Union[int, str]
The user ID of the user that you would like to follow
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/users/follows/api-reference/post-users-source_user_id-following
"""
source_user_id = self.access_token.partition('-')[0]
route = f"/2/users/{source_user_id}/following"
return self._make_request(
"POST", route, json={"target_user_id": str(target_user_id)},
user_auth=True
)
# User lookup
def get_user(self, *, id=None, username=None, user_auth=False, **params):
"""get_user(*, id, username, user_auth=False, expansions, \
tweet_fields, user_fields)
Returns a variety of information about a single user specified by the
requested ID or username.
Parameters
----------
id : Union[int, str]
The ID of the user to lookup.
username : str
The Twitter username (handle) of the user.
user_auth : bool
Whether or not to use OAuth 1.0a User context
expansions : Union[List[str], str]
:ref:`expansions_parameter`
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Raises
------
TypeError
If ID and username are not passed or both are passed
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/users/lookup/api-reference/get-users-id
https://developer.twitter.com/en/docs/twitter-api/users/lookup/api-reference/get-users-by-username-username
"""
if id is not None and username is not None:
raise TypeError("Expected ID or username, not both")
route = "/2/users"
if id is not None:
route += f"/{id}"
elif username is not None:
route += f"/by/username/{username}"
else:
raise TypeError("ID or username is required")
return self._make_request(
"GET", route, params=params,
endpoint_parameters=("expansions", "tweet.fields", "user.fields"),
data_type=User, user_auth=user_auth
)
def get_users(self, *, ids=None, usernames=None, user_auth=False,
**params):
"""get_users(*, ids, usernames, user_auth=False, expansions, \
tweet_fields, user_fields)
Returns a variety of information about one or more users specified by
the requested IDs or usernames.
Parameters
----------
ids : Union[List[int, str], str]
A comma separated list of user IDs. Up to 100 are allowed in a
single request. Make sure to not include a space between commas and
fields.
usernames : Union[List[str], str]
A comma separated list of Twitter usernames (handles). Up to 100
are allowed in a single request. Make sure to not include a space
between commas and fields.
user_auth : bool
Whether or not to use OAuth 1.0a User context
expansions : Union[List[str], str]
:ref:`expansions_parameter`
tweet_fields : Union[List[str], str]
:ref:`tweet_fields_parameter`
user_fields : Union[List[str], str]
:ref:`user_fields_parameter`
Raises
------
TypeError
If IDs and usernames are not passed or both are passed
Returns
-------
:ref:`response_reference`
References
----------
https://developer.twitter.com/en/docs/twitter-api/users/lookup/api-reference/get-users
https://developer.twitter.com/en/docs/twitter-api/users/lookup/api-reference/get-users-by
"""
if ids is not None and usernames is not None:
raise TypeError("Expected IDs or usernames, not both")
route = "/2/users"
if ids is not None:
params["ids"] = ids
elif usernames is not None:
route += "/by"
params["usernames"] = usernames
else:
raise TypeError("IDs or usernames are required")
return self._make_request(
"GET", route, params=params,
endpoint_parameters=(
"ids", "usernames", "expansions", "tweet.fields", "user.fields"
), data_type=User, user_auth=user_auth
)
```
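A brief usage sketch of the Client shown above; the bearer token is a placeholder and the query and fields are only examples.
```python
from tweepy.client import Client
client = Client(bearer_token="<BEARER_TOKEN>", wait_on_rate_limit=True)
# search_recent_tweets returns the Response namedtuple (data, includes, errors, meta)
response = client.search_recent_tweets(
    "python -is:retweet",
    max_results=10,
    tweet_fields=["created_at", "lang"],
)
for tweet in response.data or []:
    print(tweet.id, tweet.created_at)
```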
|
{
"source": "jessesiu/hku_scholars_hub",
"score": 3
}
|
#### File: jessesiu/hku_scholars_hub/data_availability.py
```python
from bs4 import BeautifulSoup as bts
import re
import csv
from urllib.request import urlopen
import xlrd
import xlwt
def check_url(url):
html = urlopen(url).read().decode('utf-8')
content = bts(html, features='lxml')
result = True
if "Item Withheld" in content.title.get_text():
result = False
return result
count_plos = 0
count_sc = 0
all_href_plos = []
all_href_sc = []
all_href_excel_plos = []
all_href_excel_sc = []
journal_name = input("Enter the journal name (e.g. plos one): ")
journal_name = journal_name.replace(" ","+")
#print ("you entered " + input_var)
#input the journal name
#journal_name = "plos+one"
#input the year
#search_time = "%5B2016+TO+2018%5D"
search_time = input("Enter the search time (e.g. 2016): ")
#search_time = "2016"
#PLOS ONE
purl = "http://hub.hku.hk/simple-search?query=&location=publication&filter_field_1=journal&filter_type_1=equals&filter_value_1="+journal_name+"&filter_field_2=dateIssued&filter_type_2=equals&filter_value_2="+search_time+"&sort_by=score&order=desc&rpp=25&etal=0&start=0";
print(purl)
fo = open("./test.txt", "w")
fo.write("HKU Scholarhub URL"+"|"+"Manuscript DOI URL"+"|"+"Data Availability Comment"+"\n")
while purl is not None:
html = urlopen(purl).read().decode('utf-8')
content = bts(html, features='lxml')
    pages = content.find('ul', 'pagination pull-right')
#print (pages)
next_url = None
flag = False
pages_refs = pages.find_all('li')
for page_ref in pages_refs:
if page_ref.has_attr('class'):
flag = True
continue
if flag == True:
next_url = page_ref.find('a')['href']
break
#print(next_url)
if next_url is None:
purl = None
elif "simple-search" in next_url:
purl = 'http://hub.hku.hk'+ next_url
else:
purl = None
titles = content.find_all('div',{"class":"dctitle"})
for title in titles:
href = title.find('a')['href']
count_plos+=1
href = "http://hub.hku.hk" + href
#record handle url
print(href)
html = urlopen(href).read().decode('utf-8')
content = bts(html, features='lxml')
dcdoi = content.find('div',{"class":"dcdoi"})
doi_link = dcdoi.find('a')['href']
##record doi_link
print(doi_link)
html = urlopen(doi_link).read().decode('utf-8')
content = bts(html, features='lxml')
temp = content.find('div',{"class":"articleinfo"})
data_comments = temp.find_all('p')
for comment in data_comments:
if "Data Availability:" in comment.get_text():
## record data_comment
data_comment= comment.get_text().split(':',1)[1].strip()
print(data_comment)
fo.write(href+"|"+doi_link+"|"+data_comment+"\n")
all_href_plos.append(href.strip())
fo.close()
```
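The script writes a pipe-delimited report to ./test.txt; below is a small sketch of reading it back with the csv module (which the script imports but does not otherwise use).
```python
import csv
with open("./test.txt", newline="", encoding="utf-8") as f:
    reader = csv.DictReader(f, delimiter="|")
    for row in reader:
        print(row["Manuscript DOI URL"], row["Data Availability Comment"])
```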
|
{
"source": "JesseSlim/polymetric",
"score": 3
}
|
#### File: polymetric/exporters/gds.py
```python
import gdspy
from importlib import reload
from .. import polymetric as pm
# should we reload the gdspy module every time something is saved/loaded?
# GDSPY keeps some global library of stuff in memory that might need to be wiped
DO_RELOAD = True
# maximum number of vertices per polygon. Absolute limit set by GDS file format is 8191
# can be adjusted by user
MAX_VERTICES = 8000
def save(shape, filename, cell_name="POLYGON", datatype=1000,
ignore_interiors=False, **kw):
if DO_RELOAD:
reload(gdspy)
poly_cell = gdspy.Cell(cell_name)
if not ignore_interiors and shape.has_interiors():
raise ValueError(
"Polygon contains interiors, which can not be represented"
" in the GDS file format. To ignore, call with ignore_interiors=True")
vertex_lists = shape.get_exterior_vertex_lists()
for vl in vertex_lists:
if len(vl) > MAX_VERTICES:
raise ValueError(f"Polygon contains {len(vl)} vertices, more than the limit of {MAX_VERTICES} vertices")
# shapely always duplicates the first vertex as the last vertex, get rid of that
gds_poly = gdspy.Polygon(vl[:-1], datatype=datatype)
poly_cell.add(gds_poly)
gdspy.write_gds(filename, **kw)
def save_multiple(shapes, filename, layers=None, cell_name="POLYGON", datatype=1000, ignore_interiors=False, **kw):
if DO_RELOAD:
reload(gdspy)
poly_cell = gdspy.Cell(cell_name)
for i, shape in enumerate(shapes):
if not ignore_interiors and shape.has_interiors():
raise ValueError(
"Polygon contains interiors, which can not be represented"
" in the GDS file format. To ignore, call with ignore_interiors=True")
vertex_lists = shape.get_exterior_vertex_lists()
for vl in vertex_lists:
if len(vl) > MAX_VERTICES:
raise ValueError("Polygon contains {} vertices, more than the"
" limit of {} vertices".format(len(vl), MAX_VERTICES))
layer = 0
if layers is not None:
layer = layers[i]
# shapely always duplicates the first vertex as the last vertex, get rid of that
gds_poly = gdspy.Polygon(vl[:-1], datatype=datatype, layer=layer)
poly_cell.add(gds_poly)
gdspy.write_gds(filename, **kw)
def load(filename):
if DO_RELOAD:
reload(gdspy)
gdslib = gdspy.GdsLibrary(infile=filename)
polys_per_layer = {}
for _, cell in gdslib.cell_dict.items():
for p1 in cell.polygons:
for (layer, poly) in zip(p1.layers, p1.polygons):
if layer not in polys_per_layer:
polys_per_layer[layer] = []
polys_per_layer[layer].append(pm.Polygon(shell=poly))
return polys_per_layer
```
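A hedged usage sketch of the exporter above; `Polygon(shell=...)` mirrors the constructor used in `load()`, but the surrounding polymetric API (import paths, vertex handling) is assumed.
```python
from polymetric import polymetric as pm          # import path assumed from the relative import above
from polymetric.exporters import gds
# Write a simple triangle to a GDS file and read it back
triangle = pm.Polygon(shell=[(0.0, 0.0), (10.0, 0.0), (5.0, 8.0)])
gds.save(triangle, "triangle.gds", cell_name="TRIANGLE")
polys_per_layer = gds.load("triangle.gds")        # dict: GDS layer number -> list of pm.Polygon
```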
|
{
"source": "jessesnyder/dallinger-frontend-demo",
"score": 3
}
|
#### File: dlgr_contrib/frontend_demo/bots.py
```python
import logging
import requests
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from dallinger.bots import BotBase, HighPerformanceBotBase
logger = logging.getLogger(__file__)
class Bot(BotBase):
"""Bot tasks for experiment participation"""
def participate(self):
"""Click the button."""
try:
logger.info("Entering participate method")
submit = WebDriverWait(self.driver, 10).until(
EC.element_to_be_clickable((By.ID, 'submit-response')))
submit.click()
return True
except TimeoutException:
return False
class HighPerformanceBot(HighPerformanceBotBase):
"""Bot for experiment participation with direct server interaction"""
def participate(self):
"""Click the button."""
self.log('Bot player participating.')
node_id = None
while True:
# create node
url = "{host}/node/{self.participant_id}".format(
host=self.host,
self=self
)
result = requests.post(url)
if result.status_code == 500 or result.json()['status'] == 'error':
self.stochastic_sleep()
continue
            node_id = result.json().get('node', {}).get('id')
while node_id:
# add info
url = "{host}/info/{node_id}".format(
host=self.host,
node_id=node_id
)
result = requests.post(url, data={"contents": "Submitted",
"info_type": "Info"})
if result.status_code == 500 or result.json()['status'] == 'error':
self.stochastic_sleep()
continue
return
```
|
{
"source": "jessestewart1/nrn-rrn",
"score": 2
}
|
#### File: src/stage_4/stage.py
```python
import click
import logging
import sys
from pathlib import Path
filepath = Path(__file__).resolve()
sys.path.insert(1, str(filepath.parents[1]))
import helpers
from validation_functions import Validator
# Set logger.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S"))
logger.addHandler(handler)
# Create logger for validation errors.
logger_validations = logging.getLogger("validations")
logger_validations.setLevel(logging.WARNING)
class Stage:
"""Defines an NRN stage."""
def __init__(self, source: str, remove: bool = False) -> None:
"""
Initializes an NRN stage.
:param str source: abbreviation for the source province / territory.
:param bool remove: removes pre-existing validation log within the data/processed directory for the specified
source, default False.
"""
self.stage = 4
self.source = source.lower()
self.remove = remove
self.Validator = None
# Configure and validate input data path.
self.data_path = filepath.parents[2] / f"data/interim/{self.source}.gpkg"
if not self.data_path.exists():
logger.exception(f"Input data not found: \"{self.data_path}\".")
sys.exit(1)
# Configure output path.
self.output_path = filepath.parents[2] / f"data/interim/{self.source}_validation_errors.log"
# Conditionally clear output namespace.
if self.output_path.exists():
logger.warning("Output namespace already occupied.")
if self.remove:
logger.warning(f"Parameter remove=True: Removing conflicting file: \"{self.output_path}\".")
self.output_path.unlink()
else:
logger.exception("Parameter remove=False: Unable to proceed while output namespace is occupied. Set "
"remove=True (-r) or manually clear the output namespace.")
sys.exit(1)
# Load data.
self.dframes = helpers.load_gpkg(self.data_path)
def log_errors(self) -> None:
"""Outputs error logs returned by validation functions."""
logger.info("Writing error logs.")
# Add File Handler to validation logger.
f_handler = logging.FileHandler(self.output_path)
f_handler.setLevel(logging.WARNING)
f_handler.setFormatter(logger.handlers[0].formatter)
logger_validations.addHandler(f_handler)
# Iterate and log errors.
for heading, errors in sorted(self.Validator.errors.items()):
errors = "\n".join(map(str, errors))
logger_validations.warning(f"{heading}\n{errors}\n")
def validations(self) -> None:
"""Applies a set of validations to one or more NRN datasets."""
logger.info("Applying validations.")
# Instantiate and execute validator class.
self.Validator = Validator(self.dframes)
self.Validator.execute()
def execute(self) -> None:
"""Executes an NRN stage."""
self.validations()
self.log_errors()
@click.command()
@click.argument("source", type=click.Choice("ab bc mb nb nl ns nt nu on pe qc sk yt".split(), False))
@click.option("--remove / --no-remove", "-r", default=False, show_default=True,
help="Remove pre-existing validation log within the data/processed directory for the specified source.")
def main(source: str, remove: bool = False) -> None:
"""
Executes an NRN stage.
:param str source: abbreviation for the source province / territory.
:param bool remove: removes pre-existing validation log within the data/processed directory for the specified
source, default False.
"""
try:
with helpers.Timer():
stage = Stage(source, remove)
stage.execute()
except KeyboardInterrupt:
logger.exception("KeyboardInterrupt: Exiting program.")
sys.exit(1)
if __name__ == "__main__":
main()
```
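The stage is driven by the click command above; here is a small invocation sketch using click's test runner, with the module import path assumed from the file layout.
```python
from click.testing import CliRunner
from stage import main  # src/stage_4/stage.py; adjust the import path to your layout
runner = CliRunner()
result = runner.invoke(main, ["bc", "--remove"])  # source abbreviation plus optional -r/--remove
print(result.output)
```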
#### File: src/stage_4/validation_functions.py
```python
import calendar
import geopandas as gpd
import logging
import networkx as nx
import numpy as np
import pandas as pd
import pyproj
import shapely.ops
import string
import sys
from collections import Counter, defaultdict
from datetime import datetime
from itertools import chain, combinations, compress, groupby, product, tee
from operator import attrgetter, itemgetter
from pathlib import Path
from scipy.spatial import cKDTree
from scipy.spatial.distance import euclidean
from shapely.geometry import Point
from typing import Dict, List, Tuple, Union
sys.path.insert(1, str(Path(__file__).resolve().parents[1]))
import helpers
# Set logger.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S"))
logger.addHandler(handler)
def ordered_pairs(coords: Tuple[tuple, ...]) -> List[Tuple[tuple, tuple]]:
"""
Creates an ordered sequence of adjacent coordinate pairs, sorted.
:param Tuple[tuple, ...] coords: tuple of coordinate tuples.
:return List[Tuple[tuple, tuple]]: ordered sequence of coordinate pair tuples.
"""
coords_1, coords_2 = tee(coords)
next(coords_2, None)
return sorted(zip(coords_1, coords_2))
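# Worked example for ordered_pairs:
#   ordered_pairs(((3, 3), (1, 1), (2, 2)))
#   -> [((1, 1), (2, 2)), ((3, 3), (1, 1))]
#   (adjacent pairs keep their internal order; the list of pairs is then sorted)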
class Validator:
"""Handles the execution of validation functions against the NRN datasets."""
def __init__(self, dframes: Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]]) -> None:
"""
Initializes variables for validation functions.
:param Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]] dframes: dictionary of NRN dataset names and
(Geo)DataFrames.
"""
logger.info("Configuring validation variables.")
self.errors = defaultdict(list)
# Compile default field values and dtypes.
self.defaults_all = helpers.compile_default_values()
self.dtypes_all = helpers.compile_dtypes()
# Classify dataframes by geometry type.
self.df_lines = ("ferryseg", "roadseg")
self.df_points = ("blkpassage", "junction", "tollpoint")
# Compile dataframes in original and meter-based projections (EPSG:3348; spatial datasets only).
self.dframes = dict()
self.dframes_m = dict()
for name, df in dframes.items():
# Store original dataframe.
self.dframes[name] = df.copy(deep=True)
# Store reprojected dataframe.
if "geometry" in df.columns:
epsg = df.crs.to_epsg()
if epsg == 3348:
self.dframes_m[name] = df.copy(deep=True)
else:
self.dframes_m[name] = df.to_crs("EPSG:3348").copy(deep=True)
# Define projection transformers, if required.
self.prj_3348_to_4617 = pyproj.Transformer.from_crs(pyproj.CRS("EPSG:3348"), pyproj.CRS("EPSG:4617"),
always_xy=True).transform
# Define validation parameters.
# Note: List validations in order if execution order matters.
self.validations = {
self.duplicated_lines: {
"code": 1,
"datasets": self.df_lines,
"iterate": True
},
self.duplicated_points: {
"code": 2,
"datasets": self.df_points,
"iterate": True
},
self.isolated_lines: {
"code": 3,
"datasets": ["roadseg"],
"iterate": True
},
self.dates: {
"code": 4,
"datasets": self.dframes.keys(),
"iterate": True
},
self.deadend_proximity: {
"code": 5,
"datasets": ["junction", "roadseg"],
"iterate": False
},
self.conflicting_exitnbrs: {
"code": 6,
"datasets": ["roadseg"],
"iterate": True
},
self.exitnbr_roadclass_relationship: {
"code": 7,
"datasets": ["roadseg"],
"iterate": True
},
self.ferry_road_connectivity: {
"code": 8,
"datasets": ["ferryseg", "roadseg", "junction"],
"iterate": False
},
self.ids: {
"code": 9,
"datasets": self.dframes.keys(),
"iterate": True
},
self.line_internal_clustering: {
"code": 10,
"datasets": self.df_lines,
"iterate": True
},
self.line_length: {
"code": 11,
"datasets": self.df_lines,
"iterate": True
},
self.line_merging_angle: {
"code": 12,
"datasets": self.df_lines,
"iterate": True
},
self.line_proximity: {
"code": 13,
"datasets": self.df_lines,
"iterate": True
},
self.nbrlanes: {
"code": 14,
"datasets": ["roadseg"],
"iterate": True
},
self.nid_linkages: {
"code": 15,
"datasets": self.dframes.keys(),
"iterate": True
},
self.conflicting_pavement_status: {
"code": 16,
"datasets": ["roadseg"],
"iterate": True
},
self.point_proximity: {
"code": 17,
"datasets": self.df_points,
"iterate": True
},
self.structure_attributes: {
"code": 18,
"datasets": ["roadseg", "junction"],
"iterate": False
},
self.roadclass_rtnumber_relationship: {
"code": 19,
"datasets": ["ferryseg", "roadseg"],
"iterate": True
},
self.self_intersecting_elements: {
"code": 20,
"datasets": ["roadseg"],
"iterate": True
},
self.self_intersecting_structures: {
"code": 21,
"datasets": ["roadseg"],
"iterate": True
},
self.route_contiguity: {
"code": 22,
"datasets": ["roadseg"],
"iterate": False
},
self.speed: {
"code": 23,
"datasets": ["roadseg"],
"iterate": True
},
self.encoding: {
"code": 24,
"datasets": self.dframes.keys(),
"iterate": True
},
self.out_of_scope: {
"code": 25,
"datasets": {*self.df_lines, *self.df_points} - {"junction"},
"iterate": True
}
}
def conflicting_exitnbrs(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to exitnbr field.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
default = self.defaults_all[name]["exitnbr"]
# Query multi-segment road elements (via nid field) where exitnbr is neither the default value nor "None".

df_filtered = df.loc[(df["nid"].duplicated(keep=False)) &
(df["nid"] != default) &
(~df["exitnbr"].isin({default, "None"}))]
if len(df_filtered):
# Group exitnbrs by nid, removing duplicate values.
grouped = helpers.groupby_to_list(df_filtered, "nid", "exitnbr").map(np.unique)
# Validation: ensure road element has <= 1 unique exitnbr.
flag_nids = grouped.loc[grouped.map(len) > 1]
# Compile error properties.
for nid, exitnbrs in flag_nids.iteritems():
exitnbrs = ", ".join(map(lambda val: f"'{val}'", exitnbrs))
errors[1].append(f"nid '{nid}' has multiple exitnbrs: {exitnbrs}.")
return errors
def conflicting_pavement_status(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to pavstatus, pavsurf, and unpavsurf fields.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
# Subset dataframe to non-default values, keep only required fields.
default = self.defaults_all[name]["pavstatus"]
df_filtered = df.loc[df["pavstatus"] != default, ["pavstatus", "pavsurf", "unpavsurf"]]
# Apply validations and compile uuids of flagged records.
if len(df_filtered):
# Validation: when pavstatus == "Paved", ensure pavsurf != "None" and unpavsurf == "None".
paved = df_filtered.loc[df_filtered["pavstatus"] == "Paved"]
errors[1] = paved.loc[paved["pavsurf"] == "None"].index.values
errors[2] = paved.loc[paved["unpavsurf"] != "None"].index.values
# Validation: when pavstatus == "Unpaved", ensure pavsurf == "None" and unpavsurf != "None".
unpaved = df_filtered.loc[df_filtered["pavstatus"] == "Unpaved"]
errors[3] = unpaved.loc[unpaved["pavsurf"] != "None"].index.values
errors[4] = unpaved.loc[unpaved["unpavsurf"] == "None"].index.values
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def dates(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to credate and revdate fields.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
defaults = helpers.compile_default_values()[name]
df = df[["credate", "revdate"]].copy(deep=True)
# Get current date.
today = datetime.today().strftime("%Y%m%d")
today = {"year": int(today[:4]), "month": int(today[4:6]), "day": int(today[6:8]), "full": int(today)}
# Define functions.
def validate_day(date: int) -> bool:
"""
Validate the day value in a date.
:param int date: integer date in format YYYYMMDD.
:return bool: boolean validation of the date.
"""
date_str = str(date)
year, month, day = map(int, [date_str[:4], date_str[4:6], date_str[6:8]])
try:
if not 1 <= day <= calendar.mdays[month]:
if not all([day == 29, month == 2, calendar.isleap(year)]):
return False
# Captures the exception raised by an invalid month value, which is itself handled by another validation.
except IndexError:
return False
return True
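# Illustrative examples (assumed dates):
#   validate_day(20200229) -> True   (2020 is a leap year)
#   validate_day(20210230) -> False  (February has at most 29 days)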
# Iterate credate and revdate, applying validations.
for col in ("credate", "revdate"):
# Subset to non-default values.
s_filtered = df.loc[df[col] != defaults[col], col]
if len(s_filtered):
# Validation 1: length must be 4, 6, or 8.
results = s_filtered.loc[s_filtered.map(lambda date: len(str(date)) not in {4, 6, 8})].index.values
errors[1].extend(results)
# Subset to valid records only for remaining validations.
invalid_indexes = list(set(chain.from_iterable(errors.values())))
s_filtered2 = s_filtered.loc[~s_filtered.index.isin(invalid_indexes)]
if len(s_filtered2):
# Temporarily set missing month and day values to 01.
series_mod = s_filtered2.loc[s_filtered2.map(lambda date: len(str(date)) in {4, 6})]
if len(series_mod):
append_vals = {4: "0101", 6: "01"}
s_filtered2.loc[series_mod.index] = series_mod.map(str).map(
lambda date: date + append_vals[len(date)]).map(int)
df.loc[s_filtered2.index, col] = s_filtered2
# Validation 2: valid date - year.
results = s_filtered2.loc[~s_filtered2.map(
lambda date: 1960 <= int(str(date)[:4]) <= today["year"])].index.values
errors[2].extend(results)
# Validation 3: valid date - month.
results = s_filtered2.loc[~s_filtered2.map(
lambda date: 1 <= int(str(date)[4:6]) <= 12)].index.values
errors[3].extend(results)
# Validation 4: valid date - day.
results = s_filtered2.loc[~s_filtered2.map(validate_day)].index.values
errors[4].extend(results)
# Validation 5: ensure date <= today.
results = s_filtered2.loc[s_filtered2.map(lambda date: date > today["full"])].index.values
errors[5].extend(results)
# Validation 6: ensure credate <= revdate.
df_filtered = df.loc[(df["credate"] != defaults["credate"]) &
(df["revdate"] != defaults["revdate"]) &
~(df.index.isin(set(chain.from_iterable(itemgetter(1, 2)(errors)))))]
if len(df_filtered):
results = df_filtered.loc[df_filtered["credate"] > df_filtered["revdate"]].index.values
errors[6].extend(results)
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def deadend_proximity(self, junction: str = "junction", roadseg: str = "roadseg") -> Dict[int, list]:
"""
Validates the proximity of deadend junctions to disjoint / non-connected road segments.
:param str junction: NRN dataset name for NRN junction.
:param str roadseg: NRN dataset name for NRN roadseg.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
# Validation: deadend junctions must be >= 5 meters from disjoint road segments.
errors = defaultdict(list)
junction = self.dframes_m[junction]
roadseg = self.dframes_m[roadseg]
# Filter junctions to junctype = "Dead End", keep only required fields.
deadends = junction.loc[junction["junctype"] == "Dead End", "geometry"]
roadseg = roadseg["geometry"]
# Compile coordinates (used multiple times).
deadends = deadends.map(lambda pt: itemgetter(0)(attrgetter("coords")(pt)))
roadseg = roadseg.map(lambda g: set(attrgetter("coords")(g)))
# Generate a lookup dict for the index of each roadseg coordinate, mapped to the full range of coordinate
# indexes for the road segment associated with that coordinate. Therefore, the coordinate identified for
# exclusion at distance=0 can be associated with, and expanded to include, all other coordinates along that road
# segment.
# Process: get the road segment coordinate counts and cumulative counts to generate an index range for each road
# segment. Stack the results and duplicate the ranges by the coordinate counts. Convert to a dict.
coords_count = roadseg.map(len)
coords_idx_cumsum = coords_count.cumsum()
coords_full_idx_range = np.repeat(list(map(
lambda indexes: set(range(*indexes)),
np.column_stack((coords_idx_cumsum - coords_count, coords_idx_cumsum)))),
coords_count)
coords_full_idx_range_lookup = dict(zip(range(len(coords_full_idx_range)), coords_full_idx_range))
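# Illustrative example (assumed counts): for two road segments with 2 and 3 coordinates, coords_count = [2, 3],
# the cumulative sums are [2, 5], the index ranges become [{0, 1}, {2, 3, 4}], and the lookup maps the flat
# coordinate indexes 0-4 to those ranges: {0: {0, 1}, 1: {0, 1}, 2: {2, 3, 4}, 3: {2, 3, 4}, 4: {2, 3, 4}}.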
# Generate kdtree.
tree = cKDTree(list(chain.from_iterable(roadseg)))
# Compile indexes of road segments within 5 meters distance of each deadend.
proxi_idx_all = deadends.map(lambda deadend: set(chain(*tree.query_ball_point([deadend], r=5))))
# Compile index of road segment at 0 meters distance from each deadend. These represent the connected roads.
# Expand indexes to ranges.
proxi_idx_exclude = deadends.map(lambda deadend: tree.query([deadend])[-1])
proxi_idx_exclude = proxi_idx_exclude.map(lambda idx: itemgetter(*idx)(coords_full_idx_range_lookup))
# Filter coincident indexes from all indexes. Keep only non-empty results.
proxi_idx_keep = proxi_idx_all - proxi_idx_exclude
proxi_idx_keep = proxi_idx_keep.loc[proxi_idx_keep.map(len) > 0]
proxi_idx_keep = proxi_idx_keep.map(tuple).explode()
# Generate a lookup dict for the index of each roadseg coordinate, mapped to the associated uuid.
coords_idx_uuid_lookup = dict(zip(range(coords_count.sum()), np.repeat(roadseg.index.values, coords_count)))
# Compile the uuid associated with resulting proximity point indexes for each deadend.
proxi_results = proxi_idx_keep.map(lambda idx: itemgetter(idx)(coords_idx_uuid_lookup))
# Compile error properties: calculate min distance measurement between source and target geometries.
results = pd.Series(proxi_results.items()).map(
lambda idxs: (itemgetter(idxs[0])(deadends), tuple(itemgetter(idxs[1])(roadseg))))
distances = results.map(lambda pts: min(map(lambda target_pt: euclidean(pts[0], target_pt), pts[1]))).round(2)
# Compile and sort final results.
results = pd.DataFrame({"target": proxi_results.values, "distance": distances.values},
index=proxi_results.index).sort_values(by="distance", ascending=True)
# Compile error properties: store results.
for vals in results.itertuples(index=True):
source, target, distance = attrgetter("Index", "target", "distance")(vals)
errors[1].append(f"junction uuid '{source}' is too close to roadseg uuid '{target}': {distance} meters.")
return errors
def duplicated_lines(self, name: str) -> Dict[int, list]:
"""
Identifies the uuids of duplicate and overlapping line geometries.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes_m[name]
# Keep only required fields.
series = df["geometry"]
# Validation 1: ensure line segments are not duplicated.
# Filter geometries to those with duplicate lengths.
s_filtered = series.loc[series.length.duplicated(keep=False)]
if len(s_filtered):
# Filter geometries to those with duplicate endpoint coordinates.
s_filtered = s_filtered.loc[s_filtered.map(
lambda g: tuple(sorted(itemgetter(0, -1)(g.coords)))).duplicated(keep=False)]
if len(s_filtered):
# Identify duplicate geometries.
dups = s_filtered.loc[s_filtered.map(
lambda geom1: s_filtered.map(lambda geom2: geom1.equals(geom2)).sum() > 1)]
# Configure duplicate groups and their uuids.
uuid_groups = set(dups.map(
lambda geom1: tuple(set(dups.loc[dups.map(lambda geom2: geom1.equals(geom2))].index))).tolist())
# Compile error properties.
if len(uuid_groups):
for uuid_group in uuid_groups:
vals = ", ".join(map(lambda val: f"'{val}'", uuid_group))
errors[1].append(f"Duplicated geometries identified for uuids: {vals}.")
# Validation 2: ensure line segments do not have repeated adjacent points.
# Filter geometries to those with duplicated coordinates.
s_filtered = series.loc[series.map(lambda g: len(g.coords) != len(set(g.coords)))]
if len(s_filtered):
# Identify geometries with repeated adjacent coordinates.
mask = s_filtered.map(lambda g: len(g.coords) != len(list(groupby(g.coords))))
# Compile uuids of flagged records.
errors[2] = s_filtered.loc[mask].index.values
# Validation 3: ensure line segments do not overlap (i.e. contain duplicated adjacent points).
# Extract coordinates from geometries (used multiple times).
series_coords = series.map(attrgetter("coords")).map(tuple)
# Create ordered coordinate pairs, sorted.
coord_pairs = series_coords.map(ordered_pairs).explode()
# Remove invalid pairs (duplicated adjacent coordinates).
coord_pairs = coord_pairs.loc[coord_pairs.map(lambda pair: pair[0] != pair[1])]
# Group uuids of matching pairs.
coord_pairs_df = coord_pairs.reset_index(drop=False)
coord_pairs_grouped = helpers.groupby_to_list(coord_pairs_df, "geometry", "uuid")
coord_pairs_grouped = pd.DataFrame({"pairs": coord_pairs_grouped.index, "uuid": coord_pairs_grouped.values})
# Filter to duplicated pairs.
coord_pairs_dup = coord_pairs_grouped.loc[coord_pairs_grouped["uuid"].map(len) > 1]
if len(coord_pairs_dup):
# Group duplicated pairs by sorted uuid groups.
coord_pairs_dup["uuid"] = coord_pairs_dup["uuid"].map(sorted).map(tuple)
coord_pairs_dup_grouped = helpers.groupby_to_list(coord_pairs_dup, "uuid", "pairs")
# Compile error properties.
if len(coord_pairs_dup_grouped):
for uuid_group, pairs in coord_pairs_dup_grouped.iteritems():
vals = ", ".join(map(lambda val: f"'{val}'", uuid_group))
errors[3].append(f"{len(pairs)} overlapping segments identified between uuids: {vals}.")
# Compile error properties.
for code, vals in errors.items():
if code in {2} and len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def duplicated_points(self, name: str) -> Dict[int, list]:
"""
Identifies the uuids of duplicate point geometries.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
# Extract coordinates of points.
pts = df["geometry"].map(lambda g: itemgetter(0)(g.coords))
# Identify duplicated geometries.
dups = pts.loc[pts.duplicated(keep=False)]
if len(dups):
# Configure duplicated groups and their uuids.
uuid_groups = set(dups.map(
lambda geom1: tuple(set(dups.loc[dups.map(lambda geom2: geom1.equals(geom2))].index))).tolist())
# Compile error properties.
if len(uuid_groups):
for uuid_group in uuid_groups:
vals = ", ".join(map(lambda val: f"'{val}'", uuid_group))
errors[1].append(f"Duplicated geometries identified for uuids: {vals}.")
return errors
def encoding(self, name: str) -> Dict[int, list]:
"""
Identifies potential encoding errors within string fields.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
# Iterate string columns.
for col in set(df.select_dtypes(include="object").columns) - {"geometry", "uuid", "nid"}:
# Validation: identify values containing one or more question marks ("?"), which may be the result of invalid
# character encoding.
# Flag invalid records.
flag = df[col].str.contains("?", regex=False)
# Compile error properties.
for uid, val in df.loc[flag, col].iteritems():
errors[1].append(f"uuid: '{uid}', attribute: '{val}', based on attribute field: {col}.")
return errors
def exitnbr_roadclass_relationship(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to exitnbr and roadclass fields.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
# Subset dataframe to non-default and non-"None" values, keep only required fields.
default = self.defaults_all[name]["exitnbr"]
s_filtered = df.loc[~df["exitnbr"].isin({default, "None"}), "roadclass"]
if len(s_filtered):
# Validation: ensure roadclass is one of: "Expressway / Highway", "Freeway", "Ramp", "Rapid Transit",
# "Service Lane" when exitnbr is neither the default value nor "None".
# Compile uuids of flagged records.
errors[1] = s_filtered.loc[~s_filtered.isin(
{"Expressway / Highway", "Freeway", "Ramp", "Rapid Transit", "Service Lane"})].index.values
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def ferry_road_connectivity(self, ferryseg: str = "ferryseg", roadseg: str = "roadseg",
junction: str = "junction") -> Dict[int, list]:
"""
Validates the connectivity between ferry and road line segments.
:param str ferryseg: NRN dataset name for NRN ferryseg.
:param str roadseg: NRN dataset name for NRN roadseg.
:param str junction: NRN dataset name for NRN junction.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
# Filter dataframes to only required fields.
ferryseg = self.dframes[ferryseg]["geometry"]
roadseg = self.dframes[roadseg]["geometry"]
junction = self.dframes[junction]
# Validation 1: ensure ferry segments connect to a road segment at at least one endpoint.
# Compile junction coordinates where junctype = "Ferry".
ferry_junctions = list(set(chain([geom.coords[0] for geom in
junction.loc[junction["junctype"] == "Ferry", "geometry"].values])))
# Identify ferry segments which do not connect to any road segments.
mask = ferryseg.map(
lambda geom: not any(coords in ferry_junctions for coords in itemgetter(0, -1)(geom.coords)))
# Compile uuids of flagged records.
errors[1] = ferryseg.loc[mask].index.values
# Validation 2: ensure ferry segments connect to <= 1 road segment at either endpoint.
# Compile road segments which connect to ferry segments.
roads_connected = roadseg.loc[roadseg.map(
lambda geom: any(coords in ferry_junctions for coords in itemgetter(0, -1)(geom.coords)))]
# Compile coordinates of connected road segments.
road_coords_count = Counter(chain.from_iterable(roads_connected.map(
lambda g: tuple(set(itemgetter(0, -1)(g.coords))))))
# Identify ferry endpoints which intersect multiple road segments.
ferry_multi_intersect = ferryseg.map(
lambda ferry: any(itemgetter(coords)(road_coords_count) > 1 for coords in itemgetter(0, -1)(ferry.coords)))
# Compile uuids of flagged records.
errors[2] = ferryseg.loc[ferry_multi_intersect].index.values
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def ids(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to all id fields.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
dtypes, defaults = self.dtypes_all[name], self.defaults_all[name]
# Iterate fields which a) end with "id", b) are str type, and c) are not uuid.
for col in [fld for fld in df.columns.difference(["uuid"]) if fld.endswith("id") and dtypes[fld] == "str"]:
# Subset dataframe to required column with non-default and non-"None" values.
series = df.loc[~df[col].isin([defaults[col], "None"]), col]
if len(series):
# Validation 1: ensure ids are 32 digits.
# Compile uuids of flagged records.
flag_uuids = series.loc[series.map(len) != 32].index.values
for uid in flag_uuids:
errors[1].append(f"uuid: '{uid}', based on attribute field: {col}.")
# Validation 2: ensure ids are hexadecimal.
# Compile uuids of flagged records.
hexdigits = set(string.hexdigits)
flag_uuids = series.loc[series.map(lambda uid: not set(uid).issubset(hexdigits))].index.values
for uid in flag_uuids:
errors[2].append(f"uuid: '{uid}', based on attribute field: {col}.")
# Iterate unique id fields.
unique_fields = {"ferrysegid", "roadsegid"}
for col in unique_fields.intersection(set(df.columns)):
# Filter dataframe to required column.
series = df[col]
# Validation 3: ensure unique id fields are unique within their column.
# Compile uuids of flagged records.
flag_uuids = series.loc[series.duplicated(keep=False)].index.values
for uid in flag_uuids:
errors[3].append(f"uuid: '{uid}', based on attribute field: {col}.")
# Validation 4: ensure unique id fields are not "None" nor the default field value.
# Compile uuids of flagged records.
flag_uuids = series.loc[series.isin([defaults[col], "None"])].index.values
for uid in flag_uuids:
errors[4].append(f"uuid: '{uid}', based on attribute field: {col}.")
return errors
def isolated_lines(self, name: str, junction: str = "junction") -> Dict[int, list]:
"""
Identifies the uuids of isolated line segments.
:param str name: NRN dataset name.
:param str junction: NRN dataset name for NRN junction.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
# Filter dataframes to only required fields.
df = self.dframes[name][["uuid", "geometry"]]
junction = self.dframes[junction][["junctype", "geometry"]]
# Validation 1: ensure line segments are connected to at least one other line segment.
# Compile junctions for 'Dead End'.
pts = set(chain([geom.coords[0] for geom in
junction.loc[junction["junctype"] == "Dead End", "geometry"].values]))
# Identify isolated segments.
# Flag records where both endpoints are 'Dead End'.
mask = df["geometry"].map(lambda g: all(map(lambda pt: pt in pts, itemgetter(0, -1)(g.coords))))
# Compile uuids of flagged records, compile error properties.
if sum(mask):
errors[1] = list(map(lambda val: f"uuid: '{val}'", df.loc[mask].index.values))
# Validation 2: identify line segments which connect to another line segment at intermediate / non-endpoint
# vertices.
# Compile all coordinates and their count from across the entire dataset.
df_nodes_all = df["geometry"].map(attrgetter("coords")).map(tuple)
nodes_count = Counter(chain.from_iterable(df_nodes_all.map(set)))
# Filter analysis records to those with > 2 constituent points.
df_nodes = df_nodes_all.loc[df_nodes_all.map(len) > 2]
# Configure duplicated non-endpoints for analysis records relative to the full dataframe.
def non_endpoint_dups(nodes: Tuple[tuple, ...]) -> Union[None, Tuple[Tuple[tuple, ...], Tuple[int, ...]]]:
"""
Returns intermediate / non-endpoint nodes and their dataframe counts if they are duplicated.
:param Tuple[tuple, ...] nodes: tuple of coordinate tuples.
:return Union[None, Tuple[Tuple[tuple, ...], Tuple[int, ...]]]: None or a nested tuple containing a tuple of
all non-endpoint coordinate tuples and a tuple of the frequency of each node within the entire dataset.
"""
counts = itemgetter(*nodes[1:-1])(nodes_count)
if not isinstance(counts, tuple):
counts = (counts,)
counts_valid = tuple(map(lambda count: count > 1, counts))
if any(counts_valid):
return tuple(compress(nodes[1:-1], counts_valid)), tuple(compress(counts, counts_valid))
else:
return None
dups = df_nodes.map(non_endpoint_dups)
dups = dups.loc[~dups.isna()]
# Nest nodes with counts and explode records.
dups = dups.map(lambda vals: tuple(zip(*vals))).explode()
# Compile uuids of flagged records, compile error properties.
for index, data in dups.iteritems():
errors[2].append(f"uuid: '{index}' intersects {data[1] - 1} other line segment(s) at non-endpoint vertex: "
f"{data[0]}.")
return errors
def line_internal_clustering(self, name: str) -> Dict[int, list]:
"""
Validates the distance between adjacent coordinates of line segments.
Validation: line segments must have >= 1x10^(-2) (0.01) meters distance between adjacent coordinates.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
min_distance = 0.01
series = self.dframes_m[name]["geometry"]
# Extract coordinates from geometries.
series_coords = series.map(attrgetter("coords")).map(tuple)
# Filter out records with only 2 constituent points.
series_coords = series_coords.loc[series_coords.map(len) > 2]
if len(series_coords):
# Create ordered coordinate pairs, sorted.
coord_pairs = series_coords.map(ordered_pairs).explode()
# Remove invalid pairs (duplicated adjacent coordinates).
coord_pairs = coord_pairs.loc[coord_pairs.map(lambda pair: pair[0] != pair[1])]
# Calculate distance between coordinate pairs.
coord_dist = coord_pairs.map(lambda pair: euclidean(pair[0], pair[-1]))
# Flag invalid distances and create dataframe with invalid pairs and distances.
flag = coord_dist < min_distance
invalid_df = pd.DataFrame({"pair": coord_pairs.loc[flag], "distance": coord_dist.loc[flag]},
index=coord_dist.loc[flag].index)
if len(invalid_df):
# Compile error properties.
for record in invalid_df.sort_values(by=["uuid", "distance"], ascending=True).itertuples(index=True):
index, coords, distance = attrgetter("Index", "pair", "distance")(record)
# Reproject coordinates back to NRN CRS.
coords = [shapely.ops.transform(self.prj_3348_to_4617, Point(pt)).coords[0] for pt in coords]
errors[1].append(f"uuid: '{index}', coordinates: {coords[0]} and {coords[1]}, are too close: "
f"{distance} meters.")
return errors
def line_length(self, name: str) -> Dict[int, list]:
"""
Validates the minimum feature length of line geometries.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
min_length = 5
series = self.dframes_m[name]["geometry"]
# Validation: ensure line segments are >= 5 meters in length.
flag = series.length < min_length
# Compile error properties.
if sum(flag):
for index, val in series.loc[flag].length.round(2).sort_values(ascending=True).iteritems():
errors[1].append(f"uuid: '{index}' is too short: {val} meters.")
return errors
def line_merging_angle(self, name: str) -> Dict[int, list]:
"""
Validates the merging angle of line segments.
Validation: ensure line segments merge at angles >= 5 degrees.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
# Define function to calculate the angular degrees between two intersecting lines.
def get_angle(ref_pt: tuple, pt1: tuple, pt2: tuple) -> float:
"""
Calculates the angle (in degrees) formed at the reference point by the 2 points.
:param tuple ref_pt: coordinate tuple of the reference point.
:param tuple pt1: coordinate tuple of the first point.
:param tuple pt2: coordinate tuple of the second point.
:return float: angle, in degrees, formed at the reference point by the 2 points, rounded to 2 decimals.
"""
angle_1 = np.angle(complex(*(np.array(pt1) - np.array(ref_pt))), deg=True)
angle_2 = np.angle(complex(*(np.array(pt2) - np.array(ref_pt))), deg=True)
return round(abs(angle_1 - angle_2), 2)
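# Illustrative example (assumed points): get_angle((0, 0), (1, 0), (0, 1)) -> 90.0,
# since the two neighbouring points lie at 0 and 90 degrees from the reference point.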
errors = defaultdict(list)
merging_angle = 5
series = self.dframes_m[name]["geometry"]
# Compile line endpoints and their neighbours, convert to uuid-neighbour lookup dict.
endpts_nbrs = series.map(
lambda g: tuple(map(itemgetter(0, 1), itemgetter(0, 1, -2, -1)(attrgetter("coords")(g)))))
uuid_nbr_lookup = endpts_nbrs.to_dict()
# Compile only endpoints.
endpts = endpts_nbrs.map(itemgetter(0, -1))
# Explode point groups, filter to only duplicates, and construct a dataframe of the uuids and coordinates.
pts_exploded = endpts.explode()
pts_dups = pts_exploded.loc[pts_exploded.duplicated(keep=False)]
pts_df = pd.DataFrame({"coords": pts_dups, "uuid": pts_dups.index})
# Proceed only if duplicated points exist.
if len(pts_df):
# Group uuids according to coordinates. Explode and convert to DataFrame, keeping index as column.
grouped_pt_uuid = helpers.groupby_to_list(pts_df, "coords", "uuid")
uuid_pt_df = grouped_pt_uuid.explode().reset_index(drop=False).rename(columns={"index": "pt", 0: "uuid"})
# Compile endpoint-neighbouring points.
# Process: Flag uuids according to duplication status within their group. For unique uuids, configure the
# neighbouring point based on whichever endpoint matches the common group point. For duplicated uuids
# (which represent self-loops), the first duplicate takes the second point, the second duplicate takes the
# second-last point - thereby avoiding the same neighbour being taken twice for self-loop intersections.
dup_flags = {
"dup_none": uuid_pt_df.loc[~uuid_pt_df.duplicated(keep=False), ["uuid", "pt"]],
"dup_first": uuid_pt_df.loc[uuid_pt_df.duplicated(keep="first"), "uuid"],
"dup_last": uuid_pt_df.loc[uuid_pt_df.duplicated(keep="last"), "uuid"]
}
dup_results = {
"dup_none": np.vectorize(
lambda uid, pt:
itemgetter({True: 1, False: -2}[uuid_nbr_lookup[uid][0] == pt])(uuid_nbr_lookup[uid]),
otypes=[tuple])(dup_flags["dup_none"]["uuid"], dup_flags["dup_none"]["pt"]),
"dup_first": dup_flags["dup_first"].map(lambda uid: uuid_nbr_lookup[uid][1]).values,
"dup_last": dup_flags["dup_last"].map(lambda uid: uuid_nbr_lookup[uid][-2]).values
}
uuid_pt_df["pt_nbr"] = None
uuid_pt_df.loc[dup_flags["dup_none"].index, "pt_nbr"] = dup_results["dup_none"]
uuid_pt_df.loc[dup_flags["dup_first"].index, "pt_nbr"] = dup_results["dup_first"]
uuid_pt_df.loc[dup_flags["dup_last"].index, "pt_nbr"] = dup_results["dup_last"]
# Aggregate groups of points and associated neighbours.
grouped_pt_nbrs = helpers.groupby_to_list(uuid_pt_df, "pt", "pt_nbr")
# Configure all point-neighbour and point-uuid combinations.
combos_pt_nbrs = grouped_pt_nbrs.map(lambda vals: combinations(vals, r=2)).map(tuple).explode()
combos_pt_uuid = grouped_pt_uuid.map(lambda vals: combinations(vals, r=2)).map(tuple).explode()
# Prepend reference point to neighbour point tuples, add uuid combinations as index.
combos = pd.Series(
np.vectorize(lambda pt, nbrs: (pt, *nbrs), otypes=[tuple])(combos_pt_nbrs.index, combos_pt_nbrs),
index=combos_pt_uuid.values)
# Calculate the merging angle (in degrees) between each set of points. Filter to invalid records.
angles = combos.map(lambda pts: get_angle(*pts))
results = angles.loc[angles < merging_angle]
# Compile error properties.
if len(results):
for uuids, angle in results.drop_duplicates().sort_values(ascending=True).iteritems():
line1, line2 = itemgetter(0, 1)(uuids)
errors[1].append(f"uuids: '{line1}', '{line2}' merge at too small an angle: {angle} degrees.")
return errors
def line_proximity(self, name: str) -> Dict[int, list]:
"""
Validates the proximity of line segments.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
# Validation: ensure line segments are >= 5 meters from each other, excluding connected segments.
errors = defaultdict(list)
prox_limit = 5
series = self.dframes_m[name]["geometry"]
# Compile all unique segment coordinates.
pts = series.map(lambda g: list(set(attrgetter("coords")(g))))
pts_exploded = pts.explode()
# Generate lookup dicts for:
# 1) point coordinate to connected segment uuids.
# 2) point index to segment uuid.
pts_exploded_df = pd.DataFrame(pts_exploded).reset_index(drop=False)
pts_uuids_lookup = helpers.groupby_to_list(pts_exploded_df, "geometry", "uuid").to_dict()
pts_idx_uuid_lookup = pts_exploded_df["uuid"].to_dict()
# Generate kdtree.
tree = cKDTree(pts_exploded.to_list())
# Compile uuids connected to each segment.
uuids_exclude = pts.map(lambda points: set(chain.from_iterable(itemgetter(*points)(pts_uuids_lookup))))
# Compile indexes of segment points < 5 meters distance of each segment, retrieve uuids of returned indexes.
uuids_proxi = pts.map(
lambda points: set(itemgetter(*chain(*tree.query_ball_point(points, r=prox_limit)))(pts_idx_uuid_lookup)))
# Remove connected uuids from each set of uuids, keep non-empty results.
proxi_results = uuids_proxi - uuids_exclude
proxi_results = proxi_results.loc[proxi_results.map(len) > 0]
proxi_results = proxi_results.map(list).explode()
# Reduce duplicated result pairs.
results = pd.Series(tuple(set(map(lambda vals: tuple(sorted(vals)), proxi_results.items()))))
# Compile error properties: calculate min distance measurement between source and target geometries.
distances = results.map(lambda idxs: tuple(product(itemgetter(idxs[0])(pts), itemgetter(idxs[1])(pts)))).map(
lambda pts: min(map(lambda pair: euclidean(*pair), pts))).round(2)
# Compile error properties: compile and sort final results.
results = pd.DataFrame({"target": results.map(itemgetter(1)).values, "distance": distances.values},
index=results.map(itemgetter(0)).values).sort_values(by="distance", ascending=True)
# Compile error properties: store results.
for vals in results.itertuples(index=True):
source, target, distance = attrgetter("Index", "target", "distance")(vals)
errors[1].append(f"uuids: '{source}', '{target}' are too close: {distance} meters.")
return errors
def nbrlanes(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to nbrlanes field.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
# Subset dataframe to non-default values, keep only required fields.
default = self.defaults_all[name]["nbrlanes"]
s_filtered = df.loc[df["nbrlanes"] != default, "nbrlanes"]
if len(s_filtered):
# Validation: ensure 1 <= nbrlanes <= 8.
# Compile uuids of flagged records.
errors[1] = s_filtered.loc[~s_filtered.map(lambda nbrlanes: 1 <= int(nbrlanes) <= 8)].index.values
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def nid_linkages(self, name: str) -> Dict[int, list]:
"""
Validates the nid linkages for the input dataframe, excluding 'None'.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
# Define nid linkages.
linkages = {
"addrange":
{
"roadseg": ["adrangenid"]
},
"altnamlink":
{
"addrange": ["l_altnanid", "r_altnanid"]
},
"roadseg":
{
"blkpassage": ["roadnid"],
"tollpoint": ["roadnid"]
},
"strplaname":
{
"addrange": ["l_offnanid", "r_offnanid"],
"altnamlink": ["strnamenid"]
}
}
# Iterate nid tables which link to the id table.
id_table = name
for nid_table in filter(lambda t: id_table in linkages[t], set(linkages).intersection(self.dframes)):
# Retrieve nids as lowercase.
nids = set(self.dframes[nid_table]["nid"].map(str.lower))
# Iterate linked columns.
for col in linkages[nid_table][id_table]:
# Validation: ensure all nid linkages are valid.
logger.info(f"Validating nid linkage: {nid_table}.nid - {id_table}.{col}.")
# Retrieve column ids as lowercase.
ids = set(df[col].map(str.lower))
# Compile invalid ids, excluding "None" (lower cased).
invalid_ids = ids - nids - {"none"}
# Configure error properties.
if len(invalid_ids):
for invalid_id in invalid_ids:
errors[1].append(f"{id_table}.{col} '{invalid_id}' is not present in {nid_table}.nid.")
return errors
def out_of_scope(self, name: str, junction: str = "junction") -> Dict[int, list]:
"""
Validates the containment of geometries within the associated provincial / territorial boundaries.
NatProvTer junctions are used to infer boundaries; therefore, a record will only be flagged if one of its
endpoints lies outside of the provincial / territorial boundaries.
:param str name: NRN dataset name.
:param str junction: NRN dataset name for NRN junction.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
# Validation: ensure geometries are completely within the associated provincial / territorial boundary.
errors = defaultdict(list)
series = self.dframes[name]["geometry"]
junction = self.dframes[junction]
# Compile out-of-scope junctions (natprovter).
natprovter = set(chain.from_iterable(junction.loc[junction["junctype"] == "NatProvTer", "geometry"].map(
lambda g: attrgetter("coords")(g))))
# Compile series points.
if series.iloc[0].geom_type == "LineString":
series_pts = series.map(lambda g: set(itemgetter(0, -1)(attrgetter("coords")(g))))
else:
series_pts = series.map(lambda g: {itemgetter(0)(attrgetter("coords")(g))})
# Flag series points within the set of natprovter points.
mask = series_pts.map(lambda pts: len(pts.intersection(natprovter)) > 0)
# Compile uuids of flagged records.
errors[1] = series.loc[mask].index.values
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def point_proximity(self, name: str) -> Dict[int, list]:
"""
Validates the proximity of points.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
# Validation: ensure points are >= 5 meters from each other.
errors = defaultdict(list)
prox_limit = 5
series = self.dframes_m[name]["geometry"]
# Compile coordinates (used multiple times)
pts = series.map(lambda g: itemgetter(0)(attrgetter("coords")(g)))
# Generate kdtree.
tree = cKDTree(pts.to_list())
# Compile indexes of points with other points within 5 meters distance. Only keep results with > 1 match.
proxi_idx_all = pts.map(lambda pt: set(chain(*tree.query_ball_point([pt], r=prox_limit))))
proxi_idx_all = proxi_idx_all.loc[proxi_idx_all.map(len) > 1]
# Compile and filter coincident index from each set of indexes for each point, keep non-empty results.
proxi_idx_exclude = pd.Series(range(len(pts)), index=pts.index).map(lambda index: {index})
proxi_idx_keep = proxi_idx_all - proxi_idx_exclude.loc[proxi_idx_all.index]
proxi_idx_keep = proxi_idx_keep.map(tuple).explode()
# Compile uuids associated with each index.
pts_idx_uuid_lookup = {index: uid for index, uid in enumerate(pts.index)}
results = proxi_idx_keep.map(lambda idx: itemgetter(idx)(pts_idx_uuid_lookup))
# Reduce duplicated result pairs.
results = pd.Series(tuple(set(map(lambda vals: tuple(sorted(vals)), results.items()))))
# Compile error properties: calculate min distance measurement between source and target geometries.
distances = results.map(lambda idxs: euclidean(*itemgetter(*idxs)(pts))).round(2)
# Compile error properties: compile and sort final results.
results = pd.DataFrame({"target": results.map(itemgetter(1)).values, "distance": distances.values},
index=results.map(itemgetter(0)).values).sort_values(by="distance", ascending=True)
# Compile error properties: store results.
for vals in results.itertuples(index=True):
source, target, distance = attrgetter("Index", "target", "distance")(vals)
errors[1].append(f"uuids: '{source}', '{target}' are too close: {distance} meters.")
return errors
def roadclass_rtnumber_relationship(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to roadclass and rtnumber1 fields.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
# Filter dataframe to only required fields.
df_filtered = df[["roadclass", "rtnumber1"]]
# Apply validations and compile uuids of flagged records.
# Validation: ensure rtnumber1 is neither the default value nor "None" when roadclass is "Expressway / Highway"
# or "Freeway".
default = self.defaults_all[name]["rtnumber1"]
errors[1] = df_filtered.loc[
df_filtered["roadclass"].isin({"Expressway / Highway", "Freeway"}) &
df_filtered["rtnumber1"].map(lambda rtnumber1: rtnumber1 in {default, "None"})].index.values
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def route_contiguity(self, roadseg: str = "roadseg", ferryseg: Union[None, str] = None) -> Dict[int, list]:
"""
Applies a set of validations to route attributes (rows represent field groups):
rtename1en, rtename2en, rtename3en, rtename4en,
rtename1fr, rtename2fr, rtename3fr, rtename4fr,
rtnumber1, rtnumber2, rtnumber3, rtnumber4, rtnumber5.
:param str roadseg: NRN dataset name for NRN roadseg.
:param Union[None, str] ferryseg: NRN dataset name for NRN ferryseg.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
# Define field groups.
field_groups = [["rtename1en", "rtename2en", "rtename3en", "rtename4en"],
["rtename1fr", "rtename2fr", "rtename3fr", "rtename4fr"],
["rtnumber1", "rtnumber2", "rtnumber3", "rtnumber4", "rtnumber5"]]
# Filter dataframes to only required fields, concatenate resulting dataframes.
keep_fields = list(chain.from_iterable([*field_groups, ["geometry"]]))
if ferryseg is not None:
df = gpd.GeoDataFrame(pd.concat([self.dframes[ferryseg][keep_fields].copy(deep=True),
self.dframes[roadseg][keep_fields].copy(deep=True)],
ignore_index=True, sort=False))
else:
df = self.dframes[roadseg][keep_fields].copy(deep=True)
# Validation: ensure route has contiguous geometry.
for field_group in field_groups:
logger.info(f"Validating routes in field group: {', '.join(map(str, field_group))}.")
# Filter dataframe to records with >= 1 non-default values across the field group, keep only required
# fields.
default = self.defaults_all[roadseg][field_group[0]]
df_filtered = df.loc[(df[field_group].values != default).any(axis=1), [*field_group, "geometry"]]
# Compile route names, excluding default value and "None".
route_names = set(np.unique(df_filtered[field_group].values)) - {default, "None"}
# Iterate route names.
route_count = len(route_names)
for index, route_name in enumerate(sorted(route_names)):
logger.info(f"Validating route {index + 1} of {route_count}: \"{route_name}\".")
# Subset dataframe to those records with route name in at least one field.
route_df = df_filtered.loc[(df_filtered[field_group].values == route_name).any(axis=1)]
# Only process duplicated route names.
if len(route_df) > 1:
# Load dataframe as networkx graph.
route_graph = helpers.gdf_to_nx(route_df, keep_attributes=False)
# Validate contiguity (networkx connectivity).
if not nx.is_connected(route_graph):
# Identify deadends (locations of discontiguity).
deadends = [coords for coords, degree in route_graph.degree() if degree == 1]
deadends = "\n".join(map(lambda deadend: f"{deadend[0]}, {deadend[1]}", deadends))
# Compile error properties.
errors[1].append(f"Discontiguous route: '{route_name}', based on attribute fields: "
f"{', '.join(field_group)}."
f"\nCoordinates of discontiguity:\n{deadends}\n")
return errors
def self_intersecting_elements(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to self-intersecting road elements.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
default = self.defaults_all[name]["nid"]
# Validation: ensure roadclass is in ("Expressway / Highway", "Freeway", "Ramp", "Rapid Transit",
# "Service Lane") for all road elements which a) self-intersect and b) touch another road segment
# where roadclass is in this set.
flag_nids = list()
valid = {"Expressway / Highway", "Freeway", "Ramp", "Rapid Transit", "Service Lane"}
# Compile coords of road segments where roadclass is in the validation list.
valid_coords = set(chain(
*[itemgetter(0, -1)(geom.coords) for geom in df.loc[df["roadclass"].isin(valid), "geometry"].values]))
# Single-segment road elements:
# Retrieve single-segment self-intersections.
# Function call intended to avoid duplicating logic in this current function.
segments_single = self.self_intersecting_structures(df, return_segments_only=True)
if not segments_single.empty:
# Compile nids of road segments with coords in the validation coords list.
flagged = segments_single["geometry"].map(lambda g: g.coords[0] in valid_coords)
flag_nids.extend(segments_single.loc[flagged, "nid"].values)
# Multi-segment road elements:
# Compile multi-segment road elements (via non-unique nids).
# Filter to nids with invalid roadclass.
segments_multi = df.loc[(df["nid"].duplicated(keep=False)) &
(~df["roadclass"].isin(valid)) & (df["nid"] != default)]
if not segments_multi.empty:
logger.info("Validating multi-segment road elements.")
# Compile nids of road segments with coords in the validation coords list.
flagged_nids = segments_multi.loc[segments_multi["geometry"].map(
lambda g: len(set(itemgetter(0, -1)(g.coords)).intersection(valid_coords)) > 0), "nid"].unique()
if len(flagged_nids):
# Compile dataframe records with a flagged nid.
flagged_df = df.loc[df["nid"].isin(flagged_nids)]
# Group geometries by nid.
grouped_segments = helpers.groupby_to_list(flagged_df, "nid", "geometry")
# Dissolve road segments.
elements = grouped_segments.map(shapely.ops.linemerge)
# Identify self-intersections and store nids.
vals = elements.loc[elements.map(lambda element: element.is_ring or not element.is_simple)].values
flag_nids.extend(vals)
# Compile uuids of road segments with flagged nid and invalid roadclass.
errors[1] = df.loc[(df["nid"].isin(flag_nids)) & (~df["roadclass"].isin(valid))].index.values
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def self_intersecting_structures(self, name: Union[gpd.GeoDataFrame, str], return_segments_only: bool = False) -> \
Union[Dict[int, list], gpd.GeoDataFrame]:
"""
Applies a set of validations to self-intersecting road structures.
:param Union[gpd.GeoDataFrame, str] name: GeoDataFrame or NRN dataset name. This allows this function to be
called by other validations.
:param bool return_segments_only: return flagged GeoDataFrame rather than validation error messages, default
False.
:return Union[Dict[int, list], gpd.GeoDataFrame]: dictionary of validation codes and associated lists of error
messages or flagged GeoDataFrame.
"""
errors = defaultdict(list)
flag_segments = pd.DataFrame()
df = self.dframes[name] if isinstance(name, str) else name.copy(deep=True)
default = self.defaults_all["roadseg"]["nid"]
# Identify self-intersections formed by single-segment road elements (i.e. where nid is unique).
# Compile single-segment road elements (via unique nids).
segments = df.loc[(~df["nid"].duplicated(keep=False)) & (df["nid"] != default)]
if not segments.empty:
logger.info("Validating single-segment road elements.")
# Identify self-intersections (start coord == end coord).
flag_segments = segments.loc[segments["geometry"].map(lambda g: g.is_ring or not g.is_simple)]
# Validation: for self-intersecting road segments, ensure structtype != "None".
errors[1] = flag_segments.loc[flag_segments["structtype"] == "None"].index.values
if return_segments_only:
return flag_segments
else:
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def speed(self, name: str) -> Dict[int, list]:
"""
Applies a set of validations to speed field.
:param str name: NRN dataset name.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
df = self.dframes[name]
# Subset dataframe to non-default values, keep only required fields.
default = self.defaults_all[name]["speed"]
s_filtered = df.loc[df["speed"] != default, "speed"]
if len(s_filtered):
# Validation 1: ensure 5 <= speed <= 120.
# Compile uuids of flagged records.
errors[1] = s_filtered.loc[~s_filtered.map(lambda speed: 5 <= int(speed) <= 120)].index.values
# Validation 2: ensure speed is a multiple of 5.
errors[2] = s_filtered.loc[s_filtered.map(lambda speed: int(speed) % 5 != 0)].index.values
# Compile error properties.
for code, vals in errors.items():
if len(vals):
errors[code] = list(map(lambda val: f"uuid: '{val}'", vals))
return errors
def structure_attributes(self, roadseg: str = "roadseg", junction: str = "junction") -> Dict[int, list]:
"""
Validates the structid and structtype attributes of road segments.
:param str roadseg: NRN dataset name for NRN roadseg.
:param str junction: NRN dataset name for NRN junction.
:return Dict[int, list]: dictionary of validation codes and associated lists of error messages.
"""
errors = defaultdict(list)
defaults = self.defaults_all[roadseg]
roadseg = self.dframes[roadseg]
junction = self.dframes[junction]
# Filter dataframes to only required fields.
junction = junction.loc[junction["junctype"] == "Dead End", "geometry"]
roadseg = roadseg[["uuid", "structid", "structtype", "geometry"]]
# Validation 1: ensure dead end road segments have structtype = "None" or the default field value.
# Compile dead end coordinates.
deadend_coords = set(chain(junction.map(lambda pt: itemgetter(0)(attrgetter("coords")(pt)))))
# Compile road segments with potentially invalid structtype.
roadseg_invalid = roadseg.loc[~roadseg["structtype"].isin({"None", defaults["structtype"]}), "geometry"]
# Compile truly invalid road segments.
roadseg_invalid = roadseg_invalid.loc[roadseg_invalid.map(
lambda g: any(coords in deadend_coords for coords in attrgetter("coords")(g)))]
# Compile uuids of flagged records, compile error properties.
if len(roadseg_invalid):
errors[1] = list(map(lambda val: f"uuid: '{val}'", roadseg_invalid.index.values))
# Validation 2: ensure structid is contiguous.
# Compile records with duplicated structids, excluding "None" and the default field value.
structids_df = roadseg.loc[(~roadseg["structid"].isin({"None", defaults["structid"]})) &
(roadseg["structid"].duplicated(keep=False))]
if len(structids_df):
# Group records by structid.
structures = helpers.groupby_to_list(structids_df, "structid", "geometry")
# Load structure geometries as networkx graphs.
structure_graphs = structures.map(
lambda geoms: helpers.gdf_to_nx(gpd.GeoDataFrame(geometry=geoms), keep_attributes=False))
# Validate contiguity (networkx connectivity).
structures_invalid = structure_graphs.loc[~structure_graphs.map(nx.is_connected)]
if len(structures_invalid):
# Identify deadends (locations of discontiguity).
results = structures_invalid.map(lambda graph: [pt for pt, degree in graph.degree() if degree == 1])
# Compile error properties.
for structid, deadends in results.iteritems():
deadends = "\n".join(map(lambda deadend: f"{deadend[0]}, {deadend[1]}", deadends))
errors[2].append(f"Discontiguous structure structid: '{structid}'."
f"\nCoordinates of discontiguity:\n{deadends}\n.")
# Validation 3: ensure a single, non-default structid is applied to all contiguous road segments with the same
# structtype.
# Validation 4: ensure road segments with different structtypes, excluding "None" and the default field value,
# are not contiguous.
# Compile road segments with valid structtype.
segments = roadseg.loc[~roadseg["structtype"].isin({"None", defaults["structtype"]})]
# Convert dataframe to networkx graph.
segments_graph = helpers.gdf_to_nx(segments, keep_attributes=True, endpoints_only=False)
# Configure subgraphs.
sub_g = pd.Series(list(map(segments_graph.subgraph, nx.connected_components(segments_graph))))
# Validation 3.
default = defaults["structid"]
structids = sub_g.map(lambda graph: set(nx.get_edge_attributes(graph, "structid").values()))
structids_invalid = structids.loc[structids.map(lambda vals: (len(vals) > 1) or (default in vals))]
if len(structids_invalid):
# Compile uuids of invalid structure.
uuids_invalid = sub_g.loc[structids_invalid.index].map(
lambda graph: set(nx.get_edge_attributes(graph, "uuid").values()))
# Compile error properties.
for index, row in pd.DataFrame({"uuids": uuids_invalid, "structids": structids_invalid}).iterrows():
uuids = ", ".join(map(lambda val: f"'{val}'", row[0]))
structids = ", ".join(map(lambda val: f"'{val}'", row[1]))
errors[3].append(f"Structure formed by uuids: {uuids} contains multiple structids: {structids}.")
# Validation 4.
structtypes = sub_g.map(lambda graph: set(nx.get_edge_attributes(graph, "structtype").values()))
structtypes_invalid = structtypes.loc[structtypes.map(len) > 1]
if len(structtypes_invalid):
# Compile uuids of invalid structure.
uuids_invalid = sub_g.loc[structtypes_invalid.index].map(
lambda graph: set(nx.get_edge_attributes(graph, "uuid").values()))
# Compile error properties.
for index, row in pd.DataFrame({"uuids": uuids_invalid, "structtypes": structtypes_invalid}).iterrows():
uuids = ", ".join(map(lambda val: f"'{val}'", row[0]))
structtypes = ", ".join(map(lambda val: f"'{val}'", row[1]))
errors[4].append(f"Structure formed by uuids: {uuids} contains multiple structtypes: {structtypes}.")
return errors
def execute(self) -> None:
"""Orchestrates the execution of validation functions and compiles the resulting errors."""
try:
# Iterate validation definitions.
for func, params in self.validations.items():
for dataset in params["datasets"]:
# Continue with single dataset or compile all if non-iterative.
datasets = (dataset,) if params["iterate"] else (*params["datasets"],)
logger.info(f"Applying validation \"{func.__name__}\" to dataset(s): {', '.join(datasets)}.")
# Validate dataset availability.
missing = set(datasets) - set(self.dframes)
if missing:
logger.warning(f"Skipping validation due to missing dataset(s): {', '.join(missing)}.")
if params["iterate"]:
continue
else:
break
# Execute validation.
results = func(*datasets)
# Generate error heading and store results.
for code, errors in results.items():
if len(errors):
heading = f"E{params['code']:03}{code:02} for dataset(s): {', '.join(datasets)}"
self.errors[heading] = errors
except (KeyError, SyntaxError, ValueError) as e:
logger.exception("Unable to apply validation.")
logger.exception(e)
sys.exit(1)
```
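The `Validator.execute` method above is essentially a dispatch loop: each validation callable is mapped to a code, a set of target datasets, and an iteration flag, and its results are compiled under an `E{code}{subcode}` heading. A stripped-down sketch of that pattern, using made-up validations and datasets (all names below are illustrative, not NRN fields):
```python
from typing import Dict, List

def has_rows(name: str, data: Dict[str, list]) -> Dict[int, list]:
    """Toy validation: flag empty datasets under sub-code 1."""
    return {1: [f"dataset '{name}' is empty"]} if not data[name] else {}

def positive_values(name: str, data: Dict[str, list]) -> Dict[int, list]:
    """Toy validation: flag non-positive values under sub-code 1."""
    bad = [v for v in data[name] if v <= 0]
    return {1: [f"dataset '{name}' has non-positive value: {v}" for v in bad]} if bad else {}

def execute(data: Dict[str, list]) -> Dict[str, List[str]]:
    """Run each validation against its datasets and compile E{code}{subcode} headings."""
    validations = {
        has_rows: {"code": 1, "datasets": list(data)},
        positive_values: {"code": 2, "datasets": ["roadseg"]},
    }
    errors: Dict[str, List[str]] = {}
    for func, params in validations.items():
        for dataset in params["datasets"]:
            for subcode, messages in func(dataset, data).items():
                errors[f"E{params['code']:03}{subcode:02} for dataset(s): {dataset}"] = messages
    return errors

print(execute({"roadseg": [5, -1], "junction": []}))
```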
|
{
"source": "jessestricker/fastfmt",
"score": 4
}
|
#### File: fastfmt/tools/int_to_chars_max_len.py
```python
def calc_longest_value(signed: bool, bits: int) -> int:
# compute the value with the greatest absolute difference from 0
# which fits into the type described by the parameters
value_unsigned = (2 ** bits) - 1 # 255 for uint8
value_signed = -(2 ** (bits - 1)) # -128 for int8
return value_signed if signed else value_unsigned
def longest_value_str(signed: bool, bits: int, base: int) -> str:
longest_value = calc_longest_value(signed, bits)
if base == 2:
return f"{longest_value:b}"
if base == 8:
return f"{longest_value:o}"
if base == 10:
return f"{longest_value:d}"
if base == 16:
return f"{longest_value:x}"
raise "unsupported integer type"
def main():
signed_list = [False, True]
bits_list = [8, 16, 32, 64]
base_list = [2, 8, 10, 16]
# print longest values for test code
for signed in signed_list:
for bits in bits_list:
print(f"signed: {signed}, bits: {bits}")
for base in base_list:
longest_value = longest_value_str(signed, bits, base)
print(f" base {base:>2} = {longest_value}")
print()
print()
# print C++ code for determining max length
for signed in signed_list:
print(f"if ({'!' if not signed else ''}is_signed) {{")
for bits in bits_list:
print(f" if (bits == {bits}) {{")
for base in base_list:
longest_value = longest_value_str(signed, bits, base)
print(f" if (base == {base}) return {len(longest_value)};")
print(" }")
print("}")
print('throw std::invalid_argument{"unsupported type"};')
if __name__ == '__main__':
main()
```
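As a quick sanity check of the helpers above (a usage sketch, not part of the original script; it assumes the two functions are in scope), note that the reported maximum length includes the minus sign for signed types:
```python
# Usage sketch for calc_longest_value / longest_value_str defined above.
assert calc_longest_value(signed=False, bits=8) == 255
assert calc_longest_value(signed=True, bits=8) == -128
assert longest_value_str(True, 8, 10) == "-128"       # length 4, sign included
assert longest_value_str(False, 8, 2) == "11111111"   # length 8
assert len(longest_value_str(True, 64, 10)) == 20     # "-9223372036854775808"
```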
|
{
"source": "jessetan/sphinx",
"score": 2
}
|
#### File: sphinx/builders/epub3.py
```python
import html
from os import path
from typing import Any, Dict, List, NamedTuple, Set, Tuple
from sphinx import package_dir
from sphinx.application import Sphinx
from sphinx.builders import _epub_base
from sphinx.config import ENUM, Config
from sphinx.locale import __
from sphinx.util import logging, xmlname_checker
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.i18n import format_date
from sphinx.util.osutil import make_filename
logger = logging.getLogger(__name__)
class NavPoint(NamedTuple):
text: str
refuri: str
children: List[Any] # mypy does not support recursive types
# https://github.com/python/mypy/issues/7069
# writing modes
PAGE_PROGRESSION_DIRECTIONS = {
'horizontal': 'ltr',
'vertical': 'rtl',
}
IBOOK_SCROLL_AXIS = {
'horizontal': 'vertical',
'vertical': 'horizontal',
}
THEME_WRITING_MODES = {
'vertical': 'vertical-rl',
'horizontal': 'horizontal-tb',
}
DOCTYPE = '''<!DOCTYPE html>'''
HTML_TAG = (
'<html xmlns="http://www.w3.org/1999/xhtml" '
'xmlns:epub="http://www.idpf.org/2007/ops">'
)
class Epub3Builder(_epub_base.EpubBuilder):
"""
Builder that outputs epub3 files.
It creates the metainfo files content.opf, nav.xhtml, toc.ncx, mimetype,
and META-INF/container.xml. Afterwards, all necessary files are zipped to
an epub file.
"""
name = 'epub'
epilog = __('The ePub file is in %(outdir)s.')
supported_remote_images = False
template_dir = path.join(package_dir, 'templates', 'epub3')
doctype = DOCTYPE
html_tag = HTML_TAG
use_meta_charset = True
# Finish by building the epub file
def handle_finish(self) -> None:
"""Create the metainfo files and finally the epub."""
self.get_toc()
self.build_mimetype()
self.build_container()
self.build_content()
self.build_navigation_doc()
self.build_toc()
self.build_epub()
def content_metadata(self) -> Dict:
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
writing_mode = self.config.epub_writing_mode
metadata = super().content_metadata()
metadata['description'] = html.escape(self.config.epub_description)
metadata['contributor'] = html.escape(self.config.epub_contributor)
metadata['page_progression_direction'] = PAGE_PROGRESSION_DIRECTIONS.get(writing_mode)
metadata['ibook_scroll_axis'] = IBOOK_SCROLL_AXIS.get(writing_mode)
metadata['date'] = html.escape(format_date("%Y-%m-%dT%H:%M:%SZ"))
metadata['version'] = html.escape(self.config.version)
metadata['epub_version'] = self.config.epub_version
return metadata
def prepare_writing(self, docnames: Set[str]) -> None:
super().prepare_writing(docnames)
writing_mode = self.config.epub_writing_mode
self.globalcontext['theme_writing_mode'] = THEME_WRITING_MODES.get(writing_mode)
self.globalcontext['html_tag'] = self.html_tag
self.globalcontext['use_meta_charset'] = self.use_meta_charset
self.globalcontext['skip_ua_compatible'] = True
def build_navlist(self, navnodes: List[Dict[str, Any]]) -> List[NavPoint]:
"""Create the toc navigation structure.
This method is almost the same as the build_navpoints method in epub.py,
because the logical navigation structure of epub3 does not differ from
that of epub2. The difference from build_navpoints is the set of templates
used when generating the navigation documents.
"""
navstack = [] # type: List[NavPoint]
navstack.append(NavPoint('', '', []))
level = 0
for node in navnodes:
if not node['text']:
continue
file = node['refuri'].split('#')[0]
if file in self.ignored_files:
continue
if node['level'] > self.config.epub_tocdepth:
continue
navpoint = NavPoint(node['text'], node['refuri'], [])
if node['level'] == level:
navstack.pop()
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
elif node['level'] == level + 1:
level += 1
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
elif node['level'] < level:
while node['level'] < len(navstack):
navstack.pop()
level = node['level']
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
else:
raise RuntimeError('Should never reach here. It might be a bug.')
return navstack[0].children
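# Illustrative note (not part of Sphinx): given flat navnodes at levels
# 1, 2, 2, 1 (say A, B, C, D), build_navlist() above returns
#
#     [NavPoint(A, children=[NavPoint(B), NavPoint(C)]), NavPoint(D)]
#
# i.e. a node one level deeper becomes a child of the previous entry, a node
# at the same level becomes its sibling, and a shallower node pops the stack
# back up before being appended.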
def navigation_doc_metadata(self, navlist: List[NavPoint]) -> Dict:
"""Create a dictionary with all metadata for the nav.xhtml file
properly escaped.
"""
metadata = {} # type: Dict
metadata['lang'] = html.escape(self.config.epub_language)
metadata['toc_locale'] = html.escape(self.guide_titles['toc'])
metadata['navlist'] = navlist
return metadata
def build_navigation_doc(self) -> None:
"""Write the metainfo file nav.xhtml."""
logger.info(__('writing nav.xhtml file...'))
if self.config.epub_tocscope == 'default':
doctree = self.env.get_and_resolve_doctree(
self.config.master_doc, self,
prune_toctrees=False, includehidden=False)
refnodes = self.get_refnodes(doctree, [])
self.toc_add_files(refnodes)
else:
# 'includehidden'
refnodes = self.refnodes
navlist = self.build_navlist(refnodes)
copy_asset_file(path.join(self.template_dir, 'nav.xhtml_t'), self.outdir,
self.navigation_doc_metadata(navlist))
# Add nav.xhtml to epub file
if 'nav.xhtml' not in self.files:
self.files.append('nav.xhtml')
def validate_config_values(app: Sphinx) -> None:
if app.builder.name != 'epub':
return
# <package> lang attribute, dc:language
if not app.config.epub_language:
logger.warning(__('conf value "epub_language" (or "language") '
'should not be empty for EPUB3'))
# <package> unique-identifier attribute
if not xmlname_checker().match(app.config.epub_uid):
logger.warning(__('conf value "epub_uid" should be XML NAME for EPUB3'))
# dc:title
if not app.config.epub_title:
logger.warning(__('conf value "epub_title" (or "html_title") '
'should not be empty for EPUB3'))
# dc:creator
if not app.config.epub_author:
logger.warning(__('conf value "epub_author" should not be empty for EPUB3'))
# dc:contributor
if not app.config.epub_contributor:
logger.warning(__('conf value "epub_contributor" should not be empty for EPUB3'))
# dc:description
if not app.config.epub_description:
logger.warning(__('conf value "epub_description" should not be empty for EPUB3'))
# dc:publisher
if not app.config.epub_publisher:
logger.warning(__('conf value "epub_publisher" should not be empty for EPUB3'))
# dc:rights
if not app.config.epub_copyright:
logger.warning(__('conf value "epub_copyright" (or "copyright")'
'should not be empty for EPUB3'))
# dc:identifier
if not app.config.epub_identifier:
logger.warning(__('conf value "epub_identifier" should not be empty for EPUB3'))
# meta ibooks:version
if not app.config.version:
logger.warning(__('conf value "version" should not be empty for EPUB3'))
def convert_epub_css_files(app: Sphinx, config: Config) -> None:
"""This converts string styled epub_css_files to tuple styled one."""
epub_css_files = [] # type: List[Tuple[str, Dict]]
for entry in config.epub_css_files:
if isinstance(entry, str):
epub_css_files.append((entry, {}))
else:
try:
filename, attrs = entry
epub_css_files.append((filename, attrs))
except Exception:
logger.warning(__('invalid css_file: %r, ignored'), entry)
continue
config.epub_css_files = epub_css_files # type: ignore
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_builder(Epub3Builder)
# config values
app.add_config_value('epub_basename', lambda self: make_filename(self.project), None)
app.add_config_value('epub_version', 3.0, 'epub') # experimental
app.add_config_value('epub_theme', 'epub', 'epub')
app.add_config_value('epub_theme_options', {}, 'epub')
app.add_config_value('epub_title', lambda self: self.project, 'epub')
app.add_config_value('epub_author', lambda self: self.author, 'epub')
app.add_config_value('epub_language', lambda self: self.language or 'en', 'epub')
app.add_config_value('epub_publisher', lambda self: self.author, 'epub')
app.add_config_value('epub_copyright', lambda self: self.copyright, 'epub')
app.add_config_value('epub_identifier', 'unknown', 'epub')
app.add_config_value('epub_scheme', 'unknown', 'epub')
app.add_config_value('epub_uid', 'unknown', 'env')
app.add_config_value('epub_cover', (), 'env')
app.add_config_value('epub_guide', (), 'env')
app.add_config_value('epub_pre_files', [], 'env')
app.add_config_value('epub_post_files', [], 'env')
app.add_config_value('epub_css_files', lambda config: config.html_css_files, 'epub')
app.add_config_value('epub_exclude_files', [], 'env')
app.add_config_value('epub_tocdepth', 3, 'env')
app.add_config_value('epub_tocdup', True, 'env')
app.add_config_value('epub_tocscope', 'default', 'env')
app.add_config_value('epub_fix_images', False, 'env')
app.add_config_value('epub_max_image_width', 0, 'env')
app.add_config_value('epub_show_urls', 'inline', 'epub')
app.add_config_value('epub_use_index', lambda self: self.html_use_index, 'epub')
app.add_config_value('epub_description', 'unknown', 'epub')
app.add_config_value('epub_contributor', 'unknown', 'epub')
app.add_config_value('epub_writing_mode', 'horizontal', 'epub',
ENUM('horizontal', 'vertical'))
# event handlers
app.connect('config-inited', convert_epub_css_files, priority=800)
app.connect('builder-inited', validate_config_values)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
```
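For context, the `setup()` function above is what makes the epub-specific options available in a project's `conf.py`. A minimal, purely illustrative snippet using a few of them (the values are placeholders, not recommended defaults) might look like this:
```python
# Illustrative conf.py excerpt -- values are placeholders, not defaults.
project = "Example Docs"
author = "Example Author"

epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = "2021, Example Author"
epub_description = "EPUB3 build of the example documentation."
epub_contributor = "Example Contributor"
epub_identifier = "urn:example:docs"   # placeholder identifier
epub_uid = "example-docs"              # must be a valid XML Name (see validate_config_values)
epub_writing_mode = "horizontal"       # or "vertical", per the ENUM above
epub_css_files = ["custom.css", ("print.css", {"media": "print"})]  # both forms accepted
```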
#### File: sphinx/writers/latex.py
```python
import re
import warnings
from collections import defaultdict
from os import path
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Set, Tuple, cast
from docutils import nodes, writers
from docutils.nodes import Element, Node, Text
from sphinx import addnodes, highlighting
from sphinx.deprecation import RemovedInSphinx50Warning
from sphinx.domains import IndexEntry
from sphinx.domains.std import StandardDomain
from sphinx.errors import SphinxError
from sphinx.locale import _, __, admonitionlabels
from sphinx.util import logging, split_into, texescape
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.nodes import clean_astext, get_prev_node
from sphinx.util.template import LaTeXRenderer
from sphinx.util.texescape import tex_replace_map
try:
from docutils.utils.roman import toRoman
except ImportError:
# In Debian/Ubuntu, the roman package is provided as roman, not as docutils.utils.roman
from roman import toRoman # type: ignore
if TYPE_CHECKING:
from sphinx.builders.latex import LaTeXBuilder
from sphinx.builders.latex.theming import Theme
logger = logging.getLogger(__name__)
MAX_CITATION_LABEL_LENGTH = 8
LATEXSECTIONNAMES = ["part", "chapter", "section", "subsection",
"subsubsection", "paragraph", "subparagraph"]
ENUMERATE_LIST_STYLE = defaultdict(lambda: r'\arabic',
{
'arabic': r'\arabic',
'loweralpha': r'\alph',
'upperalpha': r'\Alph',
'lowerroman': r'\roman',
'upperroman': r'\Roman',
})
EXTRA_RE = re.compile(r'^(.*\S)\s+\(([^()]*)\)\s*$')
class collected_footnote(nodes.footnote):
"""Footnotes that are collected are assigned this class."""
class UnsupportedError(SphinxError):
category = 'Markup is unsupported in LaTeX'
class LaTeXWriter(writers.Writer):
supported = ('sphinxlatex',)
settings_spec = ('LaTeX writer options', '', (
('Document name', ['--docname'], {'default': ''}),
('Document class', ['--docclass'], {'default': 'manual'}),
('Author', ['--author'], {'default': ''}),
))
settings_defaults = {} # type: Dict
output = None
def __init__(self, builder: "LaTeXBuilder") -> None:
super().__init__()
self.builder = builder
self.theme = None # type: Theme
def translate(self) -> None:
try:
visitor = self.builder.create_translator(self.document, self.builder, self.theme)
except TypeError:
warnings.warn('LaTeXTranslator now takes 3rd argument; "theme".',
RemovedInSphinx50Warning, stacklevel=2)
visitor = self.builder.create_translator(self.document, self.builder)
self.document.walkabout(visitor)
self.output = cast(LaTeXTranslator, visitor).astext()
# Helper classes
class Table:
"""A table data"""
def __init__(self, node: Element) -> None:
self.header = [] # type: List[str]
self.body = [] # type: List[str]
self.align = node.get('align', 'default')
self.colcount = 0
self.colspec = None # type: str
self.colwidths = [] # type: List[int]
self.has_problematic = False
self.has_oldproblematic = False
self.has_verbatim = False
self.caption = None # type: List[str]
self.stubs = [] # type: List[int]
# current position
self.col = 0
self.row = 0
# for internal use
self.classes = node.get('classes', []) # type: List[str]
self.cells = defaultdict(int) # type: Dict[Tuple[int, int], int]
# it maps table location to cell_id
# (cell = rectangular area)
self.cell_id = 0 # last assigned cell_id
def is_longtable(self) -> bool:
"""True if and only if table uses longtable environment."""
return self.row > 30 or 'longtable' in self.classes
def get_table_type(self) -> str:
"""Returns the LaTeX environment name for the table.
The class currently supports:
* longtable
* tabular
* tabulary
"""
if self.is_longtable():
return 'longtable'
elif self.has_verbatim:
return 'tabular'
elif self.colspec:
return 'tabulary'
elif self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes):
return 'tabular'
else:
return 'tabulary'
def get_colspec(self) -> str:
"""Returns a column spec of table.
This is what LaTeX calls the 'preamble argument' of the used table environment.
.. note:: the ``\\X`` and ``T`` column type specifiers are defined in ``sphinx.sty``.
"""
if self.colspec:
return self.colspec
elif self.colwidths and 'colwidths-given' in self.classes:
total = sum(self.colwidths)
colspecs = ['\\X{%d}{%d}' % (width, total) for width in self.colwidths]
return '{|%s|}\n' % '|'.join(colspecs)
elif self.has_problematic:
return '{|*{%d}{\\X{1}{%d}|}}\n' % (self.colcount, self.colcount)
elif self.get_table_type() == 'tabulary':
# sphinx.sty sets T to be J by default.
return '{|' + ('T|' * self.colcount) + '}\n'
elif self.has_oldproblematic:
return '{|*{%d}{\\X{1}{%d}|}}\n' % (self.colcount, self.colcount)
else:
return '{|' + ('l|' * self.colcount) + '}\n'
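# Illustrative note (not part of Sphinx): example colspecs produced above.
# With colwidths [30, 70] and the 'colwidths-given' class:
#     '{|\X{30}{100}|\X{70}{100}|}\n'
# For a plain 3-column tabulary table:
#     '{|T|T|T|}\n'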
def add_cell(self, height: int, width: int) -> None:
"""Adds a new cell to a table.
It will be located at current position: (``self.row``, ``self.col``).
"""
self.cell_id += 1
for col in range(width):
for row in range(height):
assert self.cells[(self.row + row, self.col + col)] == 0
self.cells[(self.row + row, self.col + col)] = self.cell_id
def cell(self, row: int = None, col: int = None) -> "TableCell":
"""Returns a cell object (i.e. rectangular area) containing given position.
If no option arguments: ``row`` or ``col`` are given, the current position;
``self.row`` and ``self.col`` are used to get a cell object by default.
"""
try:
if row is None:
row = self.row
if col is None:
col = self.col
return TableCell(self, row, col)
except IndexError:
return None
class TableCell:
"""A cell data of tables."""
def __init__(self, table: Table, row: int, col: int) -> None:
if table.cells[(row, col)] == 0:
raise IndexError
self.table = table
self.cell_id = table.cells[(row, col)]
self.row = row
self.col = col
# adjust position for multirow/multicol cell
while table.cells[(self.row - 1, self.col)] == self.cell_id:
self.row -= 1
while table.cells[(self.row, self.col - 1)] == self.cell_id:
self.col -= 1
@property
def width(self) -> int:
"""Returns the cell width."""
width = 0
while self.table.cells[(self.row, self.col + width)] == self.cell_id:
width += 1
return width
@property
def height(self) -> int:
"""Returns the cell height."""
height = 0
while self.table.cells[(self.row + height, self.col)] == self.cell_id:
height += 1
return height
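# Illustrative sketch (not part of Sphinx): how the cell-grid bookkeeping
# above behaves.  add_cell() stamps a fresh cell_id into every (row, col)
# slot the cell covers, and TableCell recovers the rectangle from those
# stamps, e.g.::
#
#     table = Table(nodes.table())        # empty table node
#     table.add_cell(height=2, width=1)   # cell_id 1 occupies (0, 0) and (1, 0)
#     table.col += 1
#     table.add_cell(height=1, width=2)   # cell_id 2 occupies (0, 1) and (0, 2)
#     cell = table.cell(0, 0)
#     assert (cell.width, cell.height) == (1, 2)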
def escape_abbr(text: str) -> str:
"""Adjust spacing after abbreviations."""
return re.sub(r'\.(?=\s|$)', r'.\@', text)
def rstdim_to_latexdim(width_str: str, scale: int = 100) -> str:
"""Convert `width_str` with rst length to LaTeX length."""
match = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
if not match:
raise ValueError
res = width_str
amount, unit = match.groups()[:2]
if scale == 100:
float(amount) # validate amount is float
if unit in ('', "px"):
res = "%s\\sphinxpxdimen" % amount
elif unit == 'pt':
res = '%sbp' % amount # convert to 'bp'
elif unit == "%":
res = "%.3f\\linewidth" % (float(amount) / 100.0)
else:
amount_float = float(amount) * scale / 100.0
if unit in ('', "px"):
res = "%.5f\\sphinxpxdimen" % amount_float
elif unit == 'pt':
res = '%.5fbp' % amount_float
elif unit == "%":
res = "%.5f\\linewidth" % (amount_float / 100.0)
else:
res = "%.5f%s" % (amount_float, unit)
return res
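# Illustrative examples (not part of Sphinx) of what rstdim_to_latexdim()
# produces at the default scale of 100:
#
#     "72px" -> "72\sphinxpxdimen"     (bare numbers are treated like px)
#     "12pt" -> "12bp"                 (TeX points are emitted as big points)
#     "50%"  -> "0.500\linewidth"      (percentages become \linewidth fractions)
#     "3cm"  -> "3cm"                  (other units pass through unchanged)
#
# With scale != 100 the amount is first multiplied by scale/100, e.g.
# rstdim_to_latexdim("50%", 50) -> "0.25000\linewidth".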
class LaTeXTranslator(SphinxTranslator):
builder = None # type: LaTeXBuilder
secnumdepth = 2 # legacy sphinxhowto.cls uses this, whereas article.cls
# default is originally 3. For book/report, 2 is already LaTeX default.
ignore_missing_images = False
# sphinx specific document classes
docclasses = ('howto', 'manual')
def __init__(self, document: nodes.document, builder: "LaTeXBuilder",
theme: "Theme" = None) -> None:
super().__init__(document, builder)
self.body = [] # type: List[str]
self.theme = theme
if theme is None:
warnings.warn('LaTeXTranslator now takes 3rd argument; "theme".',
RemovedInSphinx50Warning, stacklevel=2)
# flags
self.in_title = 0
self.in_production_list = 0
self.in_footnote = 0
self.in_caption = 0
self.in_term = 0
self.needs_linetrimming = 0
self.in_minipage = 0
self.no_latex_floats = 0
self.first_document = 1
self.this_is_the_title = 1
self.literal_whitespace = 0
self.in_parsed_literal = 0
self.compact_list = 0
self.first_param = 0
sphinxpkgoptions = []
# sort out some elements
self.elements = self.builder.context.copy()
# initial section names
self.sectionnames = LATEXSECTIONNAMES[:]
if self.theme:
# new style: control sectioning via theme's setting
#
# .. note:: template variables (elements) are already assigned in the builder
docclass = self.theme.docclass
if self.theme.toplevel_sectioning == 'section':
self.sectionnames.remove('chapter')
else:
# old style: sectioning control is hard-coded
# but some have other interface in config file
self.elements['wrapperclass'] = self.format_docclass(self.settings.docclass)
# we assume the LaTeX class provides a \chapter command, except in the
# non-Japanese 'howto' case
if document.get('docclass') == 'howto':
docclass = self.config.latex_docclass.get('howto', 'article')
if docclass[0] == 'j': # Japanese class...
pass
else:
self.sectionnames.remove('chapter')
else:
docclass = self.config.latex_docclass.get('manual', 'report')
self.elements['docclass'] = docclass
# determine top section level
self.top_sectionlevel = 1
if self.config.latex_toplevel_sectioning:
try:
self.top_sectionlevel = \
self.sectionnames.index(self.config.latex_toplevel_sectioning)
except ValueError:
logger.warning(__('unknown %r toplevel_sectioning for class %r') %
(self.config.latex_toplevel_sectioning, docclass))
if self.config.numfig:
self.numfig_secnum_depth = self.config.numfig_secnum_depth
if self.numfig_secnum_depth > 0: # default is 1
# numfig_secnum_depth as passed to sphinx.sty indexes the same names as in
# LATEXSECTIONNAMES but with -1 for part, 0 for chapter, 1 for section...
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
self.numfig_secnum_depth += self.top_sectionlevel
else:
self.numfig_secnum_depth += self.top_sectionlevel - 1
# this (minus one) will serve as minimum to LaTeX's secnumdepth
self.numfig_secnum_depth = min(self.numfig_secnum_depth,
len(LATEXSECTIONNAMES) - 1)
# if passed key value is < 1 LaTeX will act as if 0; see sphinx.sty
sphinxpkgoptions.append('numfigreset=%s' % self.numfig_secnum_depth)
else:
sphinxpkgoptions.append('nonumfigreset')
if self.config.numfig and self.config.math_numfig:
sphinxpkgoptions.append('mathnumfig')
if (self.config.language not in {None, 'en', 'ja'} and
'fncychap' not in self.config.latex_elements):
# use Sonny style if any language specified (except English)
self.elements['fncychap'] = ('\\usepackage[Sonny]{fncychap}\n'
'\\ChNameVar{\\Large\\normalfont'
'\\sffamily}\n\\ChTitleVar{\\Large'
'\\normalfont\\sffamily}')
self.babel = self.builder.babel
if self.config.language and not self.babel.is_supported_language():
# emit warning if specified language is invalid
# (only emitting, nothing changed to processing)
logger.warning(__('no Babel option known for language %r'),
self.config.language)
minsecnumdepth = self.secnumdepth # 2 from legacy sphinx manual/howto
if self.document.get('tocdepth'):
# reduce tocdepth if `part` or `chapter` is used for top_sectionlevel
# tocdepth = -1: show only parts
# tocdepth = 0: show parts and chapters
# tocdepth = 1: show parts, chapters and sections
# tocdepth = 2: show parts, chapters, sections and subsections
# ...
tocdepth = self.document.get('tocdepth', 999) + self.top_sectionlevel - 2
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
tocdepth += 1 # because top_sectionlevel is shifted by -1
if tocdepth > len(LATEXSECTIONNAMES) - 2: # default is 5 <-> subparagraph
logger.warning(__('too large :maxdepth:, ignored.'))
tocdepth = len(LATEXSECTIONNAMES) - 2
self.elements['tocdepth'] = '\\setcounter{tocdepth}{%d}' % tocdepth
minsecnumdepth = max(minsecnumdepth, tocdepth)
if self.config.numfig and (self.config.numfig_secnum_depth > 0):
minsecnumdepth = max(minsecnumdepth, self.numfig_secnum_depth - 1)
if minsecnumdepth > self.secnumdepth:
self.elements['secnumdepth'] = '\\setcounter{secnumdepth}{%d}' %\
minsecnumdepth
contentsname = document.get('contentsname')
if contentsname:
self.elements['contentsname'] = self.babel_renewcommand('\\contentsname',
contentsname)
if self.elements['maxlistdepth']:
sphinxpkgoptions.append('maxlistdepth=%s' % self.elements['maxlistdepth'])
if sphinxpkgoptions:
self.elements['sphinxpkgoptions'] = '[,%s]' % ','.join(sphinxpkgoptions)
if self.elements['sphinxsetup']:
self.elements['sphinxsetup'] = ('\\sphinxsetup{%s}' %
self.elements['sphinxsetup'])
if self.elements['extraclassoptions']:
self.elements['classoptions'] += ',' + \
self.elements['extraclassoptions']
self.highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style,
latex_engine=self.config.latex_engine)
self.context = [] # type: List[Any]
self.descstack = [] # type: List[str]
self.tables = [] # type: List[Table]
self.next_table_colspec = None # type: str
self.bodystack = [] # type: List[List[str]]
self.footnote_restricted = None # type: nodes.Element
self.pending_footnotes = [] # type: List[nodes.footnote_reference]
self.curfilestack = [] # type: List[str]
self.handled_abbrs = set() # type: Set[str]
def pushbody(self, newbody: List[str]) -> None:
self.bodystack.append(self.body)
self.body = newbody
def popbody(self) -> List[str]:
body = self.body
self.body = self.bodystack.pop()
return body
def format_docclass(self, docclass: str) -> str:
""" prepends prefix to sphinx document classes
"""
warnings.warn('LaTeXWriter.format_docclass() is deprecated.',
RemovedInSphinx50Warning, stacklevel=2)
if docclass in self.docclasses:
docclass = 'sphinx' + docclass
return docclass
def astext(self) -> str:
self.elements.update({
'body': ''.join(self.body),
'indices': self.generate_indices()
})
return self.render('latex.tex_t', self.elements)
def hypertarget(self, id: str, withdoc: bool = True, anchor: bool = True) -> str:
if withdoc:
id = self.curfilestack[-1] + ':' + id
return ('\\phantomsection' if anchor else '') + \
'\\label{%s}' % self.idescape(id)
def hypertarget_to(self, node: Element, anchor: bool = False) -> str:
labels = ''.join(self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
if anchor:
return r'\phantomsection' + labels
else:
return labels
def hyperlink(self, id: str) -> str:
return '{\\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id: str) -> str:
return '\\autopageref*{%s}' % self.idescape(id)
def escape(self, s: str) -> str:
return texescape.escape(s, self.config.latex_engine)
def idescape(self, id: str) -> str:
return '\\detokenize{%s}' % str(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def babel_renewcommand(self, command: str, definition: str) -> str:
if self.elements['multilingual']:
prefix = '\\addto\\captions%s{' % self.babel.get_language()
suffix = '}'
else: # babel is disabled (mainly for Japanese environment)
prefix = ''
suffix = ''
return ('%s\\renewcommand{%s}{%s}%s\n' % (prefix, command, definition, suffix))
def generate_indices(self) -> str:
def generate(content: List[Tuple[str, List[IndexEntry]]], collapsed: bool) -> None:
ret.append('\\begin{sphinxtheindex}\n')
ret.append('\\let\\bigletter\\sphinxstyleindexlettergroup\n')
for i, (letter, entries) in enumerate(content):
if i > 0:
ret.append('\\indexspace\n')
ret.append('\\bigletter{%s}\n' % self.escape(letter))
for entry in entries:
if not entry[3]:
continue
ret.append('\\item\\relax\\sphinxstyleindexentry{%s}' %
self.encode(entry[0]))
if entry[4]:
# add "extra" info
ret.append('\\sphinxstyleindexextra{%s}' % self.encode(entry[4]))
ret.append('\\sphinxstyleindexpageref{%s:%s}\n' %
(entry[2], self.idescape(entry[3])))
ret.append('\\end{sphinxtheindex}\n')
ret = []
# latex_domain_indices can be False/True or a list of index names
indices_config = self.config.latex_domain_indices
if indices_config:
for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
continue
ret.append('\\renewcommand{\\indexname}{%s}\n' %
indexcls.localname)
generate(content, collapsed)
return ''.join(ret)
def render(self, template_name: str, variables: Dict) -> str:
renderer = LaTeXRenderer(latex_engine=self.config.latex_engine)
for template_dir in self.config.templates_path:
template = path.join(self.builder.confdir, template_dir,
template_name)
if path.exists(template):
return renderer.render(template, variables)
return renderer.render(template_name, variables)
@property
def table(self) -> Table:
"""Get current table."""
if self.tables:
return self.tables[-1]
else:
return None
def visit_document(self, node: Element) -> None:
self.curfilestack.append(node.get('docname', ''))
if self.first_document == 1:
# the first document is all the regular content ...
self.first_document = 0
elif self.first_document == 0:
# ... and all others are the appendices
self.body.append('\n\\appendix\n')
self.first_document = -1
if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
def depart_document(self, node: Element) -> None:
pass
def visit_start_of_file(self, node: Element) -> None:
self.curfilestack.append(node['docname'])
def depart_start_of_file(self, node: Element) -> None:
self.curfilestack.pop()
def visit_section(self, node: Element) -> None:
if not self.this_is_the_title:
self.sectionlevel += 1
self.body.append('\n\n')
def depart_section(self, node: Element) -> None:
self.sectionlevel = max(self.sectionlevel - 1,
self.top_sectionlevel - 1)
def visit_problematic(self, node: Element) -> None:
self.body.append(r'{\color{red}\bfseries{}')
def depart_problematic(self, node: Element) -> None:
self.body.append('}')
def visit_topic(self, node: Element) -> None:
self.in_minipage = 1
self.body.append('\n\\begin{sphinxShadowBox}\n')
def depart_topic(self, node: Element) -> None:
self.in_minipage = 0
self.body.append('\\end{sphinxShadowBox}\n')
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_glossary(self, node: Element) -> None:
pass
def depart_glossary(self, node: Element) -> None:
pass
def visit_productionlist(self, node: Element) -> None:
self.body.append('\n\n\\begin{productionlist}\n')
self.in_production_list = 1
def depart_productionlist(self, node: Element) -> None:
self.body.append('\\end{productionlist}\n\n')
self.in_production_list = 0
def visit_production(self, node: Element) -> None:
if node['tokenname']:
tn = node['tokenname']
self.body.append(self.hypertarget('grammar-token-' + tn))
self.body.append('\\production{%s}{' % self.encode(tn))
else:
self.body.append('\\productioncont{')
def depart_production(self, node: Element) -> None:
self.body.append('}\n')
def visit_transition(self, node: Element) -> None:
self.body.append(self.elements['transition'])
def depart_transition(self, node: Element) -> None:
pass
def visit_title(self, node: Element) -> None:
parent = node.parent
if isinstance(parent, addnodes.seealso):
# the environment already handles this
raise nodes.SkipNode
elif isinstance(parent, nodes.section):
if self.this_is_the_title:
if len(node.children) != 1 and not isinstance(node.children[0],
nodes.Text):
logger.warning(__('document title is not a single Text node'),
location=node)
if not self.elements['title']:
# text needs to be escaped since it is inserted into
# the output literally
self.elements['title'] = self.escape(node.astext())
self.this_is_the_title = 0
raise nodes.SkipNode
else:
short = ''
if node.traverse(nodes.image):
short = ('[%s]' % self.escape(' '.join(clean_astext(node).split())))
try:
self.body.append(r'\%s%s{' % (self.sectionnames[self.sectionlevel], short))
except IndexError:
# just use "subparagraph", it's not numbered anyway
self.body.append(r'\%s%s{' % (self.sectionnames[-1], short))
self.context.append('}\n' + self.hypertarget_to(node.parent))
elif isinstance(parent, nodes.topic):
self.body.append(r'\sphinxstyletopictitle{')
self.context.append('}\n')
elif isinstance(parent, nodes.sidebar):
self.body.append(r'\sphinxstylesidebartitle{')
self.context.append('}\n')
elif isinstance(parent, nodes.Admonition):
self.body.append('{')
self.context.append('}\n')
elif isinstance(parent, nodes.table):
# Redirect body output until title is finished.
self.pushbody([])
else:
logger.warning(__('encountered title node not in section, topic, table, '
'admonition or sidebar'),
location=node)
self.body.append('\\sphinxstyleothertitle{')
self.context.append('}\n')
self.in_title = 1
def depart_title(self, node: Element) -> None:
self.in_title = 0
if isinstance(node.parent, nodes.table):
self.table.caption = self.popbody()
else:
self.body.append(self.context.pop())
def visit_subtitle(self, node: Element) -> None:
if isinstance(node.parent, nodes.sidebar):
self.body.append('\\sphinxstylesidebarsubtitle{')
self.context.append('}\n')
else:
self.context.append('')
def depart_subtitle(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_desc(self, node: Element) -> None:
self.body.append('\n\n\\begin{fulllineitems}\n')
if self.table:
self.table.has_problematic = True
def depart_desc(self, node: Element) -> None:
self.body.append('\n\\end{fulllineitems}\n\n')
def _visit_signature_line(self, node: Element) -> None:
for child in node:
if isinstance(child, addnodes.desc_parameterlist):
self.body.append(r'\pysiglinewithargsret{')
break
else:
self.body.append(r'\pysigline{')
def _depart_signature_line(self, node: Element) -> None:
self.body.append('}')
def visit_desc_signature(self, node: Element) -> None:
if node.parent['objtype'] != 'describe' and node['ids']:
hyper = self.hypertarget(node['ids'][0])
else:
hyper = ''
self.body.append(hyper)
if not node.get('is_multiline'):
self._visit_signature_line(node)
else:
self.body.append('%\n\\pysigstartmultiline\n')
def depart_desc_signature(self, node: Element) -> None:
if not node.get('is_multiline'):
self._depart_signature_line(node)
else:
self.body.append('%\n\\pysigstopmultiline')
def visit_desc_signature_line(self, node: Element) -> None:
self._visit_signature_line(node)
def depart_desc_signature_line(self, node: Element) -> None:
self._depart_signature_line(node)
def visit_desc_addname(self, node: Element) -> None:
self.body.append(r'\sphinxcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_addname(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_type(self, node: Element) -> None:
pass
def depart_desc_type(self, node: Element) -> None:
pass
def visit_desc_returns(self, node: Element) -> None:
self.body.append(r'{ $\rightarrow$ ')
def depart_desc_returns(self, node: Element) -> None:
self.body.append(r'}')
def visit_desc_name(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_name(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_parameterlist(self, node: Element) -> None:
# close name, open parameterlist
self.body.append('}{')
self.first_param = 1
def depart_desc_parameterlist(self, node: Element) -> None:
# close parameterlist, open return annotation
self.body.append('}{')
def visit_desc_parameter(self, node: Element) -> None:
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
if not node.hasattr('noemph'):
self.body.append(r'\emph{')
def depart_desc_parameter(self, node: Element) -> None:
if not node.hasattr('noemph'):
self.body.append('}')
def visit_desc_optional(self, node: Element) -> None:
self.body.append(r'\sphinxoptional{')
def depart_desc_optional(self, node: Element) -> None:
self.body.append('}')
def visit_desc_annotation(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
def depart_desc_annotation(self, node: Element) -> None:
self.body.append('}}')
def visit_desc_content(self, node: Element) -> None:
if node.children and not isinstance(node.children[0], nodes.paragraph):
# avoid empty desc environment which causes a formatting bug
self.body.append('~')
def depart_desc_content(self, node: Element) -> None:
pass
def visit_seealso(self, node: Element) -> None:
self.body.append('\n\n\\sphinxstrong{%s:}\n\\nopagebreak\n\n'
% admonitionlabels['seealso'])
def depart_seealso(self, node: Element) -> None:
self.body.append("\n\n")
def visit_rubric(self, node: Element) -> None:
if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
raise nodes.SkipNode
self.body.append('\\subsubsection*{')
self.context.append('}\n')
self.in_title = 1
def depart_rubric(self, node: Element) -> None:
self.in_title = 0
self.body.append(self.context.pop())
def visit_footnote(self, node: Element) -> None:
self.in_footnote += 1
label = cast(nodes.label, node[0])
if self.in_parsed_literal:
self.body.append('\\begin{footnote}[%s]' % label.astext())
else:
self.body.append('%%\n\\begin{footnote}[%s]' % label.astext())
self.body.append('\\sphinxAtStartFootnote\n')
def depart_footnote(self, node: Element) -> None:
if self.in_parsed_literal:
self.body.append('\\end{footnote}')
else:
self.body.append('%\n\\end{footnote}')
self.in_footnote -= 1
def visit_label(self, node: Element) -> None:
raise nodes.SkipNode
def visit_tabular_col_spec(self, node: Element) -> None:
self.next_table_colspec = node['spec']
raise nodes.SkipNode
def visit_table(self, node: Element) -> None:
if len(self.tables) == 1:
if self.table.get_table_type() == 'longtable':
raise UnsupportedError(
'%s:%s: longtable does not support nesting a table.' %
(self.curfilestack[-1], node.line or ''))
else:
# change type of parent table to tabular
# see https://groups.google.com/d/msg/sphinx-users/7m3NeOBixeo/9LKP2B4WBQAJ
self.table.has_problematic = True
elif len(self.tables) > 2:
raise UnsupportedError(
'%s:%s: deeply nested tables are not implemented.' %
(self.curfilestack[-1], node.line or ''))
self.tables.append(Table(node))
if self.next_table_colspec:
self.table.colspec = '{%s}\n' % self.next_table_colspec
if 'colwidths-given' in node.get('classes', []):
logger.info(__('both tabularcolumns and :widths: option are given. '
':widths: is ignored.'), location=node)
self.next_table_colspec = None
def depart_table(self, node: Element) -> None:
labels = self.hypertarget_to(node)
table_type = self.table.get_table_type()
table = self.render(table_type + '.tex_t',
dict(table=self.table, labels=labels))
self.body.append("\n\n")
self.body.append(table)
self.body.append("\n")
self.tables.pop()
def visit_colspec(self, node: Element) -> None:
self.table.colcount += 1
if 'colwidth' in node:
self.table.colwidths.append(node['colwidth'])
if 'stub' in node:
self.table.stubs.append(self.table.colcount - 1)
def depart_colspec(self, node: Element) -> None:
pass
def visit_tgroup(self, node: Element) -> None:
pass
def depart_tgroup(self, node: Element) -> None:
pass
def visit_thead(self, node: Element) -> None:
# Redirect head output until header is finished.
self.pushbody(self.table.header)
def depart_thead(self, node: Element) -> None:
self.popbody()
def visit_tbody(self, node: Element) -> None:
# Redirect body output until table is finished.
self.pushbody(self.table.body)
def depart_tbody(self, node: Element) -> None:
self.popbody()
def visit_row(self, node: Element) -> None:
self.table.col = 0
# fill columns if the row starts with the bottom of multirow cell
while True:
cell = self.table.cell(self.table.row, self.table.col)
if cell is None: # not a bottom of multirow cell
break
else: # a bottom of multirow cell
self.table.col += cell.width
if cell.col:
self.body.append('&')
if cell.width == 1:
# insert suitable strut for equalizing row heights in given multirow
self.body.append('\\sphinxtablestrut{%d}' % cell.cell_id)
else: # use \multicolumn for wide multirow cell
self.body.append('\\multicolumn{%d}{|l|}'
'{\\sphinxtablestrut{%d}}' %
(cell.width, cell.cell_id))
def depart_row(self, node: Element) -> None:
self.body.append('\\\\\n')
cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)]
underlined = [cell.row + cell.height == self.table.row + 1 for cell in cells]
if all(underlined):
self.body.append('\\hline')
else:
i = 0
underlined.extend([False]) # sentinel
while i < len(underlined):
if underlined[i] is True:
j = underlined[i:].index(False)
self.body.append('\\cline{%d-%d}' % (i + 1, i + j))
i += j
i += 1
self.table.row += 1
def visit_entry(self, node: Element) -> None:
if self.table.col > 0:
self.body.append('&')
self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1)
cell = self.table.cell()
context = ''
if cell.width > 1:
if self.config.latex_use_latex_multicolumn:
if self.table.col == 0:
self.body.append('\\multicolumn{%d}{|l|}{%%\n' % cell.width)
else:
self.body.append('\\multicolumn{%d}{l|}{%%\n' % cell.width)
context = '}%\n'
else:
self.body.append('\\sphinxstartmulticolumn{%d}%%\n' % cell.width)
context = '\\sphinxstopmulticolumn\n'
if cell.height > 1:
# \sphinxmultirow 2nd arg "cell_id" will serve as id for LaTeX macros as well
self.body.append('\\sphinxmultirow{%d}{%d}{%%\n' % (cell.height, cell.cell_id))
context = '}%\n' + context
if cell.width > 1 or cell.height > 1:
self.body.append('\\begin{varwidth}[t]{\\sphinxcolwidth{%d}{%d}}\n'
% (cell.width, self.table.colcount))
context = ('\\par\n\\vskip-\\baselineskip'
'\\vbox{\\hbox{\\strut}}\\end{varwidth}%\n') + context
self.needs_linetrimming = 1
if len(node.traverse(nodes.paragraph)) >= 2:
self.table.has_oldproblematic = True
if isinstance(node.parent.parent, nodes.thead) or (cell.col in self.table.stubs):
if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
pass
else:
self.body.append('\\sphinxstyletheadfamily ')
if self.needs_linetrimming:
self.pushbody([])
self.context.append(context)
def depart_entry(self, node: Element) -> None:
if self.needs_linetrimming:
self.needs_linetrimming = 0
body = self.popbody()
# Remove empty lines from top of merged cell
while body and body[0] == "\n":
body.pop(0)
self.body.extend(body)
self.body.append(self.context.pop())
cell = self.table.cell()
self.table.col += cell.width
# fill columns if next ones are a bottom of wide-multirow cell
while True:
nextcell = self.table.cell()
if nextcell is None: # not a bottom of multirow cell
break
else: # a bottom part of multirow cell
self.table.col += nextcell.width
self.body.append('&')
if nextcell.width == 1:
# insert suitable strut for equalizing row heights in multirow
# they also serve to clear colour panels which would hide the text
self.body.append('\\sphinxtablestrut{%d}' % nextcell.cell_id)
else:
# use \multicolumn for wide multirow cell
self.body.append('\\multicolumn{%d}{l|}'
'{\\sphinxtablestrut{%d}}' %
(nextcell.width, nextcell.cell_id))
def visit_acks(self, node: Element) -> None:
# this is a list in the source, but should be rendered as a
# comma-separated list here
bullet_list = cast(nodes.bullet_list, node[0])
list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append('\n\n')
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
def visit_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append('\\begin{itemize}\n')
if self.table:
self.table.has_problematic = True
def depart_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append('\\end{itemize}\n')
def visit_enumerated_list(self, node: Element) -> None:
def get_enumtype(node: Element) -> str:
enumtype = node.get('enumtype', 'arabic')
if 'alpha' in enumtype and 26 < node.get('start', 0) + len(node):
# fallback to arabic if alphabet counter overflows
enumtype = 'arabic'
return enumtype
def get_nested_level(node: Element) -> int:
if node is None:
return 0
elif isinstance(node, nodes.enumerated_list):
return get_nested_level(node.parent) + 1
else:
return get_nested_level(node.parent)
enum = "enum%s" % toRoman(get_nested_level(node)).lower()
enumnext = "enum%s" % toRoman(get_nested_level(node) + 1).lower()
style = ENUMERATE_LIST_STYLE.get(get_enumtype(node))
prefix = node.get('prefix', '')
suffix = node.get('suffix', '.')
self.body.append('\\begin{enumerate}\n')
self.body.append('\\sphinxsetlistlabels{%s}{%s}{%s}{%s}{%s}%%\n' %
(style, enum, enumnext, prefix, suffix))
if 'start' in node:
self.body.append('\\setcounter{%s}{%d}\n' % (enum, node['start'] - 1))
if self.table:
self.table.has_problematic = True
def depart_enumerated_list(self, node: Element) -> None:
self.body.append('\\end{enumerate}\n')
def visit_list_item(self, node: Element) -> None:
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append(r'\item {} ')
def depart_list_item(self, node: Element) -> None:
self.body.append('\n')
def visit_definition_list(self, node: Element) -> None:
self.body.append('\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_definition_list(self, node: Element) -> None:
self.body.append('\\end{description}\n')
def visit_definition_list_item(self, node: Element) -> None:
pass
def depart_definition_list_item(self, node: Element) -> None:
pass
def visit_term(self, node: Element) -> None:
self.in_term += 1
ctx = ''
if node.get('ids'):
ctx = '\\phantomsection'
for node_id in node['ids']:
ctx += self.hypertarget(node_id, anchor=False)
ctx += '}] \\leavevmode'
self.body.append('\\item[{')
self.context.append(ctx)
def depart_term(self, node: Element) -> None:
self.body.append(self.context.pop())
self.in_term -= 1
def visit_classifier(self, node: Element) -> None:
self.body.append('{[}')
def depart_classifier(self, node: Element) -> None:
self.body.append('{]}')
def visit_definition(self, node: Element) -> None:
pass
def depart_definition(self, node: Element) -> None:
self.body.append('\n')
def visit_field_list(self, node: Element) -> None:
self.body.append('\\begin{quote}\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_field_list(self, node: Element) -> None:
self.body.append('\\end{description}\\end{quote}\n')
def visit_field(self, node: Element) -> None:
pass
def depart_field(self, node: Element) -> None:
pass
visit_field_name = visit_term
depart_field_name = depart_term
visit_field_body = visit_definition
depart_field_body = depart_definition
def visit_paragraph(self, node: Element) -> None:
index = node.parent.index(node)
if (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
not isinstance(node.parent[index - 1], nodes.compound)):
# insert blank line, if the paragraph follows a non-paragraph node in a compound
self.body.append('\\noindent\n')
elif index == 1 and isinstance(node.parent, (nodes.footnote, footnotetext)):
# don't insert blank line, if the paragraph is second child of a footnote
# (first one is label node)
pass
else:
self.body.append('\n')
def depart_paragraph(self, node: Element) -> None:
self.body.append('\n')
def visit_centered(self, node: Element) -> None:
self.body.append('\n\\begin{center}')
if self.table:
self.table.has_problematic = True
def depart_centered(self, node: Element) -> None:
self.body.append('\n\\end{center}')
def visit_hlist(self, node: Element) -> None:
# for now, we don't support a more compact list format
# don't add individual itemize environments, but one for all columns
self.compact_list += 1
self.body.append('\\begin{itemize}\\setlength{\\itemsep}{0pt}'
'\\setlength{\\parskip}{0pt}\n')
if self.table:
self.table.has_problematic = True
def depart_hlist(self, node: Element) -> None:
self.compact_list -= 1
self.body.append('\\end{itemize}\n')
def visit_hlistcol(self, node: Element) -> None:
pass
def depart_hlistcol(self, node: Element) -> None:
pass
def latex_image_length(self, width_str: str, scale: int = 100) -> str:
try:
return rstdim_to_latexdim(width_str, scale)
except ValueError:
logger.warning(__('dimension unit %s is invalid. Ignored.'), width_str)
return None
def is_inline(self, node: Element) -> bool:
"""Check whether a node represents an inline element."""
return isinstance(node.parent, nodes.TextElement)
def visit_image(self, node: Element) -> None:
attrs = node.attributes
pre = [] # type: List[str]
# in reverse order
post = [] # type: List[str]
include_graphics_options = []
has_hyperlink = isinstance(node.parent, nodes.reference)
if has_hyperlink:
is_inline = self.is_inline(node.parent)
else:
is_inline = self.is_inline(node)
if 'width' in attrs:
if 'scale' in attrs:
w = self.latex_image_length(attrs['width'], attrs['scale'])
else:
w = self.latex_image_length(attrs['width'])
if w:
include_graphics_options.append('width=%s' % w)
if 'height' in attrs:
if 'scale' in attrs:
h = self.latex_image_length(attrs['height'], attrs['scale'])
else:
h = self.latex_image_length(attrs['height'])
if h:
include_graphics_options.append('height=%s' % h)
if 'scale' in attrs:
if not include_graphics_options:
# if no "width" nor "height", \sphinxincludegraphics will fit
# to the available text width if oversized after rescaling.
include_graphics_options.append('scale=%s'
% (float(attrs['scale']) / 100.0))
if 'align' in attrs:
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
(1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'),
(1, 'bottom'): ('\\raisebox{-\\height}{', '}'),
(0, 'center'): ('{\\hspace*{\\fill}', '\\hspace*{\\fill}}'),
# These 2 don't exactly do the right thing. The image should
# be floated alongside the paragraph. See
# https://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
(0, 'left'): ('{', '\\hspace*{\\fill}}'),
(0, 'right'): ('{\\hspace*{\\fill}', '}'),
}
try:
pre.append(align_prepost[is_inline, attrs['align']][0])
post.append(align_prepost[is_inline, attrs['align']][1])
except KeyError:
pass
if self.in_parsed_literal:
pre.append('{\\sphinxunactivateextrasandspace ')
post.append('}')
if not is_inline and not has_hyperlink:
pre.append('\n\\noindent')
post.append('\n')
pre.reverse()
if node['uri'] in self.builder.images:
uri = self.builder.images[node['uri']]
else:
# missing image!
if self.ignore_missing_images:
return
uri = node['uri']
if uri.find('://') != -1:
# ignore remote images
return
self.body.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % ','.join(include_graphics_options)
base, ext = path.splitext(uri)
if self.in_title and base:
# Lowercase tokens forcibly because some fncychap themes capitalize
# the options of \sphinxincludegraphics unexpectedly (e.g. WIDTH=...).
self.body.append('\\lowercase{\\sphinxincludegraphics%s}{{%s}%s}' %
(options, base, ext))
else:
self.body.append('\\sphinxincludegraphics%s{{%s}%s}' %
(options, base, ext))
self.body.extend(post)
def depart_image(self, node: Element) -> None:
pass
def visit_figure(self, node: Element) -> None:
align = self.elements['figure_align']
if self.no_latex_floats:
align = "H"
if self.table:
# TODO: support align option
if 'width' in node:
length = self.latex_image_length(node['width'])
if length:
self.body.append('\\begin{sphinxfigure-in-table}[%s]\n'
'\\centering\n' % length)
else:
self.body.append('\\begin{sphinxfigure-in-table}\n\\centering\n')
if any(isinstance(child, nodes.caption) for child in node):
self.body.append('\\capstart')
self.context.append('\\end{sphinxfigure-in-table}\\relax\n')
elif node.get('align', '') in ('left', 'right'):
length = None
if 'width' in node:
length = self.latex_image_length(node['width'])
elif isinstance(node[0], nodes.image) and 'width' in node[0]:
length = self.latex_image_length(node[0]['width'])
self.body.append('\n\n') # Insert a blank line to prevent infinite loop
# https://github.com/sphinx-doc/sphinx/issues/7059
self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
('r' if node['align'] == 'right' else 'l', length or '0pt'))
self.context.append('\\end{wrapfigure}\n')
elif self.in_minipage:
self.body.append('\n\\begin{center}')
self.context.append('\\end{center}\n')
else:
self.body.append('\n\\begin{figure}[%s]\n\\centering\n' % align)
if any(isinstance(child, nodes.caption) for child in node):
self.body.append('\\capstart\n')
self.context.append('\\end{figure}\n')
def depart_figure(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_caption(self, node: Element) -> None:
self.in_caption += 1
if isinstance(node.parent, captioned_literal_block):
self.body.append('\\sphinxSetupCaptionForVerbatim{')
elif self.in_minipage and isinstance(node.parent, nodes.figure):
self.body.append('\\captionof{figure}{')
elif self.table and node.parent.tagname == 'figure':
self.body.append('\\sphinxfigcaption{')
else:
self.body.append('\\caption{')
def depart_caption(self, node: Element) -> None:
self.body.append('}')
if isinstance(node.parent, nodes.figure):
labels = self.hypertarget_to(node.parent)
self.body.append(labels)
self.in_caption -= 1
def visit_legend(self, node: Element) -> None:
self.body.append('\n\\begin{sphinxlegend}')
def depart_legend(self, node: Element) -> None:
self.body.append('\\end{sphinxlegend}\n')
def visit_admonition(self, node: Element) -> None:
self.body.append('\n\\begin{sphinxadmonition}{note}')
self.no_latex_floats += 1
def depart_admonition(self, node: Element) -> None:
self.body.append('\\end{sphinxadmonition}\n')
self.no_latex_floats -= 1
def _visit_named_admonition(self, node: Element) -> None:
label = admonitionlabels[node.tagname]
self.body.append('\n\\begin{sphinxadmonition}{%s}{%s:}' %
(node.tagname, label))
self.no_latex_floats += 1
def _depart_named_admonition(self, node: Element) -> None:
self.body.append('\\end{sphinxadmonition}\n')
self.no_latex_floats -= 1
visit_attention = _visit_named_admonition
depart_attention = _depart_named_admonition
visit_caution = _visit_named_admonition
depart_caution = _depart_named_admonition
visit_danger = _visit_named_admonition
depart_danger = _depart_named_admonition
visit_error = _visit_named_admonition
depart_error = _depart_named_admonition
visit_hint = _visit_named_admonition
depart_hint = _depart_named_admonition
visit_important = _visit_named_admonition
depart_important = _depart_named_admonition
visit_note = _visit_named_admonition
depart_note = _depart_named_admonition
visit_tip = _visit_named_admonition
depart_tip = _depart_named_admonition
visit_warning = _visit_named_admonition
depart_warning = _depart_named_admonition
def visit_versionmodified(self, node: Element) -> None:
pass
def depart_versionmodified(self, node: Element) -> None:
pass
def visit_target(self, node: Element) -> None:
def add_target(id: str) -> None:
# indexing uses standard LaTeX index markup, so the targets
# will be generated differently
if id.startswith('index-'):
return
# equations also need no extra blank line nor hypertarget
# TODO: fix this dependency on mathbase extension internals
if id.startswith('equation-'):
return
# insert blank line, if the target follows a paragraph node
index = node.parent.index(node)
if index > 0 and isinstance(node.parent[index - 1], nodes.paragraph):
self.body.append('\n')
# do not generate \phantomsection in \section{}
anchor = not self.in_title
self.body.append(self.hypertarget(id, anchor=anchor))
# skip if visitor for next node supports hyperlink
next_node = node # type: nodes.Node
while isinstance(next_node, nodes.target):
next_node = next_node.next_node(ascend=True)
domain = cast(StandardDomain, self.builder.env.get_domain('std'))
if isinstance(next_node, HYPERLINK_SUPPORT_NODES):
return
elif domain.get_enumerable_node_type(next_node) and domain.get_numfig_title(next_node):
return
if 'refuri' in node:
return
if 'anonymous' in node:
return
if node.get('refid'):
prev_node = get_prev_node(node)
if isinstance(prev_node, nodes.reference) and node['refid'] == prev_node['refid']:
# a target for a hyperlink reference having alias
pass
else:
add_target(node['refid'])
for id in node['ids']:
add_target(id)
def depart_target(self, node: Element) -> None:
pass
def visit_attribution(self, node: Element) -> None:
self.body.append('\n\\begin{flushright}\n')
self.body.append('---')
def depart_attribution(self, node: Element) -> None:
self.body.append('\n\\end{flushright}\n')
def visit_index(self, node: Element) -> None:
def escape(value: str) -> str:
value = self.encode(value)
value = value.replace(r'\{', r'\sphinxleftcurlybrace{}')
value = value.replace(r'\}', r'\sphinxrightcurlybrace{}')
value = value.replace('"', '""')
value = value.replace('@', '"@')
value = value.replace('!', '"!')
value = value.replace('|', r'\textbar{}')
return value
def style(string: str) -> str:
match = EXTRA_RE.match(string)
if match:
return match.expand(r'\\spxentry{\1}\\spxextra{\2}')
else:
return '\\spxentry{%s}' % string
if not node.get('inline', True):
self.body.append('\n')
entries = node['entries']
for type, string, tid, ismain, key_ in entries:
m = ''
if ismain:
m = '|spxpagem'
try:
if type == 'single':
try:
p1, p2 = [escape(x) for x in split_into(2, 'single', string)]
P1, P2 = style(p1), style(p2)
self.body.append(r'\index{%s@%s!%s@%s%s}' % (p1, P1, p2, P2, m))
except ValueError:
p = escape(split_into(1, 'single', string)[0])
P = style(p)
self.body.append(r'\index{%s@%s%s}' % (p, P, m))
elif type == 'pair':
p1, p2 = [escape(x) for x in split_into(2, 'pair', string)]
P1, P2 = style(p1), style(p2)
self.body.append(r'\index{%s@%s!%s@%s%s}\index{%s@%s!%s@%s%s}' %
(p1, P1, p2, P2, m, p2, P2, p1, P1, m))
elif type == 'triple':
p1, p2, p3 = [escape(x) for x in split_into(3, 'triple', string)]
P1, P2, P3 = style(p1), style(p2), style(p3)
self.body.append(
r'\index{%s@%s!%s %s@%s %s%s}'
r'\index{%s@%s!%s, %s@%s, %s%s}'
r'\index{%s@%s!%s %s@%s %s%s}' %
(p1, P1, p2, p3, P2, P3, m,
p2, P2, p3, p1, P3, P1, m,
p3, P3, p1, p2, P1, P2, m))
elif type == 'see':
p1, p2 = [escape(x) for x in split_into(2, 'see', string)]
P1 = style(p1)
self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
elif type == 'seealso':
p1, p2 = [escape(x) for x in split_into(2, 'seealso', string)]
P1 = style(p1)
self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
else:
logger.warning(__('unknown index entry type %s found'), type)
except ValueError as err:
logger.warning(str(err))
if not node.get('inline', True):
self.body.append('\\ignorespaces ')
raise nodes.SkipNode
def visit_raw(self, node: Element) -> None:
if not self.is_inline(node):
self.body.append('\n')
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
if not self.is_inline(node):
self.body.append('\n')
raise nodes.SkipNode
def visit_reference(self, node: Element) -> None:
if not self.in_title:
for id in node.get('ids'):
anchor = not self.in_caption
self.body += self.hypertarget(id, anchor=anchor)
if not self.is_inline(node):
self.body.append('\n')
uri = node.get('refuri', '')
if not uri and node.get('refid'):
uri = '%' + self.curfilestack[-1] + '#' + node['refid']
if self.in_title or not uri:
self.context.append('')
elif uri.startswith('#'):
# references to labels in the same document
id = self.curfilestack[-1] + ':' + uri[1:]
self.body.append(self.hyperlink(id))
self.body.append(r'\emph{')
if self.config.latex_show_pagerefs and not \
self.in_production_list:
self.context.append('}}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}}')
elif uri.startswith('%'):
# references to documents or labels inside documents
hashindex = uri.find('#')
if hashindex == -1:
# reference to the document
id = uri[1:] + '::doc'
else:
# reference to a label
id = uri[1:].replace('#', ':')
self.body.append(self.hyperlink(id))
if (len(node) and
isinstance(node[0], nodes.Element) and
'std-term' in node[0].get('classes', [])):
# don't add a pageref for glossary terms
self.context.append('}}}')
# mark up as termreference
self.body.append(r'\sphinxtermref{')
else:
self.body.append(r'\sphinxcrossref{')
if self.config.latex_show_pagerefs and not self.in_production_list:
self.context.append('}}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}}')
else:
if len(node) == 1 and uri == node[0]:
if node.get('nolinkurl'):
self.body.append('\\sphinxnolinkurl{%s}' % self.encode_uri(uri))
else:
self.body.append('\\sphinxurl{%s}' % self.encode_uri(uri))
raise nodes.SkipNode
else:
self.body.append('\\sphinxhref{%s}{' % self.encode_uri(uri))
self.context.append('}')
def depart_reference(self, node: Element) -> None:
self.body.append(self.context.pop())
if not self.is_inline(node):
self.body.append('\n')
def visit_number_reference(self, node: Element) -> None:
if node.get('refid'):
id = self.curfilestack[-1] + ':' + node['refid']
else:
id = node.get('refuri', '')[1:].replace('#', ':')
title = self.escape(node.get('title', '%s')).replace('\\%s', '%s')
if '\\{name\\}' in title or '\\{number\\}' in title:
# new style format (cf. "Fig.%{number}")
title = title.replace('\\{name\\}', '{name}').replace('\\{number\\}', '{number}')
text = escape_abbr(title).format(name='\\nameref{%s}' % self.idescape(id),
number='\\ref{%s}' % self.idescape(id))
else:
# old style format (cf. "Fig.%{number}")
text = escape_abbr(title) % ('\\ref{%s}' % self.idescape(id))
hyperref = '\\hyperref[%s]{%s}' % (self.idescape(id), text)
self.body.append(hyperref)
raise nodes.SkipNode
def visit_download_reference(self, node: Element) -> None:
pass
def depart_download_reference(self, node: Element) -> None:
pass
def visit_pending_xref(self, node: Element) -> None:
pass
def depart_pending_xref(self, node: Element) -> None:
pass
def visit_emphasis(self, node: Element) -> None:
self.body.append(r'\sphinxstyleemphasis{')
def depart_emphasis(self, node: Element) -> None:
self.body.append('}')
def visit_literal_emphasis(self, node: Element) -> None:
self.body.append(r'\sphinxstyleliteralemphasis{\sphinxupquote{')
def depart_literal_emphasis(self, node: Element) -> None:
self.body.append('}}')
def visit_strong(self, node: Element) -> None:
self.body.append(r'\sphinxstylestrong{')
def depart_strong(self, node: Element) -> None:
self.body.append('}')
def visit_literal_strong(self, node: Element) -> None:
self.body.append(r'\sphinxstyleliteralstrong{\sphinxupquote{')
def depart_literal_strong(self, node: Element) -> None:
self.body.append('}}')
def visit_abbreviation(self, node: Element) -> None:
abbr = node.astext()
self.body.append(r'\sphinxstyleabbreviation{')
# spell out the explanation once
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
self.context.append('} (%s)' % self.encode(node['explanation']))
self.handled_abbrs.add(abbr)
else:
self.context.append('}')
def depart_abbreviation(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_manpage(self, node: Element) -> None:
return self.visit_literal_emphasis(node)
def depart_manpage(self, node: Element) -> None:
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node: Element) -> None:
self.body.append(r'\sphinxtitleref{')
def depart_title_reference(self, node: Element) -> None:
self.body.append('}')
def visit_thebibliography(self, node: Element) -> None:
citations = cast(Iterable[nodes.citation], node)
labels = (cast(nodes.label, citation[0]) for citation in citations)
longest_label = max((label.astext() for label in labels), key=len)
if len(longest_label) > MAX_CITATION_LABEL_LENGTH:
# adjust max width of citation labels not to break the layout
longest_label = longest_label[:MAX_CITATION_LABEL_LENGTH]
self.body.append('\n\\begin{sphinxthebibliography}{%s}\n' %
self.encode(longest_label))
def depart_thebibliography(self, node: Element) -> None:
self.body.append('\\end{sphinxthebibliography}\n')
def visit_citation(self, node: Element) -> None:
label = cast(nodes.label, node[0])
self.body.append('\\bibitem[%s]{%s:%s}' % (self.encode(label.astext()),
node['docname'], node['ids'][0]))
def depart_citation(self, node: Element) -> None:
pass
def visit_citation_reference(self, node: Element) -> None:
if self.in_title:
pass
else:
self.body.append('\\sphinxcite{%s:%s}' % (node['docname'], node['refname']))
raise nodes.SkipNode
def depart_citation_reference(self, node: Element) -> None:
pass
def visit_literal(self, node: Element) -> None:
if self.in_title:
self.body.append(r'\sphinxstyleliteralintitle{\sphinxupquote{')
elif 'kbd' in node['classes']:
self.body.append(r'\sphinxkeyboard{\sphinxupquote{')
else:
self.body.append(r'\sphinxcode{\sphinxupquote{')
def depart_literal(self, node: Element) -> None:
self.body.append('}}')
def visit_footnote_reference(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footnotemark(self, node: Element) -> None:
self.body.append('\\sphinxfootnotemark[')
def depart_footnotemark(self, node: Element) -> None:
self.body.append(']')
def visit_footnotetext(self, node: Element) -> None:
label = cast(nodes.label, node[0])
self.body.append('%%\n\\begin{footnotetext}[%s]'
'\\sphinxAtStartFootnote\n' % label.astext())
def depart_footnotetext(self, node: Element) -> None:
# the \ignorespaces in particular for after table header use
self.body.append('%\n\\end{footnotetext}\\ignorespaces ')
def visit_captioned_literal_block(self, node: Element) -> None:
pass
def depart_captioned_literal_block(self, node: Element) -> None:
pass
def visit_literal_block(self, node: Element) -> None:
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.in_parsed_literal += 1
self.body.append('\\begin{sphinxalltt}\n')
else:
labels = self.hypertarget_to(node)
if isinstance(node.parent, captioned_literal_block):
labels += self.hypertarget_to(node.parent)
if labels and not self.in_footnote:
self.body.append('\n\\def\\sphinxLiteralBlockLabel{' + labels + '}')
lang = node.get('language', 'default')
linenos = node.get('linenos', False)
highlight_args = node.get('highlight_args', {})
highlight_args['force'] = node.get('force', False)
opts = self.config.highlight_options.get(lang, {})
hlcode = self.highlighter.highlight_block(
node.rawsource, lang, opts=opts, linenos=linenos,
location=node, **highlight_args
)
if self.in_footnote:
self.body.append('\n\\sphinxSetupCodeBlockInFootnote')
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{sphinxVerbatim}')
# if in table raise verbatim flag to avoid "tabulary" environment
# and opt for sphinxVerbatimintable to handle caption & long lines
elif self.table:
self.table.has_problematic = True
self.table.has_verbatim = True
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{sphinxVerbatimintable}')
else:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{sphinxVerbatim}')
# get consistent trailer
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
if self.table and not self.in_footnote:
hlcode += '\\end{sphinxVerbatimintable}'
else:
hlcode += '\\end{sphinxVerbatim}'
hllines = str(highlight_args.get('hl_lines', []))[1:-1]
if hllines:
self.body.append('\n\\fvset{hllines={, %s,}}%%' % hllines)
self.body.append('\n' + hlcode + '\n')
if hllines:
self.body.append('\\sphinxresetverbatimhllines\n')
raise nodes.SkipNode
def depart_literal_block(self, node: Element) -> None:
self.body.append('\n\\end{sphinxalltt}\n')
self.in_parsed_literal -= 1
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line(self, node: Element) -> None:
self.body.append('\\item[] ')
def depart_line(self, node: Element) -> None:
self.body.append('\n')
def visit_line_block(self, node: Element) -> None:
if isinstance(node.parent, nodes.line_block):
self.body.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
else:
self.body.append('\n\\begin{DUlineblock}{0em}\n')
if self.table:
self.table.has_problematic = True
def depart_line_block(self, node: Element) -> None:
self.body.append('\\end{DUlineblock}\n')
def visit_block_quote(self, node: Element) -> None:
# If the block quote contains a single object and that object
# is a list, then generate a list not a block quote.
# This lets us indent lists.
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, nodes.bullet_list) or \
isinstance(child, nodes.enumerated_list):
done = 1
if not done:
self.body.append('\\begin{quote}\n')
if self.table:
self.table.has_problematic = True
def depart_block_quote(self, node: Element) -> None:
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, nodes.bullet_list) or \
isinstance(child, nodes.enumerated_list):
done = 1
if not done:
self.body.append('\\end{quote}\n')
# option node handling copied from docutils' latex writer
def visit_option(self, node: Element) -> None:
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node: Element) -> None:
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node: Element) -> None:
"""The delimiter betweeen an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node: Element) -> None:
pass
def visit_option_group(self, node: Element) -> None:
self.body.append('\\item [')
# flag for first option
self.context.append(0)
def depart_option_group(self, node: Element) -> None:
self.context.pop() # the flag
self.body.append('] ')
def visit_option_list(self, node: Element) -> None:
self.body.append('\\begin{optionlist}{3cm}\n')
if self.table:
self.table.has_problematic = True
def depart_option_list(self, node: Element) -> None:
self.body.append('\\end{optionlist}\n')
def visit_option_list_item(self, node: Element) -> None:
pass
def depart_option_list_item(self, node: Element) -> None:
pass
def visit_option_string(self, node: Element) -> None:
ostring = node.astext()
self.body.append(self.encode(ostring))
raise nodes.SkipNode
def visit_description(self, node: Element) -> None:
self.body.append(' ')
def depart_description(self, node: Element) -> None:
pass
def visit_superscript(self, node: Element) -> None:
self.body.append('$^{\\text{')
def depart_superscript(self, node: Element) -> None:
self.body.append('}}$')
def visit_subscript(self, node: Element) -> None:
self.body.append('$_{\\text{')
def depart_subscript(self, node: Element) -> None:
self.body.append('}}$')
def visit_inline(self, node: Element) -> None:
classes = node.get('classes', [])
if classes in [['menuselection']]:
self.body.append(r'\sphinxmenuselection{')
self.context.append('}')
elif classes in [['guilabel']]:
self.body.append(r'\sphinxguilabel{')
self.context.append('}')
elif classes in [['accelerator']]:
self.body.append(r'\sphinxaccelerator{')
self.context.append('}')
elif classes and not self.in_title:
self.body.append(r'\DUrole{%s}{' % ','.join(classes))
self.context.append('}')
else:
self.context.append('')
def depart_inline(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_generated(self, node: Element) -> None:
pass
def depart_generated(self, node: Element) -> None:
pass
def visit_compound(self, node: Element) -> None:
pass
def depart_compound(self, node: Element) -> None:
pass
def visit_container(self, node: Element) -> None:
pass
def depart_container(self, node: Element) -> None:
pass
def visit_decoration(self, node: Element) -> None:
pass
def depart_decoration(self, node: Element) -> None:
pass
# docutils-generated elements that we don't support
def visit_header(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footer(self, node: Element) -> None:
raise nodes.SkipNode
def visit_docinfo(self, node: Element) -> None:
raise nodes.SkipNode
# text handling
def encode(self, text: str) -> str:
text = self.escape(text)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
text = text.replace('\n', '~\\\\\n').replace(' ', '~')
return text
def encode_uri(self, text: str) -> str:
# TODO: it is probably wrong that this uses texescape.escape()
# this must be checked against hyperref package exact dealings
# mainly, %, #, {, } and \ need escaping via a \ escape
# in \href, the tilde is allowed and must be represented literally
return self.encode(text).replace('\\textasciitilde{}', '~').\
replace('\\sphinxhyphen{}', '-').\
replace('\\textquotesingle{}', "'")
def visit_Text(self, node: Text) -> None:
text = self.encode(node.astext())
self.body.append(text)
def depart_Text(self, node: Text) -> None:
pass
def visit_comment(self, node: Element) -> None:
raise nodes.SkipNode
def visit_meta(self, node: Element) -> None:
# only valid for HTML
raise nodes.SkipNode
def visit_system_message(self, node: Element) -> None:
pass
def depart_system_message(self, node: Element) -> None:
self.body.append('\n')
def visit_math(self, node: Element) -> None:
if self.in_title:
self.body.append(r'\protect\(%s\protect\)' % node.astext())
else:
self.body.append(r'\(%s\)' % node.astext())
raise nodes.SkipNode
def visit_math_block(self, node: Element) -> None:
if node.get('label'):
label = "equation:%s:%s" % (node['docname'], node['label'])
else:
label = None
if node.get('nowrap'):
if label:
self.body.append(r'\label{%s}' % label)
self.body.append(node.astext())
else:
from sphinx.util.math import wrap_displaymath
self.body.append(wrap_displaymath(node.astext(), label,
self.config.math_number_all))
raise nodes.SkipNode
def visit_math_reference(self, node: Element) -> None:
label = "equation:%s:%s" % (node['docname'], node['target'])
eqref_format = self.config.math_eqref_format
if eqref_format:
try:
ref = r'\ref{%s}' % label
self.body.append(eqref_format.format(number=ref))
except KeyError as exc:
logger.warning(__('Invalid math_eqref_format: %r'), exc,
location=node)
self.body.append(r'\eqref{%s}' % label)
else:
self.body.append(r'\eqref{%s}' % label)
def depart_math_reference(self, node: Element) -> None:
pass
def unknown_visit(self, node: Node) -> None:
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
# FIXME: Workaround to avoid circular import
# refs: https://github.com/sphinx-doc/sphinx/issues/5433
from sphinx.builders.latex.nodes import ( # NOQA isort:skip
HYPERLINK_SUPPORT_NODES, captioned_literal_block, footnotetext,
)
```
|
{
"source": "JesseTCS/test_driven_development_in_django",
"score": 3
}
|
#### File: JesseTCS/test_driven_development_in_django/func_test.py
```python
from selenium import webdriver
def func_test() -> None:
"""
:var webdriver safari: Used to operate the safari web-browser
:var string localhost_8000: Holds the localhost url with port number
:var string test_text: Hold what string you would like tested
:except assertion: Raised if test_text is not in the page source
:return: None
"""
safari = webdriver.Safari()
localhost_8000 = 'http://localhost:8000'
safari.get(localhost_8000)
test_text = 'install'
assert test_text in safari.page_source
safari.close()
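# Minimal entry point so the functional test can be run directly with `python func_test.py`
# (assumes the Django dev server is already listening on localhost:8000):
if __name__ == '__main__':
    func_test()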
```
#### File: sphinx/ext/mathbase.py
```python
import warnings
from typing import Callable, List, Tuple
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst.roles import math_role as math_role_base
from sphinx.addnodes import math, math_block as displaymath # NOQA # to keep compatibility
from sphinx.application import Sphinx
from sphinx.builders.latex.nodes import math_reference as eqref # NOQA # to keep compatibility
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.directives.patches import MathDirective as MathDirectiveBase
from sphinx.domains.math import MathDomain # NOQA # to keep compatibility
from sphinx.domains.math import MathReferenceRole as EqXRefRole # NOQA # to keep compatibility
from sphinx.writers.html import HTMLTranslator
from sphinx.writers.latex import LaTeXTranslator
from sphinx.writers.manpage import ManualPageTranslator
from sphinx.writers.texinfo import TexinfoTranslator
from sphinx.writers.text import TextTranslator
class MathDirective(MathDirectiveBase):
def run(self) -> List[Node]:
warnings.warn('sphinx.ext.mathbase.MathDirective is moved to '
'sphinx.directives.patches package.',
RemovedInSphinx30Warning, stacklevel=2)
return super().run()
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
warnings.warn('sphinx.ext.mathbase.math_role() is deprecated. '
'Please use docutils.parsers.rst.roles.math_role() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return math_role_base(role, rawtext, text, lineno, inliner, options, content)
def get_node_equation_number(writer: HTMLTranslator, node: nodes.math_block) -> str:
warnings.warn('sphinx.ext.mathbase.get_node_equation_number() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
from sphinx.util.math import get_node_equation_number
return get_node_equation_number(writer, node)
def wrap_displaymath(text: str, label: str, numbering: bool) -> str:
warnings.warn('sphinx.ext.mathbase.wrap_displaymath() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
from sphinx.util.math import wrap_displaymath
return wrap_displaymath(text, label, numbering)
def is_in_section_title(node: Element) -> bool:
"""Determine whether the node is in a section title"""
from sphinx.util.nodes import traverse_parent
warnings.warn('is_in_section_title() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
for ancestor in traverse_parent(node):
if isinstance(ancestor, nodes.title) and \
isinstance(ancestor.parent, nodes.section):
return True
return False
def latex_visit_math(self: LaTeXTranslator, node: Element) -> None:
warnings.warn('latex_visit_math() is deprecated. '
'Please use LaTeXTranslator.visit_math() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.visit_math(node)
def latex_visit_displaymath(self: LaTeXTranslator, node: Element) -> None:
warnings.warn('latex_visit_displaymath() is deprecated. '
'Please use LaTeXTranslator.visit_math_block() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.visit_math_block(node)
def man_visit_math(self: ManualPageTranslator, node: Element) -> None:
warnings.warn('man_visit_math() is deprecated. '
'Please use ManualPageTranslator.visit_math() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.visit_math(node)
def man_visit_displaymath(self: ManualPageTranslator, node: Element) -> None:
warnings.warn('man_visit_displaymath() is deprecated. '
'Please use ManualPageTranslator.visit_math_block() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.visit_math_block(node)
def man_depart_displaymath(self: ManualPageTranslator, node: Element) -> None:
warnings.warn('man_depart_displaymath() is deprecated. '
'Please use ManualPageTranslator.depart_math_block() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.depart_math_block(node)
def texinfo_visit_math(self: TexinfoTranslator, node: Element) -> None:
warnings.warn('texinfo_visit_math() is deprecated. '
'Please use TexinfoTranslator.visit_math() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.visit_math(node)
def texinfo_visit_displaymath(self: TexinfoTranslator, node: Element) -> None:
warnings.warn('texinfo_visit_displaymath() is deprecated. '
'Please use TexinfoTranslator.visit_math_block() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.visit_math_block(node)
def texinfo_depart_displaymath(self: TexinfoTranslator, node: Element) -> None:
warnings.warn('texinfo_depart_displaymath() is deprecated. '
'Please use TexinfoTranslator.depart_math_block() instead.',
RemovedInSphinx30Warning, stacklevel=2)
def text_visit_math(self: TextTranslator, node: Element) -> None:
warnings.warn('text_visit_math() is deprecated. '
'Please use TextTranslator.visit_math() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.visit_math(node)
def text_visit_displaymath(self: TextTranslator, node: Element) -> None:
warnings.warn('text_visit_displaymath() is deprecated. '
'Please use TextTranslator.visit_math_block() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.visit_math_block(node)
def setup_math(app: Sphinx,
htmlinlinevisitors: Tuple[Callable, Callable],
htmldisplayvisitors: Tuple[Callable, Callable]) -> None:
warnings.warn('setup_math() is deprecated. '
'Please use app.add_html_math_renderer() instead.',
RemovedInSphinx30Warning, stacklevel=2)
app.add_html_math_renderer('unknown', htmlinlinevisitors, htmldisplayvisitors)
```
|
{
"source": "JesseTG/Liar",
"score": 2
}
|
#### File: liar/public/edges.py
```python
import pymongo
from pymongo import MongoClient
import itertools
import scipy
from sklearn import manifold
from scipy.spatial.distance import squareform, pdist
def numberCommon(x, y):
    liar_db=connection.liar
    statements=liar_db.statements
    if x == y:
return 0
else:
return statements.find({
'$and': [{'subjects': x}, {'subjects': y}]
}).count()
def nodes():
liar_db=connection.liar
statements=liar_db.statements
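    # Counts how many statements mention each subject; yields documents of the form
    # {'_id': <subject>, 'count': <n>}, sorted alphabetically by subject.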
return statements.aggregate([
{
"$unwind": {
"path": "$subjects"
}
},
{
"$group": {
"_id": "$subjects",
"count": {"$sum": 1}
}
},
{
"$sort": {"_id": 1}
}
])
def edges():
liar_db=connection.liar
statements=liar_db.statements
subjects = tuple(sorted(statements.distinct("subjects")))
subjectLists = tuple(s["subjects"] for s in statements.find({}, {"subjects": True, "_id": False}))
combos = frozenset(itertools.chain.from_iterable(itertools.combinations(l, 2) for l in subjectLists))
length = len(subjects)
matrix = scipy.zeros((length, length))
for i, j in combos:
i_index = subjects.index(i)
j_index = subjects.index(j)
common = numberCommon(i, j)
matrix[i_index, j_index] = common
matrix[j_index, i_index] = common
most = matrix.max()
mds = manifold.MDS(n_components=2, n_init=4, max_iter=300, eps=1e-6, dissimilarity="precomputed", n_jobs=-1)
def add_coordinates():
n=nodes()
coords=manifold.MDS(n_components=2, n_init=4, max_iter=300, eps=1e-6, dissimilarity="precomputed", n_jobs=-1)
connection=MongoClient()
```
#### File: liar/public/views.py
```python
from collections import Counter, defaultdict
import operator
import re
import itertools
import math
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask import current_app
from nltk.corpus import stopwords
import nltk
from liar.utils import flash_errors
from liar.extensions import cache, mongo
from .. import queries
import scipy
import pandas as pd
from sklearn import manifold
from scipy.interpolate import interp1d
from scipy.spatial.distance import squareform, pdist
from numpy import amax
from colour import Color
def take(n, iterable):
"Return first n items of the iterable as a list"
    return list(itertools.islice(iterable, n))
gag_list=["EX","RP","TO","VB","WP","PRP","DT","VBP","IN","POS",".","CD","``"]
def split_sentence(text):
sentence=nltk.word_tokenize(text)
tagged = nltk.pos_tag(sentence)
tagged=[tag for tag in tagged if tag[1] not in gag_list]
pass_list=[tag[0] for tag in tagged]
return pass_list
def gen_dict(statement_text):
words=[split_sentence(sentence) for sentence in statement_text]
word_dict=defaultdict(int)
    for word_list in words:
        # Accumulate counts across statements; a plain dict merge would overwrite
        # earlier counts for any word that appears in more than one statement
        for word, count in Counter(word_list).items():
            word_dict[word] += count
return word_dict
blueprint = Blueprint('public', __name__, static_folder='../static')
COLORS = tuple(map(Color, ("#661a00", "#E71F28", "#EE9022", "#FFD503", "#C3D52D", "#83BF44")))
interval = tuple(i/(len(COLORS) - 1) for i in range(len(COLORS)))
red = interp1d(interval, [c.red for c in COLORS])
green = interp1d(interval, [c.green for c in COLORS])
blue = interp1d(interval, [c.blue for c in COLORS])
def gradient(i):
return Color(rgb=(red(i), green(i), blue(i)))
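# gradient(0.0) yields the first color stop and gradient(1.0) the last; values in
# between blend linearly through COLORS via the interp1d functions above.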
@cache.cached(timeout=300)
def compute_points(combos):
subjects = tuple(sorted(tuple(queries.subjects())))
length = len(subjects)
matrix = scipy.zeros((length, length))
for c in combos:
_id = c['_id']
count = c['count']
i_index = subjects.index(_id[0])
j_index = subjects.index(_id[1])
matrix[i_index, j_index] = count
matrix[j_index, i_index] = count
most = matrix.max()
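    # manifold.MDS expects dissimilarities, so the counts are inverted below by
    # subtracting from the maximum: subjects that co-occur often end up close together.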
mds = manifold.MDS(n_components=2, n_init=10, max_iter=1000, eps=1e-9, dissimilarity="precomputed", n_jobs=-1)
return scipy.array(mds.fit_transform(most - matrix))
def viewbox(points):
am = amax(points)
margin = am * 0.05
return "{0} {1} {2} {3}".format(-am - margin, -am - margin, am*2 + margin, am*2 + margin)
def build_data(points):
nodes = tuple(queries.nodes())
assert len(nodes) == len(points)
# The MDS should provide one 2D point for each topic...
for i in range(len(nodes)):
node = nodes[i]
point = points[i]
node['x'] = point[0]
node['y'] = point[1]
node['radius'] = math.sqrt(node['numberOfRulings'])
return { n['_id'] : n for n in nodes}
#######################Word cloud#####################
def word_cloud():
    statements=mongo.db.statements
    # Pull only the statement text out of MongoDB and put it in a dataframe
    statements_df=pd.DataFrame(list(statements.find({}, {"statement": True, "_id": False})))
    statement_text=statements_df['statement'].tolist()
    word_dict=gen_dict(statement_text)
    word_dict=dict(sorted(word_dict.items(), key=operator.itemgetter(1), reverse=True))
    return word_dict
#####################################################
def compute_edges(nodes, combos):
def make_edge(combo):
return {
'a': nodes[combo['_id'][0]],
'b': nodes[combo['_id'][1]],
'count': combo['count']
}
def allow_edge(edge):
a = edge['a']
b = edge['b']
count = edge['count']
return (count / a['numberOfRulings'] >= 0.05) or (count / b['numberOfRulings'] >= 0.05)
    return tuple(e for e in map(make_edge, combos) if allow_edge(e))
@blueprint.route('/', methods=['GET'])
@cache.cached(timeout=10)
def home():
combos = tuple(queries.combos())
points = compute_points(combos)
nodes = build_data(points)
edges = compute_edges(nodes, combos)
v = viewbox(points)
"""Home page."""
return render_template('public/home.html', nodes=nodes, edges=edges, viewbox=v, gradient=gradient, colors=COLORS)
@blueprint.route('/about/')
def about():
"""About page."""
return render_template('public/about.html')
```
|
{
"source": "JesseTG/Sock",
"score": 2
}
|
#### File: cli/train/__main__.py
```python
import argparse
import logging
import math
import random
from argparse import ArgumentParser, ArgumentTypeError, FileType
import ignite
import torch
from ignite.engine import Engine, Events, State, create_supervised_evaluator, create_supervised_trainer
from ignite.handlers import EarlyStopping, Timer
from ignite.metrics import BinaryAccuracy, Loss, Precision, Recall
from torch.nn import Module
from torch.optim import Adam, Optimizer
from torch.utils.data import ConcatDataset, DataLoader, Dataset
from torch.utils.data.dataset import Subset, random_split
from sock.model.data import WordEmbeddings, tokenize
from sock.model.data.batching import sentence_label_pad, sentence_pad
from sock.model.dataset import (CresciTweetDataset, Five38TweetDataset, LabelDataset, NbcTweetDataset,
SingleLabelDataset, TweetTensorDataset)
from sock.model.nn import ContextualLSTM
from sock.model.serial import load, save
from sock.utils import BOT, NOT_BOT, Metrics, Splits, expand_binary_class, split_integers, to_singleton_row
def positive_int(arg: str) -> int:
i = int(arg)
if i <= 0:
raise ArgumentTypeError(f"{i} is not a positive integer")
return i
def positive_finite_float(arg: str) -> float:
f = float(arg)
if f <= 0 or math.isnan(f) or math.isinf(f):
raise ArgumentTypeError(f"{f} is not a positive and finite number")
return f
def nonzero_finite_float(arg: str) -> float:
f = float(arg)
if math.isnan(f) or math.isinf(f):
raise ArgumentTypeError(f"{f} is not a finite nonzero number")
return f
def nonzero_fraction(arg: str) -> float:
f = float(arg)
if f <= 0.0 or f >= 1.0:
raise ArgumentTypeError(f"{f} is not between 0 and 1 (exclusive)")
return f
def build_parser() -> ArgumentParser:
parser = ArgumentParser(
description="Train a model"
)
data_args = parser.add_argument_group("Data")
data_args.add_argument(
"--glove",
help="The word vector embeddings to use",
metavar="path",
type=FileType('r', encoding="utf8"),
required=True
)
data_args.add_argument(
"--bots",
help="One or more files containing tweets known to be from bots",
metavar="path",
type=FileType('r', encoding="utf8"),
nargs="+",
required=True
)
data_args.add_argument(
"--humans",
help="One or more files containing tweets known to be from humans",
metavar="path",
type=FileType('r', encoding="utf8"),
nargs="+",
required=True
)
data_args.add_argument(
"--max-tweets",
help="The maximum number of the given tweets to use in training the model. Default: all tweets.",
metavar="max",
type=positive_int
)
data_args.add_argument(
"--output",
help="Location to save the trained model",
metavar="out",
type=FileType("wb"),
required=True
)
optimizer_hyperparams = parser.add_argument_group("Optimizer Hyperparameters")
optimizer_hyperparams.add_argument(
"--lr",
help="Learning rate (default: %(default)s)",
type=positive_finite_float,
default=1e-3,
metavar="lr"
)
optimizer_hyperparams.add_argument(
"--eps",
help="Term added to the denominator to improve numerical stability (default: %(default)s)",
type=positive_finite_float,
default=1e-8,
metavar="e"
)
optimizer_hyperparams.add_argument(
"--beta0",
help="First coefficient used for computing running averages of gradient and its square (default: %(default)s)",
type=positive_finite_float,
default=0.9,
metavar="b0"
)
optimizer_hyperparams.add_argument(
"--beta1",
help="Second coefficient used for computing running averages of gradient and its square (default: %(default)s)",
type=positive_finite_float,
default=0.999,
metavar="b1"
)
optimizer_hyperparams.add_argument(
"--weight-decay",
help="Weight decay (L2 penalty) (default: %(default)s)",
type=nonzero_finite_float,
default=0.0,
metavar="wd"
)
optimizer_hyperparams.add_argument(
"--amsgrad",
help="Whether to use the AMSGrad variant of this algorithm from the paper On the Convergence of Adam and Beyond (default: %(default)s)",
action="store_true"
)
lr_hyperparams = parser.add_argument_group("LR Scheduler Hyperparameters")
lr_hyperparams.add_argument(
"--lr-patience",
help="If no improvement after this many epochs, reduce the learning rate (default: %(default)s)",
type=positive_int,
default=3,
metavar="patience"
)
training_hyperparams = parser.add_argument_group("Training Hyperparameters")
training_hyperparams.add_argument(
"--max-epochs",
help="The maximum number of passes to make over the input data (default: %(default)s)",
type=positive_int,
default=50,
metavar="epochs"
)
training_hyperparams.add_argument(
"--trainer-patience",
help="If no improvement after this many epochs, end the training (default: %(default)s)",
type=positive_int,
default=10,
metavar="patience"
)
training_hyperparams.add_argument(
"--batch-size",
help="The number of tweets to process at once (default: %(default)s)",
metavar="size",
type=positive_int,
default=500
)
training_hyperparams.add_argument(
"--train-split",
help="Fraction of input data set aside for training the model (default: %(default)s)",
type=nonzero_fraction,
default=0.5
)
training_hyperparams.add_argument(
"--valid-split",
help="Fraction of input data set aside for tuning hyperparameters (default: %(default)s)",
type=nonzero_fraction,
default=0.2
)
training_hyperparams.add_argument(
"--test-split",
help="Fraction of input data set aside for evaluating model performance (default: %(default)s)",
type=nonzero_fraction,
default=0.3
)
return parser
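# A hypothetical invocation of this trainer (file names are placeholders, and the module
# path is inferred from the package imports, so adjust both to your checkout):
#   python -m sock.cli.train --glove glove.twitter.27B.25d.txt \
#       --bots cresci_bots.csv --humans cresci_humans.csv \
#       --output model.pth --max-epochs 20 --batch-size 500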
def validate_args(args):
if args.beta0 >= args.beta1:
raise ArgumentTypeError(f"{args.beta0} is not less than {args.beta1}")
    # Compare with a tolerance: summing three floats (e.g. 0.1 + 0.2 + 0.7) can miss 1.0 exactly
    if not math.isclose(args.train_split + args.valid_split + args.test_split, 1.0):
raise ArgumentTypeError(f"{args.train_split}, {args.valid_split}, and {args.test_split} do not add to 1")
def load_tweets(file, embeddings: WordEmbeddings) -> Dataset:
try:
logging.debug("Loading %s as a Cresci-format dataset", file.name)
tweets = CresciTweetDataset(file.name)
logging.info("Loaded %s as a Cresci-format dataset (len=%d)", file.name, len(tweets))
return TweetTensorDataset(tweets, tokenize, embeddings)
except Exception as e:
logging.debug("Couldn't load %s as a Cresci-format dataset: %s", file.name, e)
try:
logging.debug("Loading %s as a NBC-format dataset", file.name)
tweets = NbcTweetDataset(file.name)
logging.info("Loaded %s as a NBC-format dataset (len=%d)", file.name, len(tweets))
return TweetTensorDataset(tweets, tokenize, embeddings)
except Exception as e:
logging.debug("Couldn't load %s as a NBC-format dataset: %s", file.name, e)
try:
logging.debug("Loading %s as a 538-format dataset", file.name)
tweets = Five38TweetDataset(file.name)
logging.info("Loaded %s as a 538-format dataset (len=%d)", file.name, len(tweets))
return TweetTensorDataset(tweets, tokenize, embeddings)
except Exception as e:
logging.debug("Couldn't load %s as a 538-format dataset: %s", file.name, e)
logging.error("Could not load %s as a tweet dataset!", file.name)
raise ValueError(f"Could not load {file.name} as a tweet dataset")
def load_glove(args) -> WordEmbeddings:
logging.info("Loading GloVe embeddings from %s", args.glove.name)
embeddings = WordEmbeddings(args.glove, device="cuda")
logging.info(
"Loaded GloVe embeddings from %s (dim=%d, device=%s, len=%d)",
args.glove.name,
embeddings.dim,
embeddings.device,
len(embeddings)
)
return embeddings
def create_model(args, glove: WordEmbeddings) -> ContextualLSTM:
model = ContextualLSTM(glove, device="cuda")
model.to(device="cuda")
logging.info("Created ContextualLSTM to train (device=%s)", model.device)
return model
def create_optimizer(args, model: ContextualLSTM) -> Optimizer:
# TODO: Exclude embedding weights from Adam
optimizer = Adam(
model.parameters(),
lr=args.lr,
betas=(args.beta0, args.beta1),
eps=args.eps,
weight_decay=args.weight_decay,
amsgrad=args.amsgrad
)
logging.info(
"Created Adam optimizer (lr=%g, betas=(%g, %g), eps=%g, weight_decay=%g, amsgrad=%s)",
args.lr,
args.beta0,
args.beta1,
args.eps,
args.weight_decay,
args.amsgrad
)
return optimizer
def load_tweet_datasets(args, datasets, type: str, glove: WordEmbeddings) -> Dataset:
loaded = []
for d in datasets:
logging.info("Loading known %ss from %s", type, d.name)
loaded.append(load_tweets(d, glove))
dataset = None
if len(loaded) == 1:
dataset = loaded[0]
else:
dataset = ConcatDataset(loaded)
if args.max_tweets is not None:
indices = random.sample(range(len(dataset)), args.max_tweets // 2)
dataset = Subset(dataset, indices)
logging.info("Loaded %d %s datasets with %d tweets", len(loaded), type, len(dataset))
return dataset
def create_splits(args, type: str, data: Dataset) -> Splits:
length = len(data)
split_lengths = split_integers(length, (args.train_split, args.valid_split, args.test_split))
logging.info(
"Splitting %d %s tweets (train=%g, valid=%g, test=%g)",
length,
type,
args.train_split,
args.valid_split,
args.test_split
)
splits = random_split(data, split_lengths)
logging.info("Split %d %s tweets (train=%d, valid=%d, test=%d)", length, type, *split_lengths)
return Splits(data, *splits)
def create_loader(args, human: DataLoader, bot: DataLoader, subset: str) -> DataLoader:
human = SingleLabelDataset(human, NOT_BOT)
bot = SingleLabelDataset(bot, BOT)
dataset = ConcatDataset([human, bot])
dataloader = DataLoader(dataset=dataset, shuffle=True, batch_size=args.batch_size, collate_fn=sentence_label_pad)
logging.info("Created a %s DataLoader (len=%d, batch_size=%d)", subset, len(dataset), args.batch_size)
return dataloader
def create_lr_scheduler(args, optimizer: Optimizer):
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=args.lr_patience)
return scheduler
def create_evaluator(model: ContextualLSTM, cost: Module):
evaluator = create_supervised_evaluator(
model,
metrics={
"loss": Loss(cost, output_transform=to_singleton_row),
"accuracy": BinaryAccuracy(output_transform=to_singleton_row),
"recall": Recall(average=True, output_transform=expand_binary_class),
"precision": Precision(average=True, output_transform=expand_binary_class),
}
)
evaluator._logger.setLevel(logging.WARNING)
return evaluator
def create_trainer(
args,
model: ContextualLSTM,
optimizer: Optimizer,
cost: Module,
evaluator: Engine,
scheduler,
training_data: DataLoader,
validation_data: DataLoader
):
model.train(True)
trainer = ignite.engine.create_supervised_trainer(model, optimizer, cost, model.device)
trainer.state = ignite.engine.State()
@trainer.on(Events.COMPLETED)
def finish_training(trainer: Engine):
model.train(False)
logging.info("Finished training and evaluation")
@trainer.on(Events.STARTED)
def init_metrics(trainer: Engine):
trainer.state.training_metrics = Metrics([], [], [], [])
trainer.state.validation_metrics = Metrics([], [], [], [])
logging.info("Initialized metrics")
@trainer.on(Events.EPOCH_COMPLETED)
def validate(trainer: Engine):
training_metrics = evaluator.run(training_data).metrics # type: Dict[str, float]
trainer.state.training_metrics.loss.append(training_metrics["loss"])
trainer.state.training_metrics.accuracy.append(training_metrics["accuracy"])
trainer.state.training_metrics.recall.append(training_metrics["recall"])
trainer.state.training_metrics.precision.append(training_metrics["precision"])
logging.info(
"[%d / %d] Train: (loss=%.4f, accuracy=%.4f, recall=%.4f, precision=%.4f",
trainer.state.epoch,
trainer.state.max_epochs,
training_metrics["loss"],
training_metrics["accuracy"],
training_metrics["recall"],
training_metrics["precision"]
)
validation_metrics = evaluator.run(validation_data).metrics # type: Dict[str, float]
trainer.state.validation_metrics.loss.append(validation_metrics["loss"])
trainer.state.validation_metrics.accuracy.append(validation_metrics["accuracy"])
trainer.state.validation_metrics.recall.append(validation_metrics["recall"])
trainer.state.validation_metrics.precision.append(validation_metrics["precision"])
logging.info(
"[%d / %d] Valid: (loss=%.4f, accuracy=%.4f, recall=%.4f, precision=%.4f",
trainer.state.epoch,
trainer.state.max_epochs,
validation_metrics["loss"],
validation_metrics["accuracy"],
validation_metrics["recall"],
validation_metrics["precision"]
)
scheduler.step(validation_metrics["loss"])
timer = Timer(average=True)
@trainer.on(Events.COMPLETED)
def record_time(trainer: Engine):
trainer.state.duration = timer.value()
def score_function(trainer: Engine) -> float:
return -trainer.state.validation_metrics.loss[-1]
handler = EarlyStopping(patience=args.trainer_patience, score_function=score_function, trainer=trainer)
trainer.add_event_handler(Events.EPOCH_COMPLETED, handler)
timer.attach(trainer, start=Events.STARTED, pause=Events.COMPLETED)
trainer._logger.setLevel(logging.WARNING)
return trainer
def main():
logging.basicConfig(
format='[%(levelname)s %(asctime)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO
)
parser = build_parser()
args = parser.parse_args()
if not torch.cuda.is_available():
raise RuntimeError("CUDA is required but not available")
if not torch.backends.cudnn.is_available():
raise RuntimeError("CUDNN is required but not available")
validate_args(args)
cost = torch.nn.BCELoss()
glove = load_glove(args)
bots = load_tweet_datasets(args, args.bots, "bot", glove)
humans = load_tweet_datasets(args, args.humans, "human", glove)
bot_splits = create_splits(args, "bot", bots)
human_splits = create_splits(args, "human", humans)
training_data = create_loader(args, human_splits.training, bot_splits.training, "training")
validation_data = create_loader(args, human_splits.validation, bot_splits.validation, "validation")
testing_data = create_loader(args, human_splits.testing, bot_splits.testing, "testing")
model = create_model(args, glove)
optimizer = create_optimizer(args, model)
lr_scheduler = create_lr_scheduler(args, optimizer)
evaluator = create_evaluator(model, cost)
trainer = create_trainer(
args,
model,
optimizer,
cost,
evaluator,
lr_scheduler,
training_data,
validation_data
)
train_result = trainer.run(training_data, max_epochs=args.max_epochs) # type: State
logging.info("Running trained model on test set")
test_metrics = evaluator.run(testing_data).metrics # type: dict
logging.info("Finished running trained model on test set")
logging.info("Results:")
logging.info(" Time: %.2fs", train_result.duration)
logging.info(" Epochs: %d / %d", train_result.epoch, train_result.max_epochs)
logging.info(" Iterations: %d", train_result.iteration)
logging.info(" Training:")
logging.info(" Loss: %.4f", train_result.training_metrics.loss[-1])
logging.info(" Accuracy: %.4f", train_result.training_metrics.accuracy[-1])
logging.info(" Recall: %.4f", train_result.training_metrics.recall[-1])
logging.info(" Precision: %.4f", train_result.training_metrics.precision[-1])
logging.info(" Validation:")
logging.info(" Loss: %.4f", train_result.validation_metrics.loss[-1])
logging.info(" Accuracy: %.4f", train_result.validation_metrics.accuracy[-1])
logging.info(" Recall: %.4f", train_result.validation_metrics.recall[-1])
logging.info(" Precision: %.4f", train_result.validation_metrics.precision[-1])
logging.info(" Testing:")
logging.info(" Loss: %.4f", test_metrics['loss'])
logging.info(" Accuracy: %.4f", test_metrics['accuracy'])
logging.info(" Recall: %.4f", test_metrics['recall'])
logging.info(" Precision: %.4f", test_metrics['precision'])
logging.info("Accuracy: %.2f%% of all guesses were correct", test_metrics["accuracy"] * 100)
logging.info("Recall: %.2f%% of guesses that should have identified bots did", test_metrics["recall"] * 100)
logging.info("Precision: %.2f%% of 'bot' guesses were correct", test_metrics["precision"] * 100)
save(model, args.output)
logging.info("Saved trained model (minus embeddings) to %s", args.output.name)
if __name__ == '__main__':
main()
```
#### File: model/data/embedding.py
```python
import csv
from io import IOBase
from typing import Iterable, Sequence, Union
from pathlib import Path
import numpy
import pandas
import torch
from pandas import DataFrame
from torch import Tensor
from torch.nn import Embedding
TORCH_INT_DTYPES = (torch.uint8, torch.int8, torch.short, torch.int, torch.long)
class WordEmbeddings:
def __init__(self, path: Union[DataFrame, str, IOBase], device="cpu", pinned=False):
if isinstance(path, (str, IOBase, Path)):
data = self._load_frame(path)
elif isinstance(path, DataFrame):
data = path
else:
raise TypeError(f"Expected a str, open file, Path, or DataFrame, got {path}")
# self.words = data[0]
# No need to keep around the wordlist separately, but if so we can just keep the dataframe
if data[0][0] != "<pad>":
raise ValueError(f"First word must be '<pad>', but it's {data[0][0]}")
if data[0][1] not in ("<unknown>", "<unk>"):
raise ValueError(f"Second word must be '<unknown>' or '<unk>', but it's {data[0][1]}")
# TODO: Verify no duplicate words
self._dim = int(data.get_dtype_counts().float64)
self.vectors = torch.as_tensor(data.iloc[:, 1:].values, dtype=torch.float, device=device) # type: Tensor
self.vectors.requires_grad_(False)
if pinned:
self.vectors = self.vectors.pin_memory()
# [all rows, second column:last column]
# Pinning self.vectors does *not* improve encoding performance
# torch.half isn't available for index_select, so we'll just use torch.float
self.indices = {word: index for index, word in enumerate(data[0])}
# note: must prepend a <unknown> zero vector to embedding file
# do so with python3 -c 'print("<unknown>", *([0.0]*25))' >> the_data_file.txt
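        # A compatible file therefore starts with the two special rows followed by the
        # real vocabulary, e.g. (values illustrative only):
        #   <pad> 0.0 0.0 ... 0.0
        #   <unknown> 0.0 0.0 ... 0.0
        #   <user> 0.62415 0.62476 ... 0.75345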
def _load_frame(self, file):
return pandas.read_table(
file,
delim_whitespace=True,
header=None,
engine="c",
encoding="utf8",
na_filter=False,
memory_map=True,
quoting=csv.QUOTE_NONE
)
def _get_word(self, index):
return self.indices.get(index, 1)
def __len__(self) -> int:
return len(self.indices)
@property
def device(self) -> torch.device:
return self.vectors.device
@property
def dim(self) -> int:
return self._dim
def __getitem__(self, index) -> Tensor:
# Indexing uses the same underlying storage
if isinstance(index, int):
# If we're indexing by integer...
return self.vectors[index]
elif isinstance(index, str):
# If we're trying to get a vector from a word...
return self.vectors[self._get_word(index)]
elif torch.is_tensor(index) and index.dim() == 0:
# If this is a one-element tensor...
return self.vectors[index]
else:
raise TypeError(f"Cannot index with a {type(index).__name__}")
def encode(self, tokens: Sequence[str]) -> Tensor:
if len(tokens) > 0:
# If this is a non-empty sentence...
return torch.as_tensor([self._get_word(t) for t in tokens], dtype=torch.long, device=self.device)
else:
return torch.as_tensor([1], dtype=torch.long, device=self.device)
def to_layer(self) -> Embedding:
return Embedding.from_pretrained(self.vectors)
```
#### File: model/dataset/common.py
```python
import csv
import sys
from numbers import Integral
from typing import Callable, Sequence, Tuple, Union
import numpy
import torch
from torch import Tensor
from torch.utils.data.dataset import Dataset
from sock.model.data import WordEmbeddings
def _to_int(i):
if i in ("", "NULL"):
return 0
else:
return int(i)
class TweetDataset(Dataset):
pass
class TweetTensorDataset(Dataset):
def __init__(
self,
data_source: TweetDataset,
tokenizer: Callable[[str], Sequence[str]],
embeddings: WordEmbeddings
):
# TODO: Consider supporting reading directly from a file
self.embeddings = embeddings
self.tokenizer = tokenizer
self.data_source = data_source
self.tensors = [None] * len(data_source)
# NOTE: Each tensor might have a different shape, as each tensor represents a tweet
@property
def device(self):
return self.embeddings.device
def __len__(self) -> int:
return len(self.tensors)
def __getitem__(self, index: Integral) -> Tensor:
if self.tensors[index] is None:
text = self.data_source[index].text
tokens = self.tokenizer(text)
self.tensors[index] = self.embeddings.encode(tokens)
return self.tensors[index]
```
#### File: model/dataset/label.py
```python
from numbers import Integral
from typing import Any, Sequence, Tuple, TypeVar
import torch
from torch.utils.data.dataset import Dataset
T = TypeVar('T')
U = TypeVar('U')
class LabelDataset(Dataset):
def __init__(self, data: Sequence[T], labels: Sequence[U]):
self.data = data
self.labels = labels
if len(data) != len(labels):
raise ValueError(f"data and labels must have the same length ({len(data)} vs {len(labels)})")
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: Integral) -> Tuple[T, U]:
return (self.data[index], self.labels[index])
class SingleLabelDataset(Dataset):
def __init__(self, data: Sequence[T], label: Any):
self.data = data
self.label = label
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: Integral) -> Tuple[T, Any]:
return (self.data[index], self.label)
```
#### File: model/nn/ContextualLSTM.py
```python
from typing import Sequence, Tuple, Union
import torch
from torch import LongTensor, Tensor
from torch.nn import LSTM, Embedding, Linear, Module, ReLU, Sequential, Sigmoid, functional
from torch.nn.init import normal_
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pack_sequence, pad_packed_sequence, pad_sequence
from sock.model.data import WordEmbeddings
class ContextualLSTM(Module):
def __init__(
self,
word_embeddings: WordEmbeddings,
hidden_layers: int=32,
device: Union[torch.device, str]="cpu"
):
super(ContextualLSTM, self).__init__()
self.word_embeddings = word_embeddings
self.embeddings = self.word_embeddings.to_layer() # type: Embedding
self.embeddings.padding_idx = 0
self.lstm = LSTM(word_embeddings.dim, hidden_layers, batch_first=False)
self.output = Sequential(
Linear(hidden_layers, 128),
ReLU(),
Linear(128, 64),
ReLU(),
Linear(64, 1),
Sigmoid()
)
self.to(device, non_blocking=True)
# is there a layer that takes the weighted average of two like-shaped tensors? would be useful
# for mixing the main output and the aux output like the paper describes
# if not, just mix them myself
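        # One straightforward way to mix them by hand (not wired up here) is a scalar blend:
        #   mixed = alpha * main_out + (1 - alpha) * aux_out
        # where alpha is either a fixed hyperparameter or a learnable nn.Parameter.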
@property
def device(self) -> torch.device:
return self.embeddings.weight.device
def _init_hidden(self, batch_size) -> Tuple[Tensor, Tensor]:
def make_zeros():
return torch.zeros(
self.lstm.num_layers,
batch_size,
self.lstm.hidden_size,
dtype=torch.float,
device=self.device
)
return (make_zeros(), make_zeros())
def extra_repr(self) -> str:
return f"<device>: {self.device}"
def forward(self, sentences: Sequence[LongTensor]) -> Tensor:
padded = sentences[0]
lengths = sentences[1]
num_sentences = len(lengths)
self.hidden = self._init_hidden(num_sentences)
embedding = self.embeddings(padded)
# ^ Size([num_tweets, longest_tweet, self.word_embeddings.dim])
packed = pack_padded_sequence(embedding, lengths, True)
self.lstm.flatten_parameters()
# NOTE: Don't know what this does, need to ask around
out, (hn, cn) = self.lstm(packed, self.hidden)
# ^ Size([num_tweets, num_tokens, num_dims]) -> Size([???])
# TODO: Figure out exactly what the dimensions are
# out: Output features on every element (word vector) of the input
# hn: Last element's hidden state
# cn: Last element's cell state
hn = hn.view(num_sentences, self.lstm.hidden_size)
# Only using one LSTM layer
result = self.output(hn)
return result.view(num_sentences)
# a = functional.relu(self.dense1(hn)) # Size([???]) -> Size([???])
# b = functional.relu(self.dense2(a)) # Size([???]) -> Size([???])
# c = torch.sigmoid(self.output(b)) # Size([???]) -> Size([num_tweets, 1])
# return c.view(num_sentences)
# TODO: Consider using BCEWithLogitsLoss
# TODO: What optimizer did the paper use? What loss function?
def save(model: ContextualLSTM, out: Union[str]):
state = model.state_dict() # type: dict
del state["embeddings.weight"]
torch.save(state, out)
def load(embeddings: WordEmbeddings, path, device) -> ContextualLSTM:
model = ContextualLSTM(embeddings)
state = torch.load(path, device)
model.load_state_dict(state, strict=False)
model.train(False)
return model
```
#### File: model/serial/__init__.py
```python
from typing import Union, Sequence, Tuple, Optional
import torch
from torch.optim import Optimizer
from sock.model.data import WordEmbeddings
from sock.model.nn import ContextualLSTM
# All parameters EXCEPT the embeddings are saved to disk
# The embeddings determine the shapes of some parameters, and load_state_dict needs the shapes to be the same
# TODO: Save the hash of the embeddings
def save(model: ContextualLSTM, out: Union[str]):
state = model.state_dict()
del state["embeddings.weight"]
torch.save(state, out)
def load(embeddings: WordEmbeddings, path, device: torch.device) -> ContextualLSTM:
model = ContextualLSTM(embeddings, device=device)
state = torch.load(path, device)
model.load_state_dict(state, strict=False)
model.eval()
return model
```
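A minimal round-trip sketch for the `save`/`load` helpers above (paths are placeholders; the same embeddings must be supplied again at load time because their weights are intentionally left out of the checkpoint):
```python
import torch

from sock.model.data import WordEmbeddings
from sock.model.nn import ContextualLSTM
from sock.model.serial import load, save

# Placeholder embeddings file in GloVe text format (first rows: <pad>, <unknown>)
embeddings = WordEmbeddings("glove.twitter.25d.txt", device="cpu")
model = ContextualLSTM(embeddings, device="cpu")

save(model, "model.pth")  # writes every parameter except embeddings.weight
restored = load(embeddings, "model.pth", torch.device("cpu"))  # rebuilt around the same embeddings, in eval mode
```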
#### File: Sock/tests/test_glove.py
```python
import pytest
import torch
from pandas import DataFrame
from tests.marks import *
from torch.nn import Embedding
from sock.model.data import WordEmbeddings
FIRST_ROW_VECTOR = torch.as_tensor([
0.62415, 0.62476, -0.082335, 0.20101, -0.13741, -0.11431, 0.77909, 2.6356, -0.46351, 0.57465,
-0.024888, -0.015466, -2.9696, -0.49876, 0.095034, -0.94879, -0.017336, -0.86349, -1.3348, 0.046811,
0.36999, -0.57663, -0.48469, 0.40078, 0.75345
], dtype=torch.float)
ZERO_VECTOR = torch.zeros_like(FIRST_ROW_VECTOR)
@pytest.fixture(scope="module")
def embedding_layer(glove_embedding: WordEmbeddings):
return Embedding.from_pretrained(glove_embedding.vectors)
def test_correct_embedding_words_loaded(glove_data: DataFrame):
assert glove_data[0][2] == "<user>"
@modes("cpu", "cuda")
def test_all_embedding_vectors_loaded(glove_embedding: WordEmbeddings):
assert len(glove_embedding) == 1193516
def test_pad_is_index_0(glove_data: DataFrame):
assert glove_data[0][0] == "<pad>"
def test_unk_is_index_1(glove_data: DataFrame):
assert glove_data[0][1] == "<unk>"
@modes("cpu", "cuda")
def test_first_word_vector_is_all_zeros(glove_embedding: WordEmbeddings):
assert glove_embedding[0].cpu().numpy() == pytest.approx(ZERO_VECTOR.numpy())
@modes("cpu", "cuda")
def test_correct_embedding_vector_length(glove_embedding: WordEmbeddings):
assert len(glove_embedding.vectors[0]) == 25
@modes("cpu", "cuda")
def test_correct_embedding_values_loaded(glove_embedding: WordEmbeddings):
assert glove_embedding.vectors[2].cpu().numpy() == pytest.approx(FIRST_ROW_VECTOR.numpy())
@modes("cpu", "cuda")
def test_embedding_length_consistent(glove_embedding: WordEmbeddings, glove_data: DataFrame):
assert len(glove_embedding.vectors) == len(glove_data)
@modes("cpu", "cuda")
def test_get_vector_by_int_index(glove_embedding: WordEmbeddings):
assert glove_embedding[2].cpu().numpy() == pytest.approx(FIRST_ROW_VECTOR.numpy())
@modes("cpu", "cuda")
def test_get_vector_by_str_index(glove_embedding: WordEmbeddings):
assert glove_embedding["<user>"].cpu().numpy() == pytest.approx(FIRST_ROW_VECTOR.numpy())
@modes("cpu", "cuda")
def test_encode_returns_tensor(glove_embedding: WordEmbeddings):
tokens = "<user> it is not in my video".split()
encoding = glove_embedding.encode(tokens)
assert torch.is_tensor(encoding)
@modes("cpu", "cuda")
def test_encode_has_correct_value(glove_embedding: WordEmbeddings):
tokens = "<user> it is not in my video".split()
encoding = glove_embedding.encode(tokens)
assert torch.equal(encoding, torch.tensor([2, 35, 34, 80, 37, 31, 288],
dtype=torch.long, device=glove_embedding.device))
@modes("cpu", "cuda")
def test_unknown_word_embeds_to_zero_vector(glove_embedding: WordEmbeddings):
embedding = glove_embedding["<france>"]
assert embedding.cpu().numpy() == pytest.approx(ZERO_VECTOR.numpy())
@modes("cpu", "cuda")
def test_unknown_word_encodes_to_index_1(glove_embedding: WordEmbeddings):
tokens = "<france> <spain> <china> <user>".split()
encoding = glove_embedding.encode(tokens)
assert torch.equal(encoding, torch.as_tensor([1, 1, 1, 2], dtype=torch.long, device=glove_embedding.device))
@modes("cpu", "cuda")
def test_bench_encode(benchmark, glove_embedding: WordEmbeddings):
tokens = "<user> it <PASSWORD> my video".split()
result = benchmark(glove_embedding.encode, tokens)
assert result is not None
@modes("cpu", "cuda")
def test_embedding_can_create_layer(glove_embedding: WordEmbeddings):
layer = glove_embedding.to_layer()
assert isinstance(layer, Embedding)
@modes("cpu", "cuda")
def test_embedding_layer_can_embed_words(glove_embedding: WordEmbeddings):
tokens = "<user> <PASSWORD>".split()
encoding = glove_embedding.encode(tokens)
layer = glove_embedding.to_layer()
assert layer(encoding).cpu().numpy()[0] == pytest.approx(FIRST_ROW_VECTOR.numpy())
```
#### File: Sock/tests/test_lstm.py
```python
import pytest
import torch
from tests.marks import *
from sock.model.data import WordEmbeddings, sentence_label_pad, sentence_pad
from sock.model.nn import ContextualLSTM
@modes("cpu", "cuda")
def test_devices_are_the_same(lstm: ContextualLSTM, glove_embedding: WordEmbeddings):
assert lstm.device == glove_embedding.device
def test_create_lstm(lstm: ContextualLSTM):
assert lstm is not None
def test_has_modules(lstm: ContextualLSTM):
modules = tuple(lstm.modules())
assert modules != []
def test_has_parameters(lstm: ContextualLSTM):
parameters = tuple(lstm.parameters())
assert parameters != []
@modes("cuda", "dp")
def test_lstm_moves_all_data_to_cuda(lstm: ContextualLSTM):
for p in lstm.parameters():
assert p.is_cuda
@modes("cuda")
def test_lstm_moves_embeddings_to_cuda(lstm_cuda: ContextualLSTM):
assert lstm_cuda.embeddings.weight.is_cuda
@modes("dp")
def test_lstm_moves_embeddings_to_cuda_in_dp_mode(lstm_dp):
assert lstm_dp.module.embeddings.weight.is_cuda
@modes("cuda", "dp")
def test_lstm_needs_input_from_same_device(lstm: ContextualLSTM):
with pytest.raises(RuntimeError):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 78, 3, 1], dtype=torch.long, device="cpu")
])
lstm(encoding)
def test_lstm_evaluates(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([7, 1, 5, 78, 3, 1], dtype=torch.long, device=device)
])
result = lstm(encoding)
assert torch.is_tensor(result)
assert result.device == device
@pytest.mark.benchmark(group="test_bench_lstm_evaluates")
def test_bench_lstm_evaluates(benchmark, lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([7, 1, 5, 78, 3, 1], dtype=torch.long, device=device)
] * 1000)
result = benchmark(lstm, encoding)
assert torch.is_tensor(result)
assert result.device == device
def test_lstm_rejects_list_of_lists(lstm: ContextualLSTM):
encoding = [
[0, 1, 5, 8, 3, 1],
[1, 4, 6, 1, 9, 7],
[9, 0, 6, 9, 9, 0],
[2, 3, 6, 1, 2, 4],
]
with pytest.raises(Exception):
result = lstm(encoding)
def test_lstm_rejects_tensor(lstm: ContextualLSTM, device: torch.device):
encoding = torch.tensor([
[0, 1, 5, 8, 3, 1],
[1, 4, 6, 1, 9, 7],
[9, 0, 6, 9, 9, 0],
[2, 3, 6, 1, 2, 4],
], dtype=torch.long, device=device)
with pytest.raises(Exception):
result = lstm(encoding)
def test_lstm_evaluates_batches_of_same_length(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 8, 3, 1], dtype=torch.long, device=device),
torch.tensor([1, 4, 6, 1, 9, 7], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9, 9, 0], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4], dtype=torch.long, device=device),
])
result = lstm(encoding)
assert torch.is_tensor(result)
def test_lstm_evaluates_batches_of_different_length_unsorted(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 8, 3], dtype=torch.long, device=device),
torch.tensor([1, 4, 6, 1, 9, 7, 9, 1], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4, 4], dtype=torch.long, device=device),
])
result = lstm(encoding)
assert torch.is_tensor(result)
def test_lstm_evaluates_batches_of_different_length_in_sorted(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([1, 4, 6, 1, 9, 7, 9, 1], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4, 4], dtype=torch.long, device=device),
torch.tensor([0, 1, 5, 8, 3], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9], dtype=torch.long, device=device),
])
result = lstm(encoding)
assert torch.is_tensor(result)
def test_lstm_returns_1d_float_tensor(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 8, 3, 1], dtype=torch.long, device=device),
torch.tensor([1, 4, 6, 1, 9, 7], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9, 9, 0], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4], dtype=torch.long, device=device),
])
result = lstm(encoding)
assert result.dtype.is_floating_point
assert result.shape == torch.Size([len(encoding[0])])
def test_lstm_in_training_mode_by_default(lstm: ContextualLSTM):
assert lstm.training
def test_lstm_eval_sets_eval_mode(lstm: ContextualLSTM):
lstm.eval()
assert not lstm.training
def test_lstm_train_false_sets_eval_mode(lstm: ContextualLSTM):
lstm.train(False)
assert not lstm.training
def test_lstm_results_have_no_gradient_with_no_grad(lstm: ContextualLSTM, device: torch.device):
encoding = sentence_pad([
torch.tensor([0, 1, 5, 8, 3, 1], dtype=torch.long, device=device),
torch.tensor([1, 4, 6, 1, 9, 7], dtype=torch.long, device=device),
torch.tensor([9, 0, 6, 9, 9, 0], dtype=torch.long, device=device),
torch.tensor([2, 3, 6, 1, 2, 4], dtype=torch.long, device=device),
])
with torch.no_grad():
result = lstm(encoding)
assert not result.requires_grad
def test_get_lstm_cpu(request, lstm_cpu: ContextualLSTM):
assert lstm_cpu is not None
assert type(lstm_cpu) == ContextualLSTM
assert lstm_cpu.device.type == "cpu"
```
#### File: Sock/tests/test_tokenize.py
```python
import pytest
from sock.model.data import tokenize
@pytest.mark.parametrize("input,expected", [
("<3", "<heart>"),
("💓", "<heart>"),
("<3 ♥", "<heart> <heart>"),
("♥♥", "<heart> <heart>"),
("@iconography", "<user>"),
("@NASA", "<user>"),
("@", "@"),
("#EXCELLENT", "<hashtag> excellent <allcaps>"),
("NASA", "nasa <allcaps>"),
("NASAcool", "nasacool"),
("wayyyy", "way <elong>"),
("!!!", "! <repeat>"),
("holy shit!!", "holy shit ! <repeat>"),
("What are you doing?", "what are you doing ?"),
("Are you ok!?", "are you ok ! ?"),
("be careful what you wish for.....", "be careful what you wish for . <repeat>"),
("you are wayyyy out of line, buddy", "you are way <elong> out of line , buddy"),
("Here's an idea: be nice to them. :)", "here's an idea : be nice to them . <smile>"),
("Let's be sure to #getoutthevote", "let's be sure to <hashtag> getoutthevote"),
("We must #GetOutTheVote this November", "we must <hashtag> getoutthevote this november"),
("I met <NAME> #MOTHERFUCKER", "i met samuel l . jackson <hashtag> motherfucker <allcaps>"),
("#", "#"),
("alpha#beta", "alpha#beta"),
(".", "."),
(":", ":"),
(",", ","),
("60,", "<number> ,"),
(",60", ", <number>"),
("I need .7 (aka 0.7)", "i need <number> ( aka <number> )"),
("7.0", "<number>"),
("RT @idaho: Look at me!", "<retweet> <user> : look at me !"),
(":( ): :< >: :[ ]: D:", "<sadface> <sadface> <sadface> <sadface> <sadface> <sadface> <sadface>"),
("Not sad):", "not sad ) :"),
(":L :l :| :/ :\\ :*", "<neutralface> <neutralface> <neutralface> <neutralface> <neutralface> <neutralface>"),
("Download to C://Users", "download to c : / / users"),
("I'm dying:P", "i'm dying : p"),
(":P :p index:P :p", "<lolface> <lolface> index : p <lolface>"),
("I am a walrus :) wanna do this??? lets go... 3 times now YEAAAA #excellent",
"i am a walrus <smile> wanna do this ? <repeat> lets go . <repeat> <number> times now yea <allcaps> <elong> <hashtag> excellent"),
("RT @somebody hey How's it going? :D", "<retweet> <user> hey how's it going ? <smile>"),
("", "<empty>"),
('I need to get some "things"', 'i need to get some " things "'),
("My IP is 192.168.1.1, can you help me", "my ip <allcaps> is <number> , can you help me"),
("Today I made 100,000 dollars", "today i made <number> dollars"),
("That'll run you about $10", "that'll run you about $ <number>"),
("I have 1,000, what about you?", "i have <number> , what about you ?"),
("You need 6000, hope that helps.", "you need <number> , hope that helps ."),
("I am so l33t", "i am so l33t"),
("I will never watch RT again.", "i will never watch rt <allcaps> again ."),
("This is\nthe end", "this is <newline> the end"),
("The last\nline\n", "the last <newline> line"),
(" ", "<empty>"),
("\n", "<empty>"),
("\n\n", "<empty>"),
("Two line breaks at the end\n\n", "two line breaks at the end"),
("Send an email to <EMAIL>", "send an email to <EMAIL>")
])
def test_tokenize(input, expected):
actual = tokenize(input)
expected = expected.split()
assert actual == expected
```
|
{
"source": "jessethegame/posthog",
"score": 2
}
|
#### File: posthog/ee/urls.py
```python
from rest_framework_extensions.routers import NestedRegistryItem
from posthog.api.routing import DefaultRouterPlusPlus
from .api import hooks, license
def extend_api_router(root_router: DefaultRouterPlusPlus, *, projects_router: NestedRegistryItem):
root_router.register(r"license", license.LicenseViewSet)
projects_router.register(r"hooks", hooks.HookViewSet, "project_hooks", ["team_id"])
```
|
{
"source": "jessethegame/pusher_client_python",
"score": 2
}
|
#### File: pusher_client_python/pusher/acceptance_test.py
```python
from shim import httplib
import unittest, re, time, cgi
from nose.tools import *
import sys
sys.path.append("../")
import pusher
try:
import test_config
except ImportError:
raise Exception("you must have a test_config.py file in order to run the acceptance tests. Rename test_config.example.py to test_config.py and add your credentials.")
class RequestTest(unittest.TestCase):
def test_trigger(self):
my_pusher = pusher.Pusher(app_id=test_config.app_id, key=test_config.app_key, secret=test_config.app_secret)
channel = my_pusher['test-channel']
result = channel.trigger('test-event', {'message': 'hello world'})
eq_(result, True)
def test_trigger_with_data_key_containing_percent(self):
my_pusher = pusher.Pusher(app_id=test_config.app_id, key=test_config.app_key, secret=test_config.app_secret)
channel = my_pusher['test-channel']
result = channel.trigger('test-event', {'message %': 'hello world'})
eq_(result, True)
def test_trigger_with_data_value_containing_percent(self):
my_pusher = pusher.Pusher(app_id=test_config.app_id, key=test_config.app_key, secret=test_config.app_secret)
channel = my_pusher['test-channel']
result = channel.trigger('test-event', {'message': "fish %"})
eq_(result, True)
```
|
{
"source": "Jesse-Tingle/python-interview-practice",
"score": 4
}
|
#### File: Jesse-Tingle/python-interview-practice/recurring_character.py
```python
given_string = "DBCABA"
def first_recurring(string):
counts = {}
for char in string:
if char in counts:
return char
counts[char] = 1
return None
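# For "DBCABA" the first character seen twice is "B" (found in a single O(n) pass
# with O(1) dict lookups), so the print below outputs: B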
print(first_recurring(given_string))
```
|
{
"source": "jesse-toftum/cash_ml",
"score": 2
}
|
#### File: tests/core_tests/categorical_ensembling_test.py
```python
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
sys.path = [os.path.abspath(os.path.dirname(os.path.dirname(__file__)))] + sys.path
os.environ['is_test_suite'] = 'True'
from cash_ml import Predictor
import dill
import numpy as np
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import tests.utils_testing as utils
def test_categorical_ensemble_basic_classifier():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output',
'pclass': 'categorical',
'embarked': 'categorical',
'sex': 'categorical'
}
ml_predictor = Predictor(
type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train_categorical_ensemble(
df_titanic_train, categorical_column='pclass', optimize_final_model=False)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
# Small sample sizes mean there's a fair bit of noise here
assert -0.155 < test_score < -0.135
def test_categorical_ensembling_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {'MEDV': 'output', 'CHAS': 'categorical'}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train_categorical_ensemble(
df_boston_train,
perform_feature_selection=True,
model_names=model_name,
categorical_column='CHAS')
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
lower_bound = -4.2
assert lower_bound < test_score < -2.8
```
|
{
"source": "Jessevanbekkum/vote-block",
"score": 3
}
|
#### File: Jessevanbekkum/vote-block/voting.py
```python
import json
import sys
from functions import isValidTxn, hashMe, updateState, checkBlockHash, checkBlockValidity, checkChain, getInitialState
import random
import copy
def newVote(voter, party):
return {'voter': voter, 'party': party}
voterList = [u'Alice', u'Bob', u'Charlie', u'Dave']
partyList = ['VVD', 'PVDA', 'D66']
state = getInitialState(voterList, partyList)
genesisBlockTxns = [state]
genesisBlockContents = {u'blockNumber': 0, u'parentHash': None, u'txnCount': 1, u'txns': genesisBlockTxns}
genesisHash = hashMe(genesisBlockContents)
genesisBlock = {u'hash': genesisHash, u'contents': genesisBlockContents}
genesisBlockStr = json.dumps(genesisBlock, sort_keys=True)
chain = [genesisBlock]
blockSizeLimit = 1
def vote(vote, state):
# Inputs: vote, state: vote is a dict with 'voter' and 'party' keys; state holds the registered voters and the running party tallies
# Returns: Updated state, with the voter marked as having voted and the party tally incremented
# NOTE: This does not validate the vote - it just updates the state!
# If the vote is valid, then update the state
state = state.copy() # As dictionaries are mutable, let's avoid any confusion by creating a working copy of the data.
state['voters'][vote['voter']] = 0
state['parties'][vote['party']] += 1
return state
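# --- Added sketch (assumptions, not in the original file) ---
# makeBlock is called in the mining loop below but never defined or imported, and
# txnBuffer is consumed before being assigned. Minimal stand-ins are sketched here:
# makeBlock mirrors the genesis-block structure built above, and txnBuffer is seeded
# with one vote per registered voter (assuming isValidTxn accepts these vote dicts).
def makeBlock(txns, chain):
    parentBlock = chain[-1]
    blockContents = {u'blockNumber': parentBlock[u'contents'][u'blockNumber'] + 1,
                     u'parentHash': parentBlock[u'hash'],
                     u'txnCount': len(txns),
                     u'txns': txns}
    return {u'hash': hashMe(blockContents), u'contents': blockContents}
txnBuffer = [newVote(voter, random.choice(partyList)) for voter in voterList]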
while len(txnBuffer) > 0:
bufferStartSize = len(txnBuffer)
## Gather a set of valid transactions for inclusion
txnList = []
while (len(txnBuffer) > 0) & (len(txnList) < blockSizeLimit):
newTxn = txnBuffer.pop()
validTxn = isValidTxn(newTxn, state) # This will return False if txn is invalid
if validTxn: # If we got a valid state, not 'False'
txnList.append(newTxn)
state = updateState(newTxn, state)
else:
print("ignored transaction")
sys.stdout.flush()
continue # This was an invalid transaction; ignore it and move on
## Make a block
myBlock = makeBlock(txnList, chain)
chain.append(myBlock)
# txnBuffer = [makeTransaction() for i in range(30)]
```
|
{
"source": "JessevanKempen/nutils",
"score": 3
}
|
#### File: nutils/files/blackbox.py
```python
import numpy as np
import pymc3 as pm
import theano
import theano.tensor as tt
# for reproducibility here's some version info for modules used in this notebook
import platform
import IPython
import matplotlib
import matplotlib.pyplot as plt
import emcee
import corner
import math
import os
from autograd import grad
print("Python version: {}".format(platform.python_version()))
print("IPython version: {}".format(IPython.__version__))
print("Numpy version: {}".format(np.__version__))
print("Theano version: {}".format(theano.__version__))
print("PyMC3 version: {}".format(pm.__version__))
print("Matplotlib version: {}".format(matplotlib.__version__))
print("emcee version: {}".format(emcee.__version__))
print("corner version: {}".format(corner.__version__))
import numpy as np
import pymc3 as pm
import arviz as az
import warnings
def gradients(vals, func, releps=1e-3, abseps=None, mineps=1e-9, reltol=1e-3,
epsscale=0.5):
"""
Calculate the partial derivatives of a function at a set of values. The
derivatives are calculated using the central difference, using an iterative
method to check that the values converge as step size decreases.
Parameters
----------
vals: array_like
A set of values, that are passed to a function, at which to calculate
the gradient of that function
func:
A function that takes in an array of values.
releps: float, array_like, 1e-3
The initial relative step size for calculating the derivative.
abseps: float, array_like, None
The initial absolute step size for calculating the derivative.
This overrides `releps` if set; otherwise `releps` is used.
mineps: float, 1e-9
The minimum relative step size at which to stop iterations if no
convergence is achieved.
reltol: float, 1e-3
The relative tolerance within which successive derivative estimates
must agree for the iteration to be considered converged.
epsscale: float, 0.5
The factor by which releps is scaled in each iteration.
Returns
-------
grads: array_like
An array of gradients for each non-fixed value.
"""
grads = np.zeros(len(vals))
# maximum number of times the gradient can change sign
flipflopmax = 10.
# set steps
if abseps is None:
if isinstance(releps, float):
eps = np.abs(vals) * releps
eps[eps == 0.] = releps # if any values are zero set eps to releps
teps = releps * np.ones(len(vals))
elif isinstance(releps, (list, np.ndarray)):
if len(releps) != len(vals):
raise ValueError("Problem with input relative step sizes")
eps = np.multiply(np.abs(vals), releps)
eps[eps == 0.] = np.array(releps)[eps == 0.]
teps = releps
else:
raise RuntimeError("Relative step sizes are not a recognised type!")
else:
if isinstance(abseps, float):
eps = abseps * np.ones(len(vals))
elif isinstance(abseps, (list, np.ndarray)):
if len(abseps) != len(vals):
raise ValueError("Problem with input absolute step sizes")
eps = np.array(abseps)
else:
raise RuntimeError("Absolute step sizes are not a recognised type!")
teps = eps
# for each value in vals calculate the gradient
count = 0
for i in range(len(vals)):
# initial parameter diffs
leps = eps[i]
cureps = teps[i]
flipflop = 0
# get central finite difference
fvals = np.copy(vals)
bvals = np.copy(vals)
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiff = (func(fvals) - func(bvals)) / leps
while 1:
fvals[i] -= 0.5 * leps # remove old step
bvals[i] += 0.5 * leps
# change the difference by a factor of two
cureps *= epsscale
if cureps < mineps or flipflop > flipflopmax:
# if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
warnings.warn("Derivative calculation did not converge: setting flat derivative.")
grads[count] = 0.
break
leps *= epsscale
# central difference
fvals[i] += 0.5 * leps # change forwards distance to half eps
bvals[i] -= 0.5 * leps # change backwards distance to half eps
cdiffnew = (func(fvals) - func(bvals)) / leps
if cdiffnew == cdiff:
grads[count] = cdiff
break
# check whether previous diff and current diff are the same within reltol
rat = (cdiff / cdiffnew)
if np.isfinite(rat) and rat > 0.:
# gradient has not changed sign
if np.abs(1. - rat) < reltol:
grads[count] = cdiffnew
break
else:
cdiff = cdiffnew
continue
else:
cdiff = cdiffnew
flipflop += 1
continue
count += 1
return grads
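# --- Illustrative check (added sketch, not part of the original script) ---
# For f(v) = sum(v**2) the analytic gradient is 2*v; the central-difference routine
# above should reproduce it closely. Names prefixed with _demo_ are demo-only.
_demo_vals = np.array([1.0, -2.0, 0.5])
_demo_grads = gradients(_demo_vals, lambda v: np.sum(v ** 2))
assert np.allclose(_demo_grads, 2.0 * _demo_vals, rtol=1e-2)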
# define a theano Op for our likelihood function
class LogLikeWithGrad(tt.Op):
itypes = [tt.dvector] # expects a vector of parameter values when called
otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)
def __init__(self, loglike, data, x, sigma):
"""
Initialise with various things that the function requires. Below
are the things that are needed in this particular example.
Parameters
----------
loglike:
The log-likelihood (or whatever) function we've defined
data:
The "observed" data that our log-likelihood function takes in
x:
The dependent variable (aka 'x') that our model requires
sigma:
The noise standard deviation that our function requires.
"""
# add inputs as class attributes
self.likelihood = loglike
self.data = data
self.x = x
self.sigma = sigma
# initialise the gradient Op (below)
self.logpgrad = LogLikeGrad(self.likelihood, self.data, self.x, self.sigma)
def perform(self, node, inputs, outputs):
# the method that is used when calling the Op
theta, = inputs # this will contain my variables
# call the log-likelihood function
logl = self.likelihood(theta, self.x, self.data, self.sigma)
outputs[0][0] = np.array(logl) # output the log-likelihood
def grad(self, inputs, g):
# the method that calculates the gradients - it actually returns the
# vector-Jacobian product - g[0] is a vector of parameter values
theta, = inputs # our parameters
return [g[0] * self.logpgrad(theta)]
class LogLikeGrad(tt.Op):
"""
This Op will be called with a vector of values and also return a vector of
values - the gradients in each dimension.
"""
itypes = [tt.dvector]
otypes = [tt.dvector]
def __init__(self, loglike, data, x, sigma):
"""
Initialise with various things that the function requires. Below
are the things that are needed in this particular example.
Parameters
----------
loglike:
The log-likelihood (or whatever) function we've defined
data:
The "observed" data that our log-likelihood function takes in
x:
The dependent variable (aka 'x') that our model requires
sigma:
The noise standard deviation that our function requires.
Returns
-------
grads: array_like
An array of gradients for each non-fixed value.
"""
# add inputs as class attributes
self.likelihood = loglike
self.data = data
self.x = x
self.sigma = sigma
def perform(self, node, inputs, outputs):
theta, = inputs
# define version of likelihood function to pass to derivative function
def lnlike(values):
return self.likelihood(values, self.x, self.data, self.sigma)
# calculate gradients
grads = gradients(theta, lnlike)
outputs[0][0] = grads
# define your super-complicated model that uses load of external codes
def my_model(theta, x):
"""
A straight line!
Note:
This function could simply be:
m, c = theta
return m*x + c
but I've made it more complicated for demonstration purposes
"""
m, c = theta # unpack line gradient and y-intercept
return m * x + c
# define your really-complicated likelihood function that uses loads of external codes
def my_loglike(theta, x, data, sigma):
"""
A Gaussian log-likelihood function for a model with parameters given in theta
"""
model = my_model(theta, x)
return -0.5*len(x)*np.log(2*math.pi*sigma**2) - (0.5/sigma**2) * np.sum((data-model)**2)
def my_model_random(point=None, size=None):
"""
Draw posterior predictive samples from model.
"""
return my_model((point["m"], point["c"]), x)
###########################
# Synthetic data #
###########################
# Set up our data
N = 10 # number of data points
sigma = 1. # standard deviation of noise
x = np.linspace(0., 9., N)
mtrue = 0.4 # true gradient
ctrue = 3. # true y-intercept
truemodel = my_model([mtrue, ctrue], x)
# Make data
data = sigma * np.random.randn(N) + truemodel
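# Added sanity check (sketch, not in the original tutorial): the handwritten Gaussian
# log-likelihood should agree with summing scipy.stats normal log-pdfs over the data.
from scipy import stats as _sp_stats
assert np.isclose(
    my_loglike([mtrue, ctrue], x, data, sigma),
    _sp_stats.norm(my_model([mtrue, ctrue], x), sigma).logpdf(data).sum(),
)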
ndraws = 2000 # number of draws from the distribution
nburn = 1000 # number of "burn-in points" (which we'll discard)
chains = 4
# Create our Op
logl = LogLikeWithGrad(my_loglike, data, x, sigma)
# use PyMC3 to sampler from log-likelihood
with pm.Model() as opmodel:
# uniform priors on m and c
m = pm.Uniform('m', lower=-10., upper=10.)
c = pm.Uniform('c', lower=-10., upper=10.)
# convert m and c to a tensor vector
theta = tt.as_tensor_variable([m, c])
# use a DensityDist
pm.DensityDist(
'likelihood',
lambda v: logl(v),
observed={'v': theta},
random=my_model_random,
)
trace = pm.sample(ndraws, cores=1, chains=chains, tune=nburn, discard_tuned_samples=True)
# trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True)
# plot the traces
print(az.summary(trace, round_to=2))
_ = pm.traceplot(trace, lines=(('m', {}, [mtrue]), ('c', {}, [ctrue])))
# put the chains in an array (for later!)
samples_pymc3_2 = np.vstack((trace['m'], trace['c'])).T
# just because we can, let's draw posterior predictive samples of the model
ppc = pm.sample_posterior_predictive(trace, samples=250, model=opmodel)
_, ax = plt.subplots()
for vals in ppc['likelihood']:
plt.plot(x, vals, color='b', alpha=0.05, lw=3)
ax.plot(x, my_model((mtrue, ctrue), x), 'k--', lw=2)
ax.set_xlabel("Predictor (stdz)")
ax.set_ylabel("Outcome (stdz)")
ax.set_title("Posterior predictive checks");
plt.show()
###########################
# Simple PyMC3 distribution #
###########################
with pm.Model() as pymodel:
# uniform priors on m and c
m = pm.Uniform('m', lower=-10., upper=10.)
c = pm.Uniform('c', lower=-10., upper=10.)
# convert m and c to a tensor vector
theta = tt.as_tensor_variable([m, c])
# use a Normal distribution
pm.Normal('likelihood', mu=(m * x + c), sd=sigma, observed=data)
trace = pm.sample(ndraws, cores=1, chains=chains, tune=nburn, discard_tuned_samples=True)
# plot the traces
_ = pm.traceplot(trace, lines=(('m', {}, [mtrue]), ('c', {}, [ctrue])))
# put the chains in an array (for later!)
samples_pymc3_3 = np.vstack((trace['m'], trace['c'])).T
###########################
# Postprocessing #
###########################
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning) # suppress emcee autocorr FutureWarning
matplotlib.rcParams['font.size'] = 22
hist2dkwargs = {'plot_datapoints': False,
'plot_density': False,
'levels': 1.0 - np.exp(-0.5 * np.arange(1.5, 2.1, 0.5) ** 2)} # roughly 1 and 2 sigma
colors = ['r', 'g', 'b']
labels = ['Theano Op (with grad)', 'Pure PyMC3']  # matches the two sample sets compared in the loop below
for i, samples in enumerate([samples_pymc3_2, samples_pymc3_3]):
# get maximum chain autocorrelation length
autocorrlen = int(np.max(emcee.autocorr.integrated_time(samples, c=3)));
print('Auto-correlation length ({}): {}'.format(labels[i], autocorrlen))
if i == 0:
fig = corner.corner(samples, labels=[r"$m$", r"$c$"], color=colors[i],
hist_kwargs={'density': True}, **hist2dkwargs,
truths=[mtrue, ctrue])
else:
corner.corner(samples, color=colors[i], hist_kwargs={'density': True},
fig=fig, **hist2dkwargs)
fig.set_size_inches(9, 9)
# test the gradient Op by direct call
theano.config.compute_test_value = "ignore"
theano.config.exception_verbosity = "high"
var = tt.dvector()
test_grad_op = LogLikeGrad(my_loglike, data, x, sigma)
test_grad_op_func = theano.function([var], test_grad_op(var))
grad_vals = test_grad_op_func([mtrue, ctrue])
print('Gradient returned by "LogLikeGrad": {}'.format(grad_vals))
# test the gradient called through LogLikeWithGrad
test_gradded_op = LogLikeWithGrad(my_loglike, data, x, sigma)
test_gradded_op_grad = tt.grad(test_gradded_op(var), var)
test_gradded_op_grad_func = theano.function([var], test_gradded_op_grad)
grad_vals_2 = test_gradded_op_grad_func([mtrue, ctrue])
print('Gradient returned by "LogLikeWithGrad": {}'.format(grad_vals_2))
# test the gradient that PyMC3 uses for the Normal log likelihood
test_model = pm.Model()
with test_model:
m = pm.Uniform('m', lower=-10., upper=10.)
c = pm.Uniform('c', lower=-10., upper=10.)
pm.Normal('likelihood', mu=(m*x + c), sigma=sigma, observed=data)
gradfunc = test_model.logp_dlogp_function([m, c], dtype=None)
gradfunc.set_extra_values({'m_interval__': mtrue, 'c_interval__': ctrue})
grad_vals_pymc3 = gradfunc(np.array([mtrue, ctrue]))[1] # get dlogp values
print('Gradient returned by PyMC3 "Normal" distribution: {}'.format(grad_vals_pymc3))
# profile logpt using our Op
opmodel.profile(opmodel.logpt).summary()
# profile using our PyMC3 distribution
pymodel.profile(pymodel.logpt).summary()
```
#### File: nutils/files/myFUQlib.py
```python
from nutils import mesh, function, solver, util, export, cli, testing
import numpy as np, treelog
from CoolProp.CoolProp import PropsSI
import scipy.special as sc
from matplotlib import pyplot as plt
from scipy.stats import norm
from matplotlib import collections, colors
import pandas as pd
# import seaborn as sns
import matplotlib.pyplot as plt
import math
#################### Doublet model library #########################
#Objects
class Aquifer:
def __init__(self, aquifer):
# if stochastic params not used
self.H = aquifer['H']
self.φ = aquifer['porosity']
self.K = aquifer['K']
self.Q = aquifer['Q'] # pumping rate from well (negative value = extraction)
#deterministic
self.dtop = aquifer['dtop'] # depth to top aquifer
self.dsensor = aquifer['dsensor'] # depth to esp sensor
self.dpump = aquifer['dpump'] # depth to pump location
self.labda = aquifer['labda'] # geothermal gradient
self.Tsur = aquifer['Tsurface']
self.ρf = self.rhof = aquifer['rhof']
self.rhos = aquifer['rhos']
self.cpf = aquifer['cpf']
self.cps = aquifer['cps'] # stone specific heat capacity (limestone) [J/kg K]
self.labdas = aquifer['labdas'] # thermal conductivity solid [W/mK]
self.labdaf = aquifer['labdaf'] # thermal conductivity fluid [W/mK]
self.mu = aquifer['viscosity']
self.pref = aquifer['pref'] # initial reservoir pressure [Pa]
self.Tref = aquifer['Tref'] # initial reservoir temperature [K]
self.rw = aquifer['rw'] # well radius [m]
self.rmax = aquifer['rmax'] # well radius of influence [m]
self.mdot = self.Q * aquifer['rhof']
self.D = 2 * aquifer['rw']
self.Aw = 2 * np.pi * aquifer['rw']
self.g = 9.81
self.L = aquifer['L'] # distance between injection well and production well
self.Tinj = aquifer['Tinj'] # initial temperature of injection well (reinjection temperature)
self.patm = aquifer['patm'] # atmospheric pressure
self.ε = aquifer['ε'] # tubing roughness [m]
self.ct = aquifer['ct']
# total system (rock + fluid) variable
self.ρ = self.φ * self.rhof + (1 - self.φ) * self.rhos
self.cp = self.φ * self.cpf + (1 - self.φ) * self.cps
self.λ = self.φ * self.labdaf + (1 - self.φ) * self.labdas
# class Well:
#
# def __init__(self, well, aquifer):
#
# self.Q = well['Q'] # pumping rate from well (negative value = extraction)
# self.mdot = self.Q * aquifer['rho_f']
# self.D = 2 * aquifer['rw']
# self.Aw = 2 * np.pi * aquifer['rw']
class DoubletGenerator:
"""Generates all properties for a doublet
Args:
aquifer (Aquifer): aquifer object with the reservoir, fluid and well properties
sol (sequence): solution pair (pnode9, Tnode9) at the production-well bottom hole
params (array_like, optional): stochastic parameter samples (currently unused)
"""
def __init__(self, aquifer, sol, params=None):
# Initialize deterministic parameters
self.aquifer = aquifer
self.time = 365*24*60*60 #1 year [s]
self.H = self.aquifer.H
self.Q = self.aquifer.Q
self.alpha = self.aquifer.labdas / ( self.aquifer.rhos * self.aquifer.cps) #thermal diffusion of rock
self.gamma = 0.577216 #euler constant
self.pnode9 = sol[0]
self.Tnode9 = sol[1]
self.Tinj = self.aquifer.Tinj * np.ones_like(self.Tnode9)
# if params:
# Stochastic parameters with effect on well test
# self.params = params
# self.H = np.mean(params[0])
# self.Q = np.mean(params[4])
# Set lengths in system
self.lpipe = self.z = self.aquifer.dsensor
self.dpump = self.aquifer.dpump
# Set specs
self.effpump = 0.61 # Efficiency of pump [-]
self.eta = 0.61 # Efficiency of heat exchanger [-]
self.Ppump = 2.671e5/2 # Power of pump [W]
# Evaluate objects within doublet
self.T_aqinjector = self.Tinj
self.T_aqproducer = self._get_Tz(self.lpipe)
self.P_aqproducer = self._get_pgz(self.aquifer.patm, self.lpipe, self.T_aqproducer)
self.P_aqinjector = self._get_pgz(self.aquifer.patm, self.lpipe, self.Tinj)
self.ppump = self._get_ppump(self.Ppump, self.Q)
# Evaluate Tnodes within doublet
self.Tnode10 = self.T_aqproducer # Tref when based on depth of sensor
self.Tnode8 = self.get_Tnode8(self.Tnode9)
self.Tnode6 = self.Tnode7 = self.get_Tnode7(self.Tnode9)
self.Tnode4 = self.Tnode5 = self.Tinj
self.Tnode3 = self.get_Tnode3(self.Tnode4)
self.Tnode2 = self.get_Twinj(self.z - self.dpump, self.Tinj)
self.Tnode1 = self.T_aqproducer
# Evaluate pnodes within doublet
self.pnode10 = self.P_aqproducer # pref when based on depth
self.pnode8 = self.get_pnode8(self.pnode9)
self.pnode6 = self.pnode7 = self.get_pnode7(self.pnode8)
self.pnode4 = self.pnode5 = self.pnode6
self.pnode3 = self.get_pnode3(self.pnode4)
self.pnode2 = self.get_pnode2(self.pnode3)
self.pnode1 = self.P_aqinjector # pref when based on depth and injection temperature
# Calculate power output system
self.Phe = self.aquifer.mdot * self.aquifer.cpf * (self.Tnode6 - self.Tinj)
def get_Tw(self, dz, Tw):
Tw = Tw.copy()
dl = 10 # pipe segment [m]
zi = np.linspace(self.z, self.z - dz, int(dz / dl) + 1)
for i in range(len(zi)-1):
Tw -= dl * self._getqw(Tw, zi[i]) / ( self.aquifer.mdot * self.aquifer.cpf )
return Tw
def get_Twinj(self, dz, Tw):
Tw = Tw.copy()
dl = 10 # pipe segment [m]
zi = np.linspace(0, dz, int(dz / dl) + 1)
for i in range(len(zi)-1):
Tw += dl * self._getqw(Tw, zi[i]) / ( self.aquifer.mdot * self.aquifer.cpf )
return Tw
def _getqw(self, Tw, zi):
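# Heat exchange per unit pipe length with the surrounding rock; the logarithmic term
# matches the transient infinite line-source form (gamma here is Euler's constant).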
qw = 4 * math.pi * self.aquifer.labdas * ( Tw - self._get_Tz(zi) ) / math.log( ( 4 * self.alpha * self.time ) / (math.exp(self.gamma) * self.aquifer.rw**2 ) )
return qw
def get_Tnode8(self, Tnode9):
Tnode8 = self.get_Tw(self.z - self.dpump, Tnode9)
return Tnode8
def get_Tnode7(self, Tnode9):
Tnode7 = self.get_Tw(self.z, Tnode9)
return Tnode7
def get_Tnode3(self, Tnode4):
Tnode3 = self.get_Twinj(self.dpump, Tnode4)
return Tnode3
def get_Tnode2(self, Tnode4):
Tnode2 = self.get_Twinj(self.z, Tnode4)
return Tnode2
def get_pnode8(self, pnode9):
pnode8 = pnode9 - self._get_pgz(0, (self.z - self.dpump), self.Tnode9) - self._get_pfriction(self.z - self.dpump)
# print('loss of pressure by height', self._get_pgz(0, (self.z - self.dpump), self.Tnode9))
# print('loss of pressure by friction', self._get_pfriction(self.z - self.dpump))
return pnode8
def get_pnode7(self, pnode8):
pnode7 = pnode8 - self._get_pgz(0, self.dpump, self._get_Tz(self.lpipe)) - self._get_pfriction(self.dpump) + self._get_ppump(self.Ppump, self.Q)
return pnode7
def get_pnode3(self, pnode4):
pnode3 = pnode4 + self._get_pgz(0, self.dpump, self._get_Tz(self.lpipe)) + self._get_pfriction(self.dpump) #+ self._get_ppump(self.Ppump, self.Q)
return pnode3
def get_pnode2(self, pnode3):
pnode2 = pnode3 + self._get_pgz(0, (self.z - self.dpump), self.T_aqinjector) + self._get_pfriction(self.z - self.dpump)
return pnode2
def _get_ppump(self, Ppump, Q):
ppump = Ppump / (Q * self.effpump) # appropiate value is 20e5 Pa
# print('pump added pressure', ppump)
return ppump
def _get_pgz(self, patm, z, T):
""" Computes pressure of the aquifer as a function of the depth, temperature and pressure
Arguments:
z (float): depth (downwards from groundlevel is positive)
Returns:
p (float): value of pressure
"""
pgz = patm + self.aquifer.g * self.aquifer.rhof * z # density as a constant
# pgz = patm + self.aquifer.g * self.rho(np.mean(T)-273, pgz) * z # density as a function of temperature and pressure
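# Worked example (illustrative values, not from the original file): with patm = 1.0e5 Pa,
# rhof = 1000 kg/m3 and z = 2000 m, pgz = 1.0e5 + 9.81 * 1000 * 2000 ≈ 1.97e7 Pa (about 197 bar).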
return pgz
def _get_pfriction(self, z):
pfriction = self._get_f() * (z / self.aquifer.D) * (self.aquifer.rhof * self.get_vmean(self.Q) ** 2) / 2  # Darcy-Weisbach: f (L/D) (rho v^2 / 2)
return pfriction
def _get_f(self):
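# The explicit form below follows the Jain approximation to the Colebrook-White
# friction factor: 1/sqrt(f) = 1.14 - 2*log10(eps/D + 21.25/Re**0.9).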
f = ( 1.14 - 2 * math.log10( self.aquifer.ε / self.aquifer.D + 21.25 / ( self.get_Re( self.get_vmean(self.Q) )**0.9 ) ) )**-2
return f
def get_vmean(self, Q):
vmean = 4 * Q / ( math.pi * ( self.aquifer.D ** 2 ) )
return vmean
def get_Re(self, vmean):
Re = (self.aquifer.rhof * vmean * self.aquifer.D) / self.aquifer.mu  # pipe Reynolds number, rho v D / mu
return Re
# Theis solution, temperature and pressure as a function of depth
# def _get_P_wb(self, P_aquifer, T_aquifer):
# """ Computes pressure at wellbore
#
# Arguments:
# d (float): depth (downwards from groundlevel is positive)
# Returns:
# P_wb (float): value of pressure at well bore
# """
# if P_aquifer == self.P_aqproducer:
# Q = -self.Q
# else:
# Q = self.Q
#
# P_wb = P_aquifer + ( ( Q * self.mu(T_aquifer, P_aquifer) ) / ( 2 * math.pi * self.aquifer.K * self.aquifer.H ) ) * np.log ( self.aquifer.L / self.aquifer.rw)
# return P_wb
def _get_Tz(self, z):
""" Computes temperature of the aquifer as a function of the depth
Arguments:
z (float): depth (downwards from groundlevel is positive)
Returns:
T (float): value of temperature
"""
T = self.aquifer.Tsur + z * self.aquifer.labda
return T
# Thermophysical properties
def rho(self, Twater, Pwater):
# rho = (1 + 10e-6 * (-80 * T - 3.3 * T**2 + 0.00175 * T**3 + 489 * p - 2 * T * p + 0.016 * T**2 * p - 1.3e-5 * T**3\
# * p - 0.333 * p**2 - 0.002 * T * p**2) )
rho = PropsSI('D', 'T', Twater, 'P', Pwater, 'IF97::Water')
# rho = self.aquifer.rhof * (1 - 3.17e-4 * (Twater - 298.15) - 2.56e-6 * (Twater - 298.15) ** 2)
return rho
def mu(self, Twater, Pwater):
# mu = 0.1 + 0.333 * saltcontent + (1.65 + 91.9 * saltcontent**3) * math.exp(-(0.42*(saltcontent**0.8 - 0.17)**2 + 0.045) * Twater**0.8)
mu = PropsSI('V', 'T', Twater, 'P', Pwater, 'IF97::Water')
return mu
## Graphical variables for GUI ##
# self.Dx = self.aquifer.L * 3 # domain of x
# self.Dy = - (2 * self.aquifer.dtop + self.aquifer.H) # domain of y
# self.Nx = 24 # number of nodes by x
# self.Ny = 10 # number of nodes by y
# self.nNodes = self.Nx * self.Ny # total number of nodes
# self.ne = (self.Nx - 1) * (self.Ny - 1)
# self.dx = self.Dx / self.Nx # segment length of x
# self.dy = self.Dy / self.Ny # segment length of y
# self.domain = np.array([self.dx, self.dy])
# self.x_grid, self.y_grid = self._make_grid()
# self.x_well, self.y_well = self._construct_well()
# self.nodes_grid = self._make_nodes_grid()
# self.coordinate_grid = self._make_coordinates_grid()
# self.P_grid = self._compute_P_grid()
# self.T_grid = self._compute_T_grid()
# def _get_gaussian_points
# def _compute_T_grid(self):
# T_grid = self._get_T(-self.y_grid)
# # P_grid[self.Ny/2][self.Nx/3] = self.P_wellbore
# # P_grid[5][16] = self.P_wellbore
# # P_grid[4][16] = self.P_wellbore
# T_grid[5][8] = self.Tinj
# T_grid[4][8] = self.Tinj
#
# return T_grid
# def _compute_P_grid(self):
# P_grid = self._get_P(-self.y_grid)
# # P_grid[self.Ny/2][self.Nx/3] = self.P_wellbore
# P_grid[5][16] = self.P_wellbore
# P_grid[4][16] = self.P_wellbore
# P_grid[5][8] = self.P_wellbore
# P_grid[4][8] = self.P_wellbore
#
# return P_grid
# def _make_nodes_grid(self):
# """ Compute a nodes grid for the doublet
#
# Returns:
# x_grid_nodes, y_grid_nodes (np.array): arrays of the domain in x and y direction
# """
# i = np.arange(0, self.Nx+1, 1)
# j = np.arange(0, -self.Ny-1, -1)
#
# i_coords, j_coords = np.meshgrid(i, j)
#
# nodes_grid = np.array([i_coords, j_coords])
#
# return nodes_grid
# def _make_coordinates_grid(self):
# coordinates_grid = self.nodes_grid
#
# coordinates_grid[0,:,:] = self.nodes_grid[0,:,:] * self.domain[0]
# coordinates_grid[1,:,:] = self.nodes_grid[1,:,:] * -self.domain[1]
#
# return coordinates_grid
# def _make_grid(self):
# """ Compute a cartesian grid for the doublet
#
# Returns:
# domain (np.array): array of the domain in x and y direction
# """
# x = np.linspace(0, self.aquifer.L * 3, self.Nx)
# y = np.linspace(0,- (2 * self.aquifer.dtop + self.aquifer.H) , self.Ny)
# x_grid, y_grid = np.meshgrid(x, y)
#
# return x_grid, y_grid
# def _construct_well(self):
# """ Compute two wells for the doublet
#
# Returns:
# x_well, y_well (np.array): array of the x and y of the well
# """
# # x = np.array([[self.aquifer.L * 5 - self.aquifer.L * 0.5], [self.aquifer.L * 5 + self.aquifer.L * 0.5]])
# # y = np.linspace(0,- (self.aquifer.dtop + self.aquifer.H) , (20 * self.Ny) - 1)
# x_well = np.array([[self.x_grid[0][math.floor(self.Nx/3)]], [self.x_grid[0][2*math.floor(self.Nx/3)]]])
# y_well = self.y_grid[math.floor(self.Ny/2)][0] * np.ones(2)
#
# return x_well, y_well
#Forward Analysis
def evaluateDoublet(doublet):
print("\r\n############## Analytical values model ##############\n"
"m_dot: ", doublet.aquifer.mdot, "Kg/s\n"
"ppump,p/i ", doublet.ppump/1e5, "Bar\n"
"pnode10/p_aq,p: ", doublet.pnode10/1e5, "Bar\n"
"pnode9/p_bh,p: ", doublet.pnode9/1e5, "Bar\n"
"pnode8/p_pu,p: ", doublet.pnode8/1e5, "Bar\n"
"pnode7/p_out,p: ", doublet.pnode7/1e5, "Bar\n"
"pnode6/p_in,HE: ", doublet.pnode6/1e5, "Bar\n"
"pnode5/p_out,HE: ", doublet.pnode5/1e5, "Bar\n"
"pnode2/p_bh,i: ", doublet.pnode2/1e5, "Bar\n"
"pnode1/p_aq,i: ", doublet.pnode1/1e5, "Bar\n"
"Tnode9/T_bh,p: ", doublet.Tnode9-273, "Celcius\n"
"Tnode8/T_pu,p: ", doublet.Tnode8-273, "Celcius\n"
"Tnode7/T_in,HE: ", doublet.Tnode7-273, "Celcius\n"
"Tnode6/T_in,HE: ", doublet.Tnode6-273, "Celcius\n"
"Tnode5/T_out,HE: ", doublet.Tnode5-273, "Celcius\n"
"Tnode4/T_in,i: ", doublet.Tnode4-273, "Celcius\n"
"Tnode3/T_pu,i: ", doublet.Tnode3-273, "Celcius\n"
"Tnode2/T_bh,i: ", doublet.Tnode2-273, "Celcius\n"
"Power,HE: ", doublet.Phe/1e6, "MW")
MPA = 1e6
pnodelist = [doublet.pnode2 / MPA, doublet.pnode3 / MPA, doublet.pnode4 / MPA, doublet.pnode5 / MPA,
doublet.pnode6 / MPA, doublet.pnode7 / MPA, doublet.pnode8 / MPA, doublet.pnode9 / MPA]
Tnodelist = [doublet.Tnode2, doublet.Tnode3, doublet.Tnode4, doublet.Tnode5, doublet.Tnode6, doublet.Tnode7,
doublet.Tnode8, doublet.Tnode9]
return pnodelist, Tnodelist
# ## Finite element thermo-hydraulic model
#
# def DoubletFlow(aquifer, well, doublet, k, porosity, timestep, endtime):
#
# # construct mesh
# nelemsX = 10
# nelemsY = 10
# vertsX = np.linspace(0, well.L, nelemsX + 1)
# vertsY = np.linspace(0, aquifer.H, nelemsY + 1)
# vertsZ = np.linspace(0, aquifer.H, nelemsY + 1)
# topo, geom = mesh.rectilinear([vertsX, vertsY])
# # topo = topo.withboundary(inner='left', outer='right')
#
# bezier = topo.sample('bezier', 3)
# points, vals = bezier.eval([geom, 0])
#
# # # plot
# # plt.figure(figsize=(10, 10))
# # cmap = colors.ListedColormap("limegreen")
# # plt.tripcolor(points[:, 0], points[:, 1], bezier.tri, vals, shading='gouraud', cmap=cmap)
# # ax = plt.gca()
# # ax.add_collection(collections.LineCollection(points[bezier.hull], colors='r', linewidth=2, alpha=1))
#
# # create namespace
# ns = function.Namespace()
# degree = 3
# ns.pbasis = topo.basis('std', degree=degree)
# ns.Tbasis = topo.basis('std', degree=degree - 1)
# ns.p = 'pbasis_n ?lhsp_n'
# ns.T = 'Tbasis_n ?lhsT_n'
# ns.x = geom
# ns.cf = aquifer.Cp_f
# ns.g = aquifer.g
# ns.g_i = '<0, -g>_i'
# ns.uinf = 1, 0
# ns.mdot = well.mdot
# ns.r = well.r
# ns.Awell = well.A_well
# ns.nyy = 0, 1
# ns.pout = doublet.P_aqproducer
# ns.p0 = ns.pout
# ns.Tatm = 20 + 273
# ns.Tin = doublet.well.Tinj
# ns.Tout = doublet.T_HE
# ns.T0 = doublet.T_HE
# ns.ρf = aquifer.rhof
# ns.ρ = ns.ρf #* (1 - 3.17e-4 * (ns.T - 298.15) - 2.56e-6 * (ns.T - 298.15)**2) #no lhsT in lhsp
# ns.lambdl = aquifer.labda_l #'thermal conductivity liquid [W/mK]'
# ns.lambds = aquifer.labda_s #'thermal conductivity solid [W/mK]'
# ns.qh = ns.lambds * aquifer.labda #heat source production rocks [W/m^2]
# k_int_x = k #'intrinsic permeability [m2]'
# k_int_y = k #'intrinsic permeability [m2]'
# k_int= (k_int_x,k_int_y)
# ns.k = (1/aquifer.mu)*np.diag(k_int)
# ns.k1 = k
# ns.u_i = '-k_ij (p_,j - (ρ g_1)_,j)' #darcy velocity
# ns.ur = '-k1 (p_,i)' #darcy velocity, but now simple
# ns.u0 = (ns.mdot / (ns.ρ * ns.Awell))
# ns.qf = -ns.u0
# ns.λ = porosity * ns.lambdl + (1 - porosity) * ns.lambds # heat conductivity λ [W/m/K]
# ns.porosity = porosity
# ns.w = math.sin()
# ns.Ar = aquifer.H * ns.w
#
# # define initial condition for mass balance and darcy's law
# sqr = topo.integral('(p - p0) (p - p0)' @ ns, degree=degree * 2) # set initial temperature to T=T0
# pdofs0 = solver.optimize('lhsp', sqr)
# statep0 = dict(lhsp=pdofs0)
#
# # define dirichlet constraints for hydraulic process
# sqrp = topo.boundary['right'].integral('(p - pout) (p - pout) d:x' @ ns, degree=degree * 2) # set outflow condition to p=p_out
# consp = solver.optimize('lhsp', sqrp, droptol=1e-15)
# # consp = dict(lhsp=consp)
#
# # formulate hydraulic process single field
# resp = topo.integral('(u_i porosity pbasis_n,i) d:x' @ ns, degree=degree*2) # formulation of velocity
# resp -= topo.boundary['left'].integral('pbasis_n qf d:x' @ ns, degree=degree*2) # set inflow boundary to q=u0
# resp += topo.boundary['top,bottom'].integral('(pbasis_n u_i n_i) d:x' @ ns, degree=degree*2) #neumann condition
# pinertia = topo.integral('ρ pbasis_n,i u_i porosity d:x' @ ns, degree=degree*4)
#
# # solve for transient state of pressure
# # lhsp = solver.solve_linear('lhsp', resp, constrain=consp)
#
# # introduce temperature dependent variables
# ns.ρ = ns.ρf * (1 - 3.17e-4 * (ns.T - 298.15) - 2.56e-6 * (ns.T - 298.15)**2)
# ns.lambdl = 4187.6 * (-922.47 + 2839.5 * (ns.T / ns.Tatm) - 1800.7 * (ns.T / ns.Tatm)**2 + 525.77*(ns.T / ns.Tatm)**3 - 73.44*(ns.T / ns.Tatm)**4)
# # ns.cf = 3.3774 - 1.12665e-2 * ns.T + 1.34687e-5 * ns.T**2 # if temperature above T=100 [K]
#
# # define initial condition for thermo process
# sqr = topo.integral('(T - T0) (T - T0)' @ ns, degree=degree * 2) # set initial temperature to T=T0
# Tdofs0 = solver.optimize('lhsT', sqr)
# stateT0 = dict(lhsT=Tdofs0)
#
# # define dirichlet constraints for thermo process
# sqrT = topo.boundary['left'].integral('(T - Tin) (T - Tin) d:x' @ ns, degree=degree*2) # set temperature injection pipe to T=Tin
# # sqrT = topo.boundary['left, bottom, top'].integral('(T - T0) (T - T0) d:x' @ ns, degree=degree*2) #set bottom temperature T=T0
# consT = solver.optimize('lhsT', sqrT, droptol=1e-15)
# consT = dict(lhsT=consT)
#
# # formulate thermo process
# resT = topo.integral('(ρ cf Tbasis_n (u_k T)_,k ) d:x' @ ns, degree=degree*2) # formulation of convection of energy
# resT -= topo.integral('Tbasis_n,i (- λ) T_,i d:x' @ ns, degree=degree*2) # formulation of conductive heat flux
# resT -= topo.boundary['top,bottom'].integral('Tbasis_n qh d:x' @ ns, degree=degree*2) # heat flux on boundary
# # resT -= topo.integral('Tbasis_n qh d:x' @ ns, degree=degree*2) # heat source/sink term within domain
# Tinertia = topo.integral('ρ cf Tbasis_n T d:x' @ ns, degree=degree*4)
#
# def make_plots():
# fig, ax = plt.subplots(2)
#
# ax[0].set(xlabel='X [m]', ylabel='Pressure [Bar]')
# ax[0].set_ylim([min(p/1e5), doublet.P_aqproducer/1e5])
# # ax[0].set_xlim([0, 1000])
# print("wellbore pressure", p[0])
# print("pressure difference", p[0] - doublet.P_aqproducer)
# ax[0].plot(x[:, 0].take(bezier.tri.T, 0), (p/1e5).take(bezier.tri.T, 0))
#
# # ax[1].set(xlabel='X [m]', ylabel='Temperature [Celcius]')
# # ax[1].plot(x[:,0].take(bezier.tri.T, 0), T.take(bezier.tri.T, 0)-273)
#
# fig, axs = plt.subplots(3, sharex=True, sharey=True)
# fig.suptitle('2D Aquifer')
#
# plot0 = axs[0].tripcolor(x[:, 0], x[:, 1], bezier.tri, p / 1e5, vmin=min(p/1e5), vmax=doublet.P_aqproducer/1e5, shading='gouraud', rasterized=True)
# fig.colorbar(plot0, ax=axs[0], label="Darcy p [Bar]")
#
# plot1 = axs[1].tripcolor(x[:, 0], x[:, 1], bezier.tri, u[:, 0], vmin=0, vmax=0.05, shading='gouraud',
# rasterized=True)
# fig.colorbar(plot1, ax=axs[1], label="Darcy Ux [m/s]")
# plt.xlabel('x')
# plt.ylabel('z')
#
# # plot2 = axs[2].tripcolor(x[:, 0], x[:, 1], bezier.tri, T-273, shading='gouraud', rasterized=True)
# # fig.colorbar(plot2, ax=axs[2], label="T [C]")
#
# plt.show()
#
# # Time dependent pressure development
#
# bezier = topo.sample('bezier', 5)
# with treelog.iter.plain(
# 'timestep', solver.impliciteuler(('lhsp'), residual=resp, inertia=pinertia,
# arguments=statep0, timestep=timestep, constrain=consp,
# newtontol=1e-2)) as steps:
# #arguments=dict(lhsp=lhsp, lhsT=Tdofs0)
#
# for istep, lhsp in enumerate(steps):
#
# time = istep * timestep
# # x, u, p, T = bezier.eval(['x_i', 'u_i', 'p', 'T'] @ ns, **state)
# x, p, u = bezier.eval(['x_i', 'p', 'u_i'] @ ns, lhsp=lhsp)
#
# if time >= endtime:
# print(len(x[:, 0]), len(p))
#
# make_plots()
# break
#
# # Time dependent heat transport process
# bezier = topo.sample('bezier', 5)
# with treelog.iter.plain(
# 'timestep', solver.impliciteuler(('lhsT'), residual=resT, inertia=Tinertia,
# arguments=dict(lhsp=lhsp, lhsT=Tdofs0), timestep=timestep, constrain=consT,
# newtontol=1e-2)) as steps:
#
# for istep, lhsT in enumerate(steps):
#
# time = istep * timestep
# # x, u, p, T = bezier.eval(['x_i', 'u_i', 'p', 'T'] @ ns, **state)
# x, p, u, T = bezier.eval(['x_i', 'p', 'u_i', 'T'] @ ns, lhsp=lhsp, lhsT=lhsT)
#
# if time >= endtime:
# print(len(x[:,0]), len(T))
#
# make_plots()
# break
#
# bar = 1e5
# p_inlet = p[0]/bar
# T_prod = T[-1]
#
# return p_inlet, T_prod
#
# # solve for steady state of temperature
# # lhsT = solver.newton('lhsT', resT, constrain=consT, arguments=dict(lhsp=lhsp)).solve(tol=1e-2)
#
#
# #################
# # Postprocessing
# #################
#
# # bezier = topo.sample('bezier', 5)
# # # x, p, u = bezier.eval(['x_i', 'p', 'u_i'] @ ns, lhsp=lhsp)
# # x, p, u, T = bezier.eval(['x_i', 'p', 'u_i', 'T'] @ ns, lhsp=lhsp, lhsT=lhsT)
#
# def add_value_to_plot():
# for i, j in zip(x[:,0], x[:,1]):
# for index in range(len(T)):
# print(T[index], index)
# # axs[2].annotate(T[index], xy=(i, j))
#
# # add_value_to_plot()
# # fig, ax = plt.subplots(4)
# # density = 'True'
# #
# # ax[0].plot(x1,frozen_lognorm.pdf(x1)*(max(x1)-min(x1)))
# # # ax[0].hist(permeability, bins=bin_centers1, density=density, histtype='stepfilled', alpha=0.2)
# # ax[0].set(xlabel='Permeability K [m/s]', ylabel='Probability')
# # ax[0].axvline(x=2.2730989084434785e-08)
# #
# # ax[1].plot(x2, frozen_norm_por.pdf(x2)*(max(x2)-min(x2)))
# # # ax[1].hist(porosity, bins=bin_centers2, density=density, histtype='stepfilled', alpha=0.2)
# # ax[1].set(xlabel='Porosity [-]', ylabel='Probability')
# # ax[1].axvline(x=0.163)
# #
# # ax[2].hist(p_inlet, density=density, bins=50, histtype='stepfilled', alpha=0.2)
# # mu_p = np.mean(p_inlet)
# # # print(mu_p)
# # stddv_p = np.var(p_inlet)**0.5
# # # print(stddv_p)
# # frozen_norm_p = stats.norm(loc=mu_p, scale=stddv_p)
# # x3 = np.linspace(mu_p-3*stddv_p, mu_p+3*stddv_p, 10)
# # # print(frozen_norm_p.pdf(x3))
# # # ax[2].plot(x3,frozen_lognorm_p.pdf(x3))
# # ax[2].plot(x3,frozen_norm_p.pdf(x3))
# # # ax[2].xaxis.set_major_locator(MaxNLocator(integer=True))
# # ax[2].get_xaxis().get_major_formatter().set_useOffset(False)
# # ax[2].set(xlabel='Injector Pressure [Bar]', ylabel='Probability')
# # # plt.xlabel('Inlet Pressure [Bar]')
# # # plt.ylabel('Probability')
# #
# # ax[3].hist(T_prod, density=density, bins=50, histtype='stepfilled', alpha=0.2)
# # mu_T = np.mean(T_prod)
# # stddv_T = np.var(T_prod)**0.5
# # frozen_norm_T = stats.norm(loc=mu_T, scale=stddv_T)
# # x4 = np.linspace(mu_T-3*stddv_T, mu_T+3*stddv_T, 10)
# # # print(frozen_norm_p.pdf(x4))
# # ax[3].plot(x4,frozen_norm_T.pdf(x4))
# # ax[3].set(xlabel='Producer Temperature [Celcius]', ylabel='Probability')
# #
# # # print(ns.u0.eval())
# # # print("velocity horizontal", (u[:,0]))
# # # print((p[0]))
# # plt.subplots_adjust(hspace=1)
# # # plt.show()
# #
# # Confidence_mu = 0.95
# # N_min = (norm.ppf((1 + Confidence_mu)/2) / (1 - Confidence_mu))**2 * (stddv_p / mu_p)**2
# # print("Cdf", norm.ppf((1 + Confidence_mu)/2))
# # print("N_min", N_min)
#
# # fig1, ax1 = plt.subplots(2)
#
# # import numpy as np
# # from scipy import stats
#
# # sns.set(color_codes=True)
#
# # x = np.random.normal(size=100)
# # sns.distplot(x);
# #
# # mean, cov = [0, 1], [(1, .5), (.5, 1)]
# # data = np.random.multivariate_normal(mean, cov, 200)
# # df = pd.DataFrame(data, columns=["x1", "x2"])
# # sns.jointplot(x="x1", y="x2", data=df);
#
# # f, ax = plt.subplots(figsize=(6, 6))
# # sns.kdeplot(x1, x2, ax=ax)
# # sns.rugplot(x1, color="g", ax=ax)
# # sns.rugplot(x2, vertical=True, ax=ax);
#
# # fig1.suptitle('2D Probability plot')
# # triang = tri.Triangulation(x1, x2)
#
# # plot1 = ax1[0].tripcolor(x1, x2, triang, frozen_lognorm.pdf(x1)+frozen_norm_por.pdf(x2), shading='gouraud', rasterized=True)
# # fig1.colorbar(plot1, ax=ax1[0], label="Probability [x]")
#
# # Z = frozen_lognorm.pdf(x1)*frozen_norm_por.pdf(x2)
# # print("permeability", len(x1))
# # print("porosity", len(x2))
# # print("dit is Z", len(Z))
# # fig1, ax1 = plt.subplots()
# # CS = ax1.contour(x1, x2, Z)
# # ax1.clabel(CS, inline=1, fontsize=10)
# # # ax1.set_title('Simplest default with labels')
# #
# # plt.show()
```
#### File: nutils/files/myUQ.py
```python
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import lognorm
from scipy import stats
import math
import pandas as pd
import seaborn as sns
from myUQlib import *
import pymc3 as pm
import arviz as az
# fig, ax = plt.subplots(2)
# standard deviation of normal distribution K
# sigma_K = 1
# mean of normal distribution
# mu_K = math.log(9e-9)
# create pdf plot
# bin_centers1 = 0.5*(x1[1:] + x1[:-1])
# frozen_lognorm = stats.lognorm(s=sigma_K, scale=math.exp(mu_K))
# ax[0].set(xlabel='Permeability K [m/s]', ylabel='Probability')
# ax[0].plot(x1,frozen_lognorm.pdf(x1)*(max(x1)-min(x1)))
# ax[0].set_xscale('log')
# create histogram plot
# permeability = frozen_lognorm.rvs(size=N)
# ax[0].hist(permeability, bins=bin_centers1, density=True, histtype='stepfilled', alpha=0.2)
# joined probability
# c_0 = 2.65
# Tau = (2) ** (1 / 2)
# SA = 5000 # surface area limestone [cm^2/g]
# rho_limestone = 2.711 # density limestone [g/cm^3]
# rho_sandstone = np.random.uniform(low=2.2, high=2.8, size=N) # density sandstone [g/cm^3]
# S0 = (SA * rho_limestone) # specific surface area [1/cm]
# porosity = (( permeability * S0_sand**2 ) / (constant) )**(1/tothepower)
# N=50
# porosity = get_samples_porosity(N)
# permeability = get_samples_permeability(porosity, N)
# x2 = np.linspace(0, 1, 100)
# bin_centers2 = 0.5*(x2[1:] + x2[:-1])
# frozen_norm = stats.norm(loc=mu_epsilon, scale=sigma_epsilon)
# ax[1].plot(x2,frozen_norm.pdf(x2))
# ax[0].set(xlabel='Porosity [-]', ylabel='Probability')
# permeability = frozen_lognorm.rvs(size=N)
# ax[0].hist(permeability, bins=bin_centers1, density=True, histtype='stepfilled', alpha=0.2)
# create histogram plot
# r2=frozen_norm.rvs(size=N)
# for index, k in enumerate(porosity_samples):
# ax[1].hist(porosity_samples, bins=bin_centers2, density=True, histtype='stepfilled', alpha=0.2)
# print('Mean Permeability K:', np.mean(permeability), 'm/s')
# print('Standard Deviation of Permeability K:',
# np.var(permeability)**0.5, 'm/s')
#
# print("permeability", permeability)
#
# mu_por = np.mean(porosity)
# stddv_por = np.var(porosity)**0.5
# # frozen_lognorm_por = stats.lognorm(s=stddv_por, scale=mu_por)
# frozen_norm_por = stats.norm(loc=mu_por, scale=stddv_por)
# # print(frozen_lognorm_por.pdf(x2))
# # ax[1].plot(x2, frozen_lognorm_por.pdf(x2))
# ax[1].plot(x2, frozen_norm_por.pdf(x2)*(max(x2)-min(x2)))
# print(r2)
# ## this is correct, but a joint probability is needed
# # standard deviation of normal distribution epsilon
# sigma_epsilon = 0.01
# # mean of normal distribution
# mu_epsilon = 0.046
#
# x2 = np.linspace(0, 0.1, 100)
# bin_centers2 = 0.5*(x2[1:] + x2[:-1])
# frozen_norm = stats.norm(loc=mu_epsilon, scale=sigma_epsilon)
# ax[1].plot(x2,frozen_norm.pdf(x2))
# ax[1].set(xlabel='Porosity [-]', ylabel='Probability')
# # Using map() and lambda
# def listOfTuples(l1, l2):
# return list(map(lambda x, y: (x, y), l1, l2))
#
# df = pd.DataFrame(listOfTuples(permeability, porosity), columns=["Permeability", "Porosity"])
#
# sns.jointplot(x="Permeability", y="Porosity", data=df, kind="kde");
#
# f, ax = plt.subplots(figsize=(6, 6))
#
# sns.kdeplot(df.Permeability, df.Porosity, ax=ax)
# sns.rugplot(df.Permeability, color="g", ax=ax)
# sns.rugplot(df.Porosity, vertical=True, ax=ax);
#
## Using map() and lambda
def listOfTuples(l1, l2):
return list(map(lambda x, y: (x, y), l1, l2))
import plotly.figure_factory as ff
import plotly.express as px
################### Uncertainty Quantification #########################
# N=2000
# porosity = get_samples_porosity(N)
# permeability = get_samples_permeability(porosity, N)
#
# df = pd.DataFrame(listOfTuples(permeability, porosity), columns=["Permeability", "Porosity"])
#
# f, ax = plt.subplots(figsize=(6, 6))
# # sns.kdeplot(df.Permeability, df.Porosity, n_levels=10, ax=ax)
# # sns.rugplot(df.Permeability, color="g", ax=ax)
# # sns.rugplot(df.Porosity, vertical=True, ax=ax)
#
# # distributionHeight = stats.lognorm(scale=70, s=0.25)
# # Height = distributionHeight.rvs(size=N)
# # ax.hist(Height, density=True, histtype='stepfilled', alpha=0.2, bins=20)
#
# # sns.jointplot(data=df, x="Permeability", y="Porosity", ax=ax, hue="species", kind="kde", n_levels=10);
# # ax.set(xlabel='K [m^2]', ylabel='φ [-]')
#
# # fig = px.histogram(df, x="Permeability", y="Porosity",
# # marginal="box", # or violin, rug
# # hover_data=df.columns)
#
# plt.show()
# # plot waaier
# sns.lineplot(
# data=fmri, x="timepoint", y="signal", hue="event", err_style="bars", ci=95
# )
# plt.show()
def performIUQ(aquifer, N, timestep, endtime):
# Amount of chains
chains = 4
# True data
permeability_true = 2.2730989084434785e-08
porosity_true = 0.163
# Observed data
T_data = stats.norm(loc=89.94, scale=0.05).rvs(size=N)
p_data = stats.norm(loc=244, scale=0.05).rvs(size=N)
constant = np.random.uniform(low=3.5, high=5.8, size=N)
tothepower = np.random.uniform(low=3, high=5, size=N)
Tau = (2) ** (1 / 2)
S0_sand = np.random.uniform(low=1.5e2, high=2.2e2, size=N) # specific surface area [1/cm]
# Mean of variables
𝜇_H = aquifer.H
𝜇_φ = aquifer.φ
𝜇_ct = aquifer.ct
𝜇_Q = aquifer.Q
𝜇_cs = aquifer.cs
with pm.Model() as PriorModel:
# Priors for unknown model parameters
Hpdf = H = pm.Normal('H', mu=𝜇_H, sd=0.025)
φpdf = φ = pm.Lognormal('por', mu=math.log(𝜇_φ), sd=0.24) # joint distribution
K_samples = constant * (φ.random(size=N) ** tothepower / S0_sand ** 2)
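# The K_samples relation above is a Kozeny-Carman-style porosity-permeability link,
# K proportional to phi**tothepower / S0_sand**2, with uniform priors drawn above
# for the proportionality constant and the exponent.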
Kpdf = K = pm.Lognormal('K', mu=math.log(np.mean(K_samples)), sd=1) # joint distribution
ctpdf = ct = pm.Normal('ct', mu=𝜇_ct, sd=0.025)
Qpdf = Q = pm.Normal('Q', mu=𝜇_Q, sd=0.025)
cspdf = cs = pm.Normal('cs', mu=𝜇_cs, sd=0.025)
parametersRVS = [Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf]
# permeability = pm.Lognormal('permeability', mu=math.log(9e-9), sd=0.025)
#
# porosity_samples = ((permeability.random(size=N) * S0_sand ** 2) / (constant)) ** (1 / tothepower)
# mu_por = np.mean(porosity_samples)
# stddv_por = np.var(porosity_samples) ** 0.5
# porosity = pm.Normal('porosity', mu=mu_por, sd=0.025) # porosity 0 - 0.3 as primary data, permeability as secondary data
# Priors for unknown model parameters based on porosity first as joined distribution
# porosity = pm.Uniform('porosity', lower=0.1, upper=0.5)
# porosity = pm.Lognormal('porosity', mu=math.log(0.3), sd=0.24) # porosity 0 - 0.3 as primary data, permeability as secondary data
# porosity_samples = porosity.random(size=N)
# permeability_samples = constant * ( porosity_samples** tothepower / S0_sand ** 2 )
# mu_per = np.mean(permeability_samples)
# permeability = pm.Lognormal('permeability', mu=math.log(mu_per), sd=1)
# stddv_per = np.var(permeability_samples) ** 0.5
# print("permeability mean", mu_per, "permeability standard deviation", stddv_per)
# Expected value of outcome (issue: a pdf cannot be passed to the external model function directly, so N random draws are passed in and the N returned values have to be turned back into a pdf)
# print("\r\nRunning FE model...", permeability_samples, 'por', porosity_samples)
# p_model = np.empty([N])
# T_model = np.empty([N])
# Multiple variable.random(size=N) draws need to go into the for loop here. How?
# Build an array of values from all of the distributions above and feed it in here
# Run Analytical Analysis (Backward)
print("\r\nRunning Analytical Analysis... (Backward)")
solAA = performAA(parametersRVS, aquifer, N, timestep, endtime)
pdrawdown = solAA[0][:, t1steps]
mu_p = np.mean(pdrawdown)
stddv_p = np.var(pdrawdown) ** 0.5
# Likelihood (sampling distribution) of observations
p_obs = pm.Normal('p_obs', mu=mu_p, sd=10, observed=p_data)
with PriorModel:
# Inference
start = pm.find_MAP() # Find starting value by optimization
step = pm.NUTS(scaling=start) # Instantiate MCMC sampling algoritm
# pm.Metropolis() pm.GaussianRandomWalk()
trace = pm.sample(1000, start=start, step=step, cores=1, chains=chains) # Draw 1000 posterior samples
# print("length posterior", len(trace['permeability']), trace.get_values('permeability', combine=True), len(trace.get_values('permeability', combine=True)))
print(az.summary(trace))
chain_count = trace.get_values('K').shape[0]
# T_pred = pm.sample_posterior_predictive(trace, samples=chain_count, model=m0)
data_spp = az.from_pymc3(trace=trace)
joint_plt = az.plot_joint(data_spp, var_names=['K', 'por'], kind='kde', fill_last=False);
trace_fig = az.plot_trace(trace, var_names=['K', 'por'], figsize=(12, 8));
# pm.traceplot(trace, varnames=['permeability', 'porosity'])
plt.show()
traces = [trace]
for _ in range(6):
with pm.Model() as InferenceModel:
# Priors are posteriors from previous iteration
H = from_posterior('H', trace['H'])
φ = from_posterior('por', trace['por'])
K = from_posterior('K', trace['K'])
ct = from_posterior('ct', trace['ct'])
Q = from_posterior('Q', trace['Q'])
cs = from_posterior('cs', trace['cs'])
parametersRVS = [H, φ, K, ct, Q, cs]
# p_posterior = np.empty(N)
# T_posterior = np.empty(N)
# for index, (k, eps) in enumerate(zip(permeability.random(size=N), porosity.random(size=N))):
# p_inlet, T_prod = DoubletFlow(aquifer, well, doublet, k, eps)
# p_posterior[index] = p_inlet
# T_posterior[index] = T_prod
# Run Analytical Analysis (Backward)
print("\r\nRunning Analytical Analysis... (Backward)")
solAA = performAA(parametersRVS, aquifer, N, timestep, endtime)
pposterior = solAA[0][:, t1steps]
print("mean pressure", np.mean(pposterior))
mu_p = np.mean(pposterior)
stddv_p = np.var(pposterior) ** 0.5
# mu_T = np.mean(T_posterior)
# stddv_T = np.var(T_posterior) ** 0.5
# Likelihood (sampling distribution) of observations
p_obs = pm.Normal('p_obs', mu=mu_p, sd=1, observed=p_data)
# T_obs = pm.Normal('T_obs', mu=mu_T, sd=1, observed=T_data)
# draw 1000 posterior samples
trace = pm.sample(1000, cores=1, chains=chains)
traces.append(trace)
return
```
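The commented-out notes in the script above circle around one recurring obstacle: the analytical forward model (`performAA`) consumes and returns plain NumPy samples, so it cannot be handed a PyMC3 random variable directly. One common workaround is to wrap the black-box function as a Theano operator with `as_op`, so it can sit inside the likelihood; such an op exposes no gradient, so a gradient-free step method like Metropolis has to replace NUTS. The sketch below only illustrates that pattern and is not the implementation used above: `drawdown_np` is a hypothetical stand-in for the real forward model and `p_data` is a placeholder for the observations.
```python
import numpy as np
import pymc3 as pm
import theano.tensor as tt
from theano.compile.ops import as_op

p_data = np.random.normal(225e5, 10.0, size=50)  # placeholder observations

def drawdown_np(K, por):
    # hypothetical stand-in for the external analytical model (performAA)
    return 2.2e7 + 1e-7 * por / K

@as_op(itypes=[tt.dscalar, tt.dscalar], otypes=[tt.dscalar])
def drawdown_op(K, por):
    # Theano wrapper: receives float64 values, must return a float64 array
    return np.asarray(drawdown_np(K, por), dtype=np.float64)

with pm.Model():
    por = pm.Lognormal('por', mu=np.log(0.2), sd=0.24)
    K = pm.Lognormal('K', mu=np.log(1e-12), sd=1.0)
    p = drawdown_op(K, por)
    pm.Normal('p_obs', mu=p, sd=10.0, observed=p_data)
    # the wrapped op has no gradient, so use a gradient-free sampler
    trace = pm.sample(1000, step=pm.Metropolis(), cores=1, chains=2)
```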
#### File: nutils/files/pyMC3priortutorial.py
```python
import matplotlib.pyplot as plt
import matplotlib as mpl
import pymc3 as pm
from pymc3 import Model, Normal, Slice
from pymc3 import sample
from pymc3 import traceplot
from pymc3.distributions import Interpolated
from theano import as_op
import theano
import theano.tensor as tt
import numpy as np
import math
from scipy import stats
# print("theano path", theano.__path__)
# np.show_config()
# dtype=theano.config.floatX
plt.style.use('seaborn-darkgrid')
# Initialize random number generator
np.random.seed(93457)
# True parameter values
alpha_true = 5
beta0_true = 7
beta1_true = 13
# permeability_true = 2.2730989084434785e-08
# porosity_true = 0.163
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha_true + beta0_true * X1 + beta1_true * X2 + np.random.randn(size)
# T = stats.norm(loc=89.94, scale=1)
import pymc3 as pm
print('Running on PyMC3 v{}'.format(pm.__version__))
basic_model = pm.Model()
# import myUQ.py
# import myFUQlib.py
with basic_model:
    # Priors for unknown model parameters (your uncertainty quantification goes here), cf. myUQ.py
alpha = pm.Normal('alpha', mu=0, sd=1)
beta0 = pm.Normal('beta0', mu=12, sd=1)
beta1 = pm.Normal('beta1', mu=18, sd=1)
# sigma_K = 1
# mu_K = math.log(9e-9)
# permeability = stats.lognorm(s=sigma_K, scale=math.exp(mu_K))
#
# constant = np.random.uniform(low=3.5, high=5.8, size=N)
# tothepower = np.random.uniform(low=3, high=5, size=N)
# Tau = (2) ** (1 / 2)
# SA = 5000 # surface area limestone [cm^2/g]
# rho_limestone = 2.711 # density limestone [g/cm^3]
# rho_sandstone = np.random.uniform(low=2.2, high=2.8, size=N) # density sandstone [g/cm^3]
# S0 = (SA * rho_limestone) # specific surface area [1/cm]
# S0_sand = np.random.uniform(low=1.5e2, high=2.2e2, size=N) # specific surface area [1/cm]
# porosity = ((permeability * S0_sand ** 2) / (constant)) ** (1 / tothepower)
    # Expected value of outcome (the output of your model goes here), cf. myFUQlib.py
mu = alpha + beta0 * X1 + beta1 * X2
# print("Running FE model...")
# p_inlet, T_prod = DoubletFlow(aquifer, well, doublet, permeability, porosity)
# mu_T = np.mean(T_prod)
# stddv_T = np.var(T_prod)**0.5
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=1, observed=Y)
# T_obs = pm.Normal('T_obs', mu=mu, sd=1, observed=T)
# draw 1000 posterior samples
trace = pm.sample(1000, cores=1, chains=4)
pm.traceplot(trace)
# plt.show()
def from_posterior(param, samples):
smin, smax = np.min(samples), np.max(samples)
width = smax - smin
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
# what was never sampled should have a small probability but not 0,
# so we'll extend the domain and use linear approximation of density on it
x = np.concatenate([[x[0] - 3 * width], x, [x[-1] + 3 * width]])
y = np.concatenate([[0], y, [0]])
# print("Interpolated", pm.Interpolated(param, x, y))
return Interpolated(param, x, y)
traces = [trace]
for _ in range(10):
# generate more data
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
Y = alpha_true + beta0_true * X1 + beta1_true * X2 + np.random.randn(size)
model = pm.Model()
with model:
# Priors are posteriors from previous iteration
alpha = from_posterior('alpha', trace['alpha'])
beta0 = from_posterior('beta0', trace['beta0'])
beta1 = from_posterior('beta1', trace['beta1'])
posterior_sample = pm.sample(size, tune=2000, chains=4, cores=1)
# print("info var", beta0, beta1)
# Expected value of outcome
mu = alpha + beta0 * X1 + beta1 * X2
print("info var", mu)
# Likelihood (sampling distribution) of observations
Y_obs = pm.Normal('Y_obs', mu=mu, sd=1, observed=Y)
print("info var", Y_obs)
        # draw 1000 posterior samples
trace = pm.sample(1000, cores=1, chains=4)
traces.append(trace)
plt.show()
print('Posterior distributions after ' + str(len(traces)) + ' iterations.')
cmap = mpl.cm.autumn
for param in ['alpha', 'beta0', 'beta1']:
plt.figure(figsize=(8, 2))
for update_i, trace in enumerate(traces):
samples = trace[param]
smin, smax = np.min(samples), np.max(samples)
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
plt.plot(x, y, color=cmap(1 - update_i / len(traces)))
plt.axvline({'alpha': alpha_true, 'beta0': beta0_true, 'beta1': beta1_true}[param], c='k')
plt.ylabel('Frequency')
plt.title(param)
plt.tight_layout();
plt.show()
```
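The `from_posterior` helper above turns an array of posterior samples into an `Interpolated` prior by evaluating a Gaussian KDE on a padded grid. A quick way to check that the reconstructed prior reproduces the samples it was built from is to draw from it with the prior predictive. The snippet below is a minimal sketch, assuming `from_posterior` (and its numpy/scipy/pymc3 imports) is in scope and using synthetic samples in place of `trace['alpha']`.
```python
import numpy as np
import pymc3 as pm

fake_samples = np.random.normal(loc=5.0, scale=0.5, size=2000)  # stand-in for trace['alpha']

with pm.Model():
    alpha = from_posterior('alpha', fake_samples)   # Interpolated distribution built from the samples
    prior = pm.sample_prior_predictive(samples=1000)

# mean/std of the reconstructed prior should be close to 5.0 and 0.5
print(prior['alpha'].mean(), prior['alpha'].std())
```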
#### File: nutils/files/revolved.py
```python
from nutils import mesh, function, solver, export, cli, topology
from matplotlib import collections
import numpy
def main(viscosity=1.3e-3, density=1e3, pout=223e5, uw=-0.01, nelems=10):
# viscosity = 1.3e-3
# density = 1e3
# pout = 223e5
# nelems = 10
# uw = -0.01
domain, geom = mesh.rectilinear([numpy.linspace(0, 1, nelems), numpy.linspace(1, 2, nelems), [0, 2 * numpy.pi]],
periodic=[2])
domain = domain.withboundary(inner='bottom', outer='top')
ns = function.Namespace()
ns.y, ns.r, ns.θ = geom
ns.x_i = '<r cos(θ), y, r sin(θ)>_i'
ns.uybasis, ns.urbasis, ns.pbasis = function.chain([
domain.basis('std', degree=3, removedofs=((0,-1), None, None)), # remove normal component at y=0 and y=1
domain.basis('std', degree=3, removedofs=((0,-1), None, None)), # remove tangential component at y=0 (no slip)
domain.basis('std', degree=2)])
ns.ubasis_ni = '<urbasis_n cos(θ), uybasis_n, urbasis_n sin(θ)>_i'
ns.viscosity = viscosity
ns.density = density
ns.u_i = 'ubasis_ni ?lhs_n'
ns.p = 'pbasis_n ?lhs_n'
ns.sigma_ij = 'viscosity (u_i,j + u_j,i) - p δ_ij'
ns.pout = pout
ns.uw = uw
  # ns.uw_i = 'uw <cos(θ), 0, sin(θ)>_i'  # radial wall velocity; 'phi' was undefined, and this is superseded by the uniform-outflow definition below
ns.tout_i = '-pout n_i'
ns.uw_i = 'uw n_i' # uniform outflow
res = domain.integral('(viscosity ubasis_ni,j u_i,j - p ubasis_ni,i + pbasis_n u_k,k) d:x' @ ns, degree=6)
# res -= domain[1].boundary['inner'].integral('ubasis_ni tout_i d:x' @ ns, degree=6)
sqr = domain.boundary['inner'].integral('(u_i - uw_i) (u_i - uw_i) d:x' @ ns, degree=6)
# sqr = domain.boundary['outer'].integral('(u_i - uin_i) (u_i - uin_i) d:x' @ ns, degree=6)
sqr -= domain.boundary['outer'].integral('(p - pout) (p - pout) d:x' @ ns, degree=6)
cons = solver.optimize('lhs', sqr, droptol=1e-15)
lhs = solver.solve_linear('lhs', res, constrain=cons)
plottopo = domain[:, :, 0:].boundary['back']
bezier = plottopo.sample('bezier', 10)
r, y, p, u = bezier.eval([ns.r, ns.y, ns.p, function.norm2(ns.u)], lhs=lhs)
with export.mplfigure('pressure.png', dpi=800) as fig:
ax = fig.add_subplot(111, title='pressure', aspect=1)
ax.autoscale(enable=True, axis='both', tight=True)
im = ax.tripcolor(r, y, bezier.tri, p, shading='gouraud', cmap='jet')
ax.add_collection(
collections.LineCollection(numpy.array([y, r]).T[bezier.hull], colors='k', linewidths=0.2, alpha=0.2))
fig.colorbar(im)
uniform = plottopo.sample('uniform', 1)
r_, y_, uv = uniform.eval([ns.r, ns.y, ns.u], lhs=lhs)
with export.mplfigure('velocity.png', dpi=800) as fig:
ax = fig.add_subplot(111, title='Velocity', aspect=1)
ax.autoscale(enable=True, axis='both', tight=True)
im = ax.tripcolor(r, y, bezier.tri, u, shading='gouraud', cmap='jet')
ax.quiver(r_, y_, uv[:, 0], uv[:, 1], angles='xy', scale_units='xy')
fig.colorbar(im)
if __name__ == '__main__':
  cli.run(main)
```
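The namespace line `ns.x_i = '<r cos(θ), y, r sin(θ)>_i'` is what turns the (y, r, θ) box into a revolved, cylindrical geometry. The following sanity-check sketch is not part of the original script; it reuses only calls that already appear above (and assumes nutils and numpy are importable) to evaluate that map on a coarse sample and confirm the radial coordinate stays within the annulus.
```python
from nutils import mesh, function
import numpy

domain, geom = mesh.rectilinear(
    [numpy.linspace(0, 1, 3), numpy.linspace(1, 2, 3), [0, 2 * numpy.pi]], periodic=[2])
ns = function.Namespace()
ns.y, ns.r, ns.θ = geom
ns.x_i = '<r cos(θ), y, r sin(θ)>_i'

bezier = domain.sample('bezier', 2)
x = bezier.eval(ns.x)                       # (npoints, 3) array of Cartesian coordinates
r = numpy.linalg.norm(x[:, [0, 2]], axis=1)
print(r.min(), r.max())                     # radial coordinate stays within [1, 2]
```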
#### File: nutils/matrix/_auto.py
```python
from ._base import BackendNotAvailable
def setassemble(sets):
try:
from ._mkl import setassemble
except BackendNotAvailable:
try:
from ._scipy import setassemble
except BackendNotAvailable:
from ._numpy import setassemble
return setassemble(sets)
```
#### File: nutils/matrix/_scipy.py
```python
from ._base import Matrix, MatrixError, BackendNotAvailable
from .. import numeric
import treelog as log
import numpy
try:
import scipy.sparse.linalg
except ImportError:
raise BackendNotAvailable('the Scipy matrix backend requires scipy to be installed (try: pip install scipy)')
def setassemble(sets):
return sets(assemble)
def assemble(data, index, shape):
return ScipyMatrix(scipy.sparse.csr_matrix((data, index), shape))
class ScipyMatrix(Matrix):
'''matrix based on any of scipy's sparse matrices'''
def __init__(self, core):
self.core = core
super().__init__(core.shape)
def convert(self, mat):
if not isinstance(mat, Matrix):
raise TypeError('cannot convert {} to Matrix'.format(type(mat).__name__))
if self.shape != mat.shape:
raise MatrixError('non-matching shapes')
if isinstance(mat, ScipyMatrix):
return mat
    return ScipyMatrix(scipy.sparse.csr_matrix(mat.export('csr'), self.shape))
def __add__(self, other):
return ScipyMatrix(self.core + self.convert(other).core)
def __sub__(self, other):
return ScipyMatrix(self.core - self.convert(other).core)
def __mul__(self, other):
if not numeric.isnumber(other):
raise TypeError
return ScipyMatrix(self.core * other)
def __matmul__(self, other):
if not isinstance(other, numpy.ndarray):
raise TypeError
if other.shape[0] != self.shape[1]:
raise MatrixError
return self.core * other
def __neg__(self):
return ScipyMatrix(-self.core)
def export(self, form):
if form == 'dense':
return self.core.toarray()
if form == 'csr':
csr = self.core.tocsr()
return csr.data, csr.indices, csr.indptr
if form == 'coo':
coo = self.core.tocoo()
return coo.data, (coo.row, coo.col)
    raise NotImplementedError('cannot export ScipyMatrix to {!r}'.format(form))
@property
def T(self):
return ScipyMatrix(self.core.transpose())
def _solver(self, rhs, solver, **kwargs):
if solver in ['bicg', 'bicgstab', 'cg', 'cgs', 'gmres', 'lgmres', 'minres']:
kwargs['method'] = solver
solver = 'scipy'
return super()._solver(rhs, solver, **kwargs)
def _solver_scipy(self, rhs, method, atol, callback=None, precon=None, **solverargs):
rhsnorm = numpy.linalg.norm(rhs)
solverfun = getattr(scipy.sparse.linalg, method)
myrhs = rhs / rhsnorm # normalize right hand side vector for best control over scipy's stopping criterion
mytol = atol / rhsnorm
if precon is not None:
precon = scipy.sparse.linalg.LinearOperator(self.shape, self.getprecon(precon), dtype=float)
with log.context(method + ' {:.0f}%', 0) as reformat:
def mycallback(arg):
# some solvers provide the residual, others the left hand side vector
res = numpy.linalg.norm(myrhs - self @ arg) if numpy.ndim(arg) == 1 else float(arg)
if callback:
callback(res)
reformat(100 * numpy.log10(max(mytol, res)) / numpy.log10(mytol))
mylhs, status = solverfun(self.core, myrhs, M=precon, tol=mytol, callback=mycallback, **solverargs)
if status != 0:
raise Exception('status {}'.format(status))
return mylhs * rhsnorm
def _precon_direct(self):
return scipy.sparse.linalg.factorized(self.core.tocsc())
def _precon_splu(self):
return scipy.sparse.linalg.splu(self.core.tocsc()).solve
def _precon_spilu(self, **kwargs):
return scipy.sparse.linalg.spilu(self.core.tocsc(), **kwargs).solve
def _submatrix(self, rows, cols):
return ScipyMatrix(self.core[rows,:][:,cols])
def diagonal(self):
return self.core.diagonal()
# vim:sw=2:sts=2:et
```
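A minimal usage sketch of the wrapper defined above, exercising only methods defined in this file. It assumes scipy and numpy are installed and that `ScipyMatrix` is importable from this module (`nutils.matrix._scipy` in this repository layout).
```python
import numpy, scipy.sparse

A = ScipyMatrix(scipy.sparse.csr_matrix(numpy.array([[4.0, 1.0], [1.0, 3.0]])))
x = numpy.array([1.0, 2.0])

y = A @ x                          # __matmul__ returns a plain numpy array
dense = A.export('dense')          # same values as A.core.toarray()
data, indices, indptr = A.export('csr')
print(y, A.diagonal(), A.T.export('dense'))
```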
#### File: nutils/nutils/mesh.py
```python
from . import topology, function, util, element, numeric, transform, transformseq, warnings, types, cache
from .elementseq import References
import numpy, os, itertools, re, math, treelog as log, io, contextlib
_ = numpy.newaxis
# MESH GENERATORS
@log.withcontext
def rectilinear(richshape, periodic=(), name='rect'):
'rectilinear mesh'
ndims = len(richshape)
shape = []
offset = []
scale = []
uniform = True
for v in richshape:
if numeric.isint(v):
assert v > 0
shape.append(v)
scale.append(1)
offset.append(0)
elif numpy.equal(v, numpy.linspace(v[0],v[-1],len(v))).all():
shape.append(len(v)-1)
scale.append((v[-1]-v[0]) / float(len(v)-1))
offset.append(v[0])
else:
shape.append(len(v)-1)
uniform = False
root = transform.Identifier(ndims, name)
axes = [transformseq.DimAxis(0,n,idim in periodic) for idim, n in enumerate(shape)]
topo = topology.StructuredTopology(root, axes)
if uniform:
if all(o == offset[0] for o in offset[1:]):
offset = offset[0]
if all(s == scale[0] for s in scale[1:]):
scale = scale[0]
geom = function.rootcoords(ndims) * scale + offset
else:
funcsp = topo.basis('spline', degree=1, periodic=())
coords = numeric.meshgrid(*richshape).reshape(ndims, -1)
geom = (funcsp * coords).sum(-1)
return topo, geom
def line(nodes, periodic=False, bnames=None):
if isinstance(nodes, int):
uniform = True
assert nodes > 0
nelems = nodes
scale = 1
offset = 0
else:
nelems = len(nodes)-1
scale = (nodes[-1]-nodes[0]) / nelems
offset = nodes[0]
uniform = numpy.equal(nodes, offset + numpy.arange(nelems+1) * scale).all()
root = transform.Identifier(1, 'line')
domain = topology.StructuredLine(root, 0, nelems, periodic=periodic, bnames=bnames)
geom = function.rootcoords(1) * scale + offset if uniform else domain.basis('std', degree=1, periodic=False).dot(nodes)
return domain, geom
def newrectilinear(nodes, periodic=None, bnames=[['left','right'],['bottom','top'],['front','back']]):
if periodic is None:
periodic = numpy.zeros(len(nodes), dtype=bool)
else:
periodic = numpy.asarray(periodic)
assert len(periodic) == len(nodes) and periodic.ndim == 1 and periodic.dtype == bool
dims = [line(nodesi, periodici, bnamesi) for nodesi, periodici, bnamesi in zip(nodes, periodic, tuple(bnames)+(None,)*len(nodes))]
domain, geom = dims.pop(0)
for domaini, geomi in dims:
domain = domain * domaini
geom = function.concatenate(function.bifurcate(geom,geomi))
return domain, geom
@log.withcontext
def multipatch(patches, nelems, patchverts=None, name='multipatch'):
'''multipatch rectilinear mesh generator
Generator for a :class:`~nutils.topology.MultipatchTopology` and geometry.
The :class:`~nutils.topology.MultipatchTopology` consists of a set patches,
where each patch is a :class:`~nutils.topology.StructuredTopology` and all
patches have the same number of dimensions.
The ``patches`` argument, a :class:`numpy.ndarray`-like with shape
``(npatches, 2*ndims)`` or ``(npatches,)+(2,)*ndims``, defines the
connectivity by labelling the patch vertices. For example, three
one-dimensional patches can be connected at one edge by::
# connectivity: 3
# │
# 1──0──2
patches=[[0,1], [0,2], [0,3]]
Or two two-dimensional patches along an edge by::
# connectivity: 3──4──5
# │ │ │
# 0──1──2
patches=[[[0,3],[1,4]], [[1,4],[2,5]]]
The geometry is specified by the ``patchverts`` argument: a
:class:`numpy.ndarray`-like with shape ``(nverts,ngeomdims)`` specifying for
each vertex a coordinate. Note that the dimension of the geometry may be
higher than the dimension of the patches. The created geometry is a
patch-wise linear interpolation of the vertex coordinates. If the
``patchverts`` argument is omitted the geometry describes a unit hypercube
per patch.
The ``nelems`` argument is either an :class:`int` defining the number of
elements per patch per dimension, or a :class:`dict` with edges (a pair of
vertex numbers) as keys and the number of elements (:class:`int`) as values,
with key ``None`` specifying the default number of elements. Example::
# connectivity: 3─────4─────5
# │ 4x3 │ 8x3 │
# 0─────1─────2
patches=[[[0,3],[1,4]], [[1,4],[2,5]]]
nelems={None: 4, (1,2): 8, (4,5): 8, (0,3): 3, (1,4): 3, (2,5): 3}
Since the patches are structured topologies, the number of elements per
patch per dimension should be unambiguous. In above example specifying
``nelems={None: 4, (1,2): 8}`` will raise an exception because the patch on
the right has 8 elements along edge ``(1,2)`` and 4 along ``(4,5)``.
Example
-------
An L-shaped domain can be generated by::
>>> # connectivity: 2──5
>>> # │ |
>>> # 1──4─────7 y
>>> # │ │ │ │
>>> # 0──3─────6 └──x
>>> domain, geom = multipatch(
... patches=[[0,1,3,4], [1,2,4,5], [3,4,6,7]],
... patchverts=[[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [3,0], [3,1]],
... nelems={None: 4, (3,6): 8, (4,7): 8})
The number of elements is chosen such that all elements in the domain have
the same size.
A topology and geometry describing the surface of a sphere can be generated
by creating a multipatch cube surface and inflating the cube to a sphere:
>>> # connectivity: 3────7
>>> # ╱│ ╱│
>>> # 2────6 │ y
>>> # │ │ │ │ │
>>> # │ 1──│─5 │ z
>>> # │╱ │╱ │╱
>>> # 0────4 *────x
>>> import itertools
>>> from nutils import function
>>> topo, cube = multipatch(
... patches=[
... [0,1,2,3], # left, normal: x
... [4,5,6,7], # right, normal: x
... [0,1,4,5], # bottom, normal: -y
... [2,3,6,7], # top, normal: -y
... [0,2,4,6], # front, normal: z
... [1,3,5,7], # back, normal: z
... ],
... patchverts=tuple(itertools.product(*([[-1,1]]*3))),
... nelems=1)
>>> sphere = function.normalized(cube)
The normals of the patches are determined by the order of the vertex numbers.
An outward normal for the cube is obtained by flipping the left, top and
front faces:
>>> cubenormal = cube.normal(exterior=True) * topo.basis('patch').dot([-1,1,1,-1,-1,1])
At the centroids of the faces the outward normal should equal the cube geometry:
>>> numpy.testing.assert_allclose(*topo.sample('gauss', 1).eval([cubenormal, cube]))
Similarly, the outward normal of the sphere is obtained by:
>>> spherenormal = sphere.normal(exterior=True) * topo.basis('patch').dot([-1,1,1,-1,-1,1])
>>> numpy.testing.assert_allclose(*topo.sample('gauss', 1).eval([spherenormal, cube]))
Args
----
patches:
      A :class:`numpy.ndarray`-like with shape ``(npatches, 2*ndims)`` or
      ``(npatches,)+(2,)*ndims``: a sequence of patches, each patch being a
      list of vertex indices.
patchverts:
A sequence of coordinates of the vertices.
nelems:
Either an :class:`int` specifying the number of elements per patch per
dimension, or a :class:`dict` with edges (a pair of vertex numbers) as
keys and the number of elements (:class:`int`) as values, with key
``None`` specifying the default number of elements.
Returns
-------
:class:`nutils.topology.MultipatchTopology`:
The multipatch topology.
:class:`nutils.function.Array`:
The geometry defined by the ``patchverts`` or a unit hypercube per patch
if ``patchverts`` is not specified.
'''
patches = numpy.array(patches)
if patches.dtype != int:
raise ValueError('`patches` should be an array of ints.')
if patches.ndim < 2 or patches.ndim == 2 and patches.shape[-1] % 2 != 0:
raise ValueError('`patches` should be an array with shape (npatches,2,...,2) or (npatches,2*ndims).')
elif patches.ndim > 2 and patches.shape[1:] != (2,) * (patches.ndim - 1):
raise ValueError('`patches` should be an array with shape (npatches,2,...,2) or (npatches,2*ndims).')
patches = patches.reshape(patches.shape[0], -1)
# determine topological dimension of patches
ndims = 0
while 2**ndims < patches.shape[1]:
ndims += 1
if 2**ndims > patches.shape[1]:
raise ValueError('Only hyperrectangular patches are supported: ' \
'number of patch vertices should be a power of two.')
patches = patches.reshape([patches.shape[0]] + [2]*ndims)
# group all common patch edges (and/or boundaries?)
if isinstance(nelems, int):
nelems = {None: nelems}
elif isinstance(nelems, dict):
nelems = {(k and frozenset(k)): v for k, v in nelems.items()}
else:
raise ValueError('`nelems` should be an `int` or `dict`')
# create patch topologies, geometries
if patchverts is not None:
patchverts = numpy.array(patchverts)
indices = set(patches.flat)
if tuple(sorted(indices)) != tuple(range(len(indices))):
raise ValueError('Patch vertices in `patches` should be numbered consecutively, starting at 0.')
if len(patchverts) != len(indices):
raise ValueError('Number of `patchverts` does not equal number of vertices specified in `patches`.')
if len(patchverts.shape) != 2:
raise ValueError('Every patch vertex should be an array of dimension 1.')
topos = []
coords = []
for i, patch in enumerate(patches):
# find shape of patch and local patch coordinates
shape = []
for dim in range(ndims):
nelems_sides = []
sides = [(0,1)]*ndims
sides[dim] = slice(None),
for side in itertools.product(*sides):
sideverts = frozenset(patch[side])
if sideverts in nelems:
nelems_sides.append(nelems[sideverts])
else:
nelems_sides.append(nelems[None])
if len(set(nelems_sides)) != 1:
raise ValueError('duplicate number of elements specified for patch {} in dimension {}'.format(i, dim))
shape.append(nelems_sides[0])
# create patch topology
topos.append(rectilinear(shape, name='{}{}'.format(name, i))[0])
# compute patch geometry
patchcoords = [numpy.linspace(0, 1, n+1) for n in shape]
patchcoords = numeric.meshgrid(*patchcoords).reshape(ndims, -1)
if patchverts is not None:
patchcoords = numpy.array([
sum(
patchverts[j]*util.product(c if s else 1-c for c, s in zip(coord, side))
for j, side in zip(patch.flat, itertools.product(*[[0,1]]*ndims))
)
for coord in patchcoords.T
]).T
coords.append(patchcoords)
# build patch boundary data
boundarydata = topology.MultipatchTopology.build_boundarydata(patches)
# join patch topologies, geometries
topo = topology.MultipatchTopology(tuple(map(topology.Patch, topos, patches, boundarydata)))
funcsp = topo.basis('spline', degree=1, patchcontinuous=False)
geom = (funcsp * numpy.concatenate(coords, axis=1)).sum(-1)
return topo, geom
@cache.function
def parsegmsh(mshdata):
"""Gmsh parser
Parser for Gmsh data in ``msh2`` or ``msh4`` format. See the `Gmsh manual
<http://geuz.org/gmsh/doc/texinfo/gmsh.html>`_ for details.
Parameters
----------
mshdata : :class:`io.BufferedIOBase`
Msh file contents.
Returns
-------
:class:`dict`:
Keyword arguments for :func:`simplex`
"""
try:
from meshio import gmsh
except ImportError as e:
raise Exception('parsegmsh requires the meshio module to be installed') from e
msh = gmsh.main.read_buffer(mshdata)
if not msh.cell_sets:
# Old versions of the gmsh file format repeat elements that have multiple
# tags. To support this we edit the meshio data to bring it in the same
# form as the new files by deduplicating cells and creating cell_sets.
renums = []
for icell, cells in enumerate(msh.cells):
keep = (cells.data[1:] != cells.data[:-1]).any(axis=1)
if keep.all():
renum = numpy.arange(len(cells.data))
else:
msh.cells[icell] = cells._replace(data=cells.data[numpy.hstack([True, keep])])
renum = numpy.hstack([0, keep.cumsum()])
renums.append(renum)
for name, (itag, nd) in msh.field_data.items():
msh.cell_sets[name] = [renum[data == itag] for data, renum in zip(msh.cell_data['gmsh:physical'], renums)]
# Coords is a 2d float-array such that coords[inode,idim] == coordinate.
coords = msh.points
# Nodes is a dictionary that maps a topological dimension to a 2d int-array
# dictionary such that nodes[nd][ielem,ilocal] == inode, where ilocal < nd+1
# for linear geometries or larger for higher order geometries. Since meshio
# stores nodes by simplex type and cell, simplex types are mapped to
# dimensions and gathered, after which cells are concatenated under the
# assumption that there is only one simplex type per dimension.
nodes = {('ver','lin','tri','tet').index(typename[:3]): numpy.concatenate(datas, axis=0)
for typename, datas in util.gather((cells.type, cells.data) for cells in msh.cells)}
# Identities is a 2d [master, slave] int-aray that pairs matching nodes on
# periodic walls. For the topological connectivity, all slaves in the nodes
# arrays will be replaced by their master counterpart.
identities = numpy.zeros((0, 2), dtype=int) if not msh.gmsh_periodic \
else numpy.concatenate([d for a, b, c, d in msh.gmsh_periodic], axis=0)
# Tags is a list of (nd, name, ndelems) tuples that define topological groups
# per dimension. Since meshio associates group names with cells, which are
# concatenated in nodes, element ids are offset and concatenated to match.
tags = [(msh.field_data[name][1], name, numpy.concatenate([selection
+ sum(len(cells.data) for cells in msh.cells[:icell] if cells.type == msh.cells[icell].type) # offset into nodes
for icell, selection in enumerate(selections)]))
for name, selections in msh.cell_sets.items()]
# determine the dimension of the topology
ndims = max(nodes)
# determine the dimension of the geometry
assert not numpy.isnan(coords).any()
while coords.shape[1] > ndims and not coords[:,-1].any():
coords = coords[:,:-1]
# separate geometric, topological nodes
cnodes = nodes[ndims]
if cnodes.shape[1] > ndims+1: # higher order geometry
nodes = {nd: n[:,:nd+1] for nd, n in nodes.items()} # remove high order info
if len(identities):
slaves, masters = identities.T
keep = numpy.ones(len(coords), dtype=bool)
keep[slaves] = False
assert keep[masters].all()
renumber = keep.cumsum()-1
renumber[slaves] = renumber[masters]
nodes = {nd: renumber[n] for nd, n in nodes.items()}
vnodes = nodes[ndims]
bnodes = nodes.get(ndims-1)
pnodes = nodes.get(0)
if cnodes is vnodes: # geometry is linear and non-periodic, dofs follow in-place sorting of nodes
degree = 1
elif cnodes.shape[1] == ndims+1: # linear elements: match sorting of nodes
degree = 1
shuffle = vnodes.argsort(axis=1)
cnodes = cnodes[numpy.arange(len(cnodes))[:,_], shuffle] # gmsh conveniently places the primary ndim+1 vertices first
else: # higher order elements: match sorting of nodes and renumber higher order coefficients
degree, nodeorder = { # for meshio's node ordering conventions see http://www.vtk.org/VTK/img/file-formats.pdf
(2, 6): (2, (0,3,1,5,4,2)),
(2,10): (3, (0,3,4,1,8,9,5,7,6,2)),
(2,15): (4, (0,3,4,5,1,11,12,13,6,10,14,7,9,8,2)),
(3,10): (2, (0,4,1,6,5,2,7,8,9,3))}[ndims, cnodes.shape[1]]
enum = numpy.empty([degree+1]*(ndims+1), dtype=int)
bari = tuple(numpy.array([index[::-1] for index in numpy.ndindex(*enum.shape) if sum(index) == degree]).T)
enum[bari] = numpy.arange(cnodes.shape[1]) # maps baricentric index to corresponding enumerated index
shuffle = vnodes.argsort(axis=1)
cnodes = cnodes[:,nodeorder] # convert from gmsh to nutils order
for i in range(ndims): # strategy: apply shuffle to cnodes by sequentially swapping vertices...
for j in range(i+1, ndims+1): # ...considering all j > i pairs...
m = shuffle[:,i] == j # ...and swap vertices if vertex j is shuffled into i...
r = enum.swapaxes(i,j)[bari] # ...using the enum table to generate the appropriate renumbering
cnodes[m,:] = cnodes[numpy.ix_(m,r)]
m = shuffle[:,j] == i
shuffle[m,j] = shuffle[m,i] # update shuffle to track changed vertex positions
vnodes.sort(axis=1)
nnodes = vnodes[:,-1].max()+1
vtags, btags, ptags = {}, {}, {}
edge_vertices = numpy.arange(ndims+1).repeat(ndims).reshape(ndims, ndims+1)[:,::-1].T # nedges x ndims
for nd, name, ielems in tags:
if nd == ndims:
vtags[name] = numpy.array(ielems)
elif nd == ndims-1:
edgenodes = bnodes[ielems]
nodemask = numeric.asboolean(edgenodes.ravel(), size=nnodes, ordered=False)
ielems, = (nodemask[vnodes].sum(axis=1) >= ndims).nonzero() # all elements sharing at least ndims edgenodes
edgemap = {tuple(b): (ielem, iedge) for ielem, a in zip(ielems, vnodes[ielems[:,_,_], edge_vertices[_,:,:]]) for iedge, b in enumerate(a)}
btags[name] = numpy.array([edgemap[tuple(sorted(n))] for n in edgenodes])
elif nd == 0:
ptags[name] = pnodes[ielems][...,0]
log.info('\n- '.join(['loaded {}d gmsh topology consisting of #{} elements'.format(ndims, len(cnodes))]
+ [name + ' groups: ' + ', '.join('{} #{}'.format(n, len(e)) for n, e in tags.items())
for name, tags in (('volume', vtags), ('boundary', btags), ('point', ptags)) if tags]))
return dict(nodes=vnodes, cnodes=cnodes, coords=coords, tags=vtags, btags=btags, ptags=ptags)
@log.withcontext
@types.apply_annotations
def gmsh(fname:util.binaryfile, name='gmsh'):
"""Gmsh parser
Parser for Gmsh files in `.msh` format. Only files with physical groups are
supported. See the `Gmsh manual
<http://geuz.org/gmsh/doc/texinfo/gmsh.html>`_ for details.
Parameters
----------
fname : :class:`str` or :class:`io.BufferedIOBase`
Path to mesh file or mesh file object.
name : :class:`str` or :any:`None`
Name of parsed topology, defaults to 'gmsh'.
Returns
-------
topo : :class:`nutils.topology.SimplexTopology`
Topology of parsed Gmsh file.
geom : :class:`nutils.function.Array`
Isoparametric map.
"""
with fname as f:
return simplex(name=name, **parsegmsh(f))
def simplex(nodes, cnodes, coords, tags, btags, ptags, name='simplex'):
'''Simplex topology.
Parameters
----------
nodes : :class:`numpy.ndarray`
Vertex indices as (nelems x ndims+1) integer array, sorted along the
second dimension. This table fully determines the connectivity of the
simplices.
cnodes : :class:`numpy.ndarray`
Coordinate indices as (nelems x ncnodes) integer array following Nutils'
conventions for Bernstein polynomials. The polynomial degree is inferred
from the array shape.
coords : :class:`numpy.ndarray`
Coordinates as (nverts x ndims) float array to be indexed by ``cnodes``.
tags : :class:`dict`
Dictionary of name->element numbers. Element order is preserved in the
resulting volumetric groups.
btags : :class:`dict`
Dictionary of name->edges, where edges is a (nedges x 2) integer array
containing pairs of element number and edge number. The segments are
assigned to boundary or interfaces groups automatically while otherwise
preserving order.
ptags : :class:`dict`
Dictionary of name->node numbers referencing the ``nodes`` table.
name : :class:`str`
Name of simplex topology.
Returns
-------
topo : :class:`nutils.topology.SimplexTopology`
Topology with volumetric, boundary and interface groups.
geom : :class:`nutils.function.Array`
Geometry function.
'''
nverts = len(coords)
nelems, ncnodes = cnodes.shape
ndims = nodes.shape[1] - 1
assert len(nodes) == nelems
assert numpy.greater(nodes[:,1:], nodes[:,:-1]).all(), 'nodes must be sorted'
if ncnodes == ndims+1:
degree = 1
vnodes = cnodes
else:
degree = int((ncnodes * math.factorial(ndims))**(1/ndims))-1 # degree**ndims/ndims! < ncnodes < (degree+1)**ndims/ndims!
dims = numpy.arange(ndims)
strides = (dims+1+degree).cumprod() // (dims+1).cumprod() # (i+1+degree)!/(i+1)!
assert strides[-1] == ncnodes
vnodes = cnodes[:,(0,*strides-1)]
assert vnodes.shape == nodes.shape
transforms = transformseq.IdentifierTransforms(ndims=ndims, name=name, length=nelems)
topo = topology.SimplexTopology(nodes, transforms, transforms)
coeffs = element.getsimplex(ndims).get_poly_coeffs('lagrange', degree=degree)
basis = function.PlainBasis([coeffs] * nelems, cnodes, nverts, topo.f_index, topo.f_coords).wrapped
geom = (basis[:,_] * coords).sum(0)
connectivity = topo.connectivity
bgroups = {}
igroups = {}
for name, elems_edges in btags.items():
bitems = [], [], None
iitems = [], [], []
for ielem, iedge in elems_edges:
ioppelem = connectivity[ielem, iedge]
simplices, transforms, opposites = bitems if ioppelem == -1 else iitems
simplices.append(tuple(nodes[ielem][:iedge])+tuple(nodes[ielem][iedge+1:]))
transforms.append(topo.transforms[ielem] + (transform.SimplexEdge(ndims, iedge),))
if opposites is not None:
opposites.append(topo.transforms[ioppelem] + (transform.SimplexEdge(ndims, tuple(connectivity[ioppelem]).index(ielem)),))
for groups, (simplices, transforms, opposites) in (bgroups, bitems), (igroups, iitems):
if simplices:
transforms = transformseq.PlainTransforms(transforms, ndims-1)
opposites = transforms if opposites is None else transformseq.PlainTransforms(opposites, ndims-1)
groups[name] = topology.SimplexTopology(simplices, transforms, opposites)
pgroups = {}
if ptags:
ptrans = [transform.Matrix(linear=numpy.zeros(shape=(ndims,0)), offset=offset) for offset in numpy.eye(ndims+1)[:,1:]]
pmap = {inode: numpy.array(numpy.equal(nodes, inode).nonzero()).T for inode in set.union(*map(set, ptags.values()))}
for pname, inodes in ptags.items():
ptransforms = transformseq.PlainTransforms([topo.transforms[ielem] + (ptrans[ivertex],) for inode in inodes for ielem, ivertex in pmap[inode]], 0)
preferences = References.uniform(element.getsimplex(0), len(ptransforms))
pgroups[pname] = topology.Topology(preferences, ptransforms, ptransforms)
vgroups = {}
for name, ielems in tags.items():
if len(ielems) == nelems and numpy.equal(ielems, numpy.arange(nelems)).all():
vgroups[name] = topo.withgroups(bgroups=bgroups, igroups=igroups, pgroups=pgroups)
continue
transforms = topo.transforms[ielems]
vtopo = topology.SimplexTopology(nodes[ielems], transforms, transforms)
keep = numpy.zeros(nelems, dtype=bool)
keep[ielems] = True
vbgroups = {}
vigroups = {}
for bname, elems_edges in btags.items():
bitems = [], [], []
iitems = [], [], []
for ielem, iedge in elems_edges:
ioppelem = connectivity[ielem, iedge]
if ioppelem == -1:
keepopp = False
else:
keepopp = keep[ioppelem]
ioppedge = tuple(connectivity[ioppelem]).index(ielem)
if keepopp and keep[ielem]:
simplices, transforms, opposites = iitems
elif keepopp or keep[ielem]:
simplices, transforms, opposites = bitems
if keepopp:
ielem, iedge, ioppelem, ioppedge = ioppelem, ioppedge, ielem, iedge
else:
continue
simplices.append(tuple(nodes[ielem][:iedge])+tuple(nodes[ielem][iedge+1:]))
transforms.append(topo.transforms[ielem] + (transform.SimplexEdge(ndims, iedge),))
if ioppelem != -1:
opposites.append(topo.transforms[ioppelem] + (transform.SimplexEdge(ndims, ioppedge),))
for groups, (simplices, transforms, opposites) in (vbgroups, bitems), (vigroups, iitems):
if simplices:
transforms = transformseq.PlainTransforms(transforms, ndims-1)
opposites = transformseq.PlainTransforms(opposites, ndims-1) if len(opposites) == len(transforms) else transforms
groups[bname] = topology.SimplexTopology(simplices, transforms, opposites)
vpgroups = {}
for pname, inodes in ptags.items():
ptransforms = transformseq.PlainTransforms([topo.transforms[ielem] + (ptrans[ivertex],) for inode in inodes for ielem, ivertex in pmap[inode] if keep[ielem]], 0)
preferences = References.uniform(element.getsimplex(0), len(ptransforms))
vpgroups[pname] = topology.Topology(preferences, ptransforms, ptransforms)
vgroups[name] = vtopo.withgroups(bgroups=vbgroups, igroups=vigroups, pgroups=vpgroups)
return topo.withgroups(vgroups=vgroups, bgroups=bgroups, igroups=igroups, pgroups=pgroups), geom
def fromfunc(func, nelems, ndims, degree=1):
'piecewise'
if isinstance(nelems, int):
nelems = [nelems]
assert len(nelems) == func.__code__.co_argcount
topo, ref = rectilinear([numpy.linspace(0,1,n+1) for n in nelems])
funcsp = topo.basis('spline', degree=degree).vector(ndims)
coords = topo.projection(func, onto=funcsp, coords=ref, exact_boundaries=True)
return topo, coords
def unitsquare(nelems, etype):
'''Unit square mesh.
Args
----
nelems : :class:`int`
Number of elements along boundary
etype : :class:`str`
Type of element used for meshing. Supported are:
* ``"square"``: structured mesh of squares.
* ``"triangle"``: unstructured mesh of triangles.
* ``"mixed"``: unstructured mesh of triangles and squares.
Returns
-------
:class:`nutils.topology.Topology`:
The structured/unstructured topology.
:class:`nutils.function.Array`:
The geometry function.
'''
root = transform.Identifier(2, 'unitsquare')
if etype == 'square':
topo = topology.StructuredTopology(root, [transformseq.DimAxis(0, nelems, False)] * 2)
elif etype in ('triangle', 'mixed'):
simplices = numpy.concatenate([
numpy.take([i*(nelems+1)+j, i*(nelems+1)+j+1, (i+1)*(nelems+1)+j, (i+1)*(nelems+1)+j+1], [[0,1,2],[1,2,3]] if i%2==j%2 else [[0,1,3],[0,2,3]], axis=0)
for i in range(nelems) for j in range(nelems)])
v = numpy.arange(nelems+1, dtype=float)
coords = numeric.meshgrid(v, v).reshape(2,-1).T
transforms = transformseq.PlainTransforms([(root, transform.Square((c[1:]-c[0]).T, c[0])) for c in coords[simplices]], 2)
topo = topology.SimplexTopology(simplices, transforms, transforms)
if etype == 'mixed':
references = list(topo.references)
transforms = list(topo.transforms)
square = element.getsimplex(1)**2
connectivity = list(topo.connectivity)
isquares = [i * nelems + j for i in range(nelems) for j in range(nelems) if i%2==j%3]
for n in sorted(isquares, reverse=True):
i, j = divmod(n, nelems)
references[n*2:(n+1)*2] = square,
transforms[n*2:(n+1)*2] = (root, transform.Shift([float(i),float(j)])),
connectivity[n*2:(n+1)*2] = numpy.concatenate(connectivity[n*2:(n+1)*2])[[3,2,4,1] if i%2==j%2 else [3,2,0,5]],
connectivity = [c-numpy.greater(c,n*2) for c in connectivity]
topo = topology.ConnectedTopology(References.from_iter(references, 2), transformseq.PlainTransforms(transforms, 2),transformseq.PlainTransforms(transforms, 2), tuple(types.frozenarray(c, copy=False) for c in connectivity))
x, y = topo.boundary.elem_mean(function.rootcoords(2), degree=1).T
bgroups = dict(left=x==0, right=x==nelems, bottom=y==0, top=y==nelems)
topo = topo.withboundary(**{name: topo.boundary[numpy.where(mask)[0]] for name, mask in bgroups.items()})
else:
raise Exception('invalid element type {!r}'.format(etype))
return topo, function.rootcoords(2) / nelems
# vim:sw=2:sts=2:et
```
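A short usage sketch of the two simplest generators defined above; it is not part of the module and assumes nutils and numpy are importable. ``rectilinear`` builds a structured topology from per-axis vertex arrays, ``unitsquare`` an (un)structured unit square with named boundary groups.
```python
from nutils import mesh
import numpy

# structured 10 x 20 grid on [0,1] x [0,2]
topo, geom = mesh.rectilinear([numpy.linspace(0, 1, 11), numpy.linspace(0, 2, 21)])
print(len(topo), geom.shape)              # 200 elements, 2D geometry

# unstructured triangulation of the unit square with boundary groups
tri, tri_geom = mesh.unitsquare(4, 'triangle')
left = tri.boundary['left']               # groups: left, right, bottom, top
print(len(tri), len(left))
```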
#### File: nutils/nutils/types.py
```python
import inspect, functools, hashlib, builtins, numbers, collections.abc, itertools, abc, sys, weakref, re, io, types
import numpy
def aspreprocessor(apply):
'''
Convert ``apply`` into a preprocessor decorator. When applied to a function,
``wrapped``, the returned decorator preprocesses the arguments with ``apply``
before calling ``wrapped``. The ``apply`` function should return a tuple of
``args`` (:class:`tuple` or :class:`list`) and ``kwargs`` (:class:`dict`).
The decorated function ``wrapped`` will be called with ``wrapped(*args,
**kwargs)``. The ``apply`` function is allowed to change the signature of
the decorated function.
Examples
--------
The following preprocessor swaps two arguments.
>>> @aspreprocessor
... def swapargs(a, b):
... return (b, a), {}
Decorating a function with ``swapargs`` will cause the arguments to be
swapped before the wrapped function is called.
>>> @swapargs
... def func(a, b):
... return a, b
>>> func(1, 2)
(2, 1)
'''
def preprocessor(wrapped):
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
args, kwargs = apply(*args, **kwargs)
return wrapped(*args, **kwargs)
wrapper.__preprocess__ = apply
wrapper.__signature__ = inspect.signature(apply)
return wrapper
return preprocessor
def _build_apply_annotations(signature):
try:
# Find a prefix for internal variables that is guaranteed to be
# collision-free with the parameter names of `signature`.
for i in itertools.count():
internal_prefix = '__apply_annotations_internal{}_'.format(i)
if not any(name.startswith(internal_prefix) for name in signature.parameters):
break
# The `l` dictionary is used as locals when compiling the `apply` function.
l = {}
# Function to add `obj` to the locals `l`. Returns the name of the
# variable (in `l`) that refers to `obj`.
def add_local(obj):
name = '{}{}'.format(internal_prefix, len(l))
assert name not in l
l[name] = obj
return name
# If there are positional-only parameters and there is no var-keyword
# parameter, we can create an equivalent signature with positional-only
# parameters converted to positional-or-keyword with unused names.
if any(param.kind == param.POSITIONAL_ONLY for param in signature.parameters.values()) and not any(param.kind == param.VAR_KEYWORD for param in signature.parameters.values()):
n_positional_args = 0
new_params = []
for param in signature.parameters.values():
if param.kind == param.POSITIONAL_ONLY:
param = param.replace(kind=param.POSITIONAL_OR_KEYWORD, name='{}pos{}'.format(internal_prefix, n_positional_args))
new_params.append(param)
equiv_signature = signature.replace(parameters=new_params)
else:
equiv_signature = signature
# We build the following function
#
# def apply(<params>):
# <body>
# return (<args>), {<kwargs>}
#
# `params`, `body`, `args` and `kwargs` are lists of valid python code (as `str`).
params = []
body = []
args = []
kwargs = []
allow_positional = True
for name, param in equiv_signature.parameters.items():
if param.kind == param.KEYWORD_ONLY and allow_positional:
allow_positional = False
params.append('*')
if param.kind in (param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY):
p = name
if param.default is not param.empty:
p = '{}={}'.format(p, add_local(param.default))
params.append(p)
if allow_positional:
args.append(name)
else:
kwargs.append('{0!r}:{0}'.format(name))
elif param.kind == param.VAR_POSITIONAL:
allow_positional = False
p = '*{}'.format(name)
params.append(p)
args.append(p)
elif param.kind == param.VAR_KEYWORD:
allow_positional = False
p = '**{}'.format(name)
params.append(p)
kwargs.append(p)
else:
raise ValueError('Cannot create function definition with parameter {}.'.format(param))
if param.annotation is param.empty:
pass
elif param.default is None:
        # Omit the annotation if the default is None and the argument is also None.
body.append(' {arg} = None if {arg} is None else {ann}({arg})\n'.format(arg=name, ann=add_local(param.annotation)))
else:
body.append(' {arg} = {ann}({arg})\n'.format(arg=name, ann=add_local(param.annotation)))
f = 'def apply({params}):\n{body} return ({args}), {{{kwargs}}}\n'.format(params=','.join(params), body=''.join(body), args=''.join(arg+',' for arg in args), kwargs=','.join(kwargs))
exec(f, l)
apply = l['apply']
except ValueError:
def apply(*args, **kwargs):
bound = signature.bind(*args, **kwargs)
bound.apply_defaults()
for name, param in signature.parameters.items():
if param.annotation is param.empty:
continue
if param.default is None and bound.arguments[name] is None:
continue
bound.arguments[name] = param.annotation(bound.arguments[name])
return bound.args, bound.kwargs
# Copy the signature of `wrapped` without annotations. This matches the
# behaviour of the compiled `apply` above.
apply.__signature__ = inspect.Signature(parameters=[param.replace(annotation=param.empty) for param in signature.parameters.values()])
apply.returns_canonical_arguments = True
return apply
def apply_annotations(wrapped):
'''
Decorator that applies annotations to arguments. All annotations should be
:any:`callable`\\s taking one argument and returning a transformed argument.
All annotations are strongly recommended to be idempotent_.
.. _idempotent: https://en.wikipedia.org/wiki/Idempotence
If a parameter of the decorated function has a default value ``None`` and the
value of this parameter is ``None`` as well, the annotation is omitted.
Examples
--------
Consider the following function.
>>> @apply_annotations
... def f(a:tuple, b:int):
... return a + (b,)
When calling ``f`` with a :class:`list` and :class:`str` as arguments, the
:func:`apply_annotations` decorator first applies :class:`tuple` and
:class:`int` to the arguments before passing them to the decorated function.
>>> f([1, 2], '3')
(1, 2, 3)
The following example illustrates the behavior of parameters with default
value ``None``.
>>> addone = lambda x: x+1
>>> @apply_annotations
... def g(a:addone=None):
... return a
When calling ``g`` without arguments or with argument ``None``, the
annotation ``addone`` is not applied. Note that ``None + 1`` would raise an
exception.
>>> g() is None
True
>>> g(None) is None
True
When passing a different value, the annotation is applied:
>>> g(1)
2
'''
signature = inspect.signature(wrapped)
if all(param.annotation is param.empty for param in signature.parameters.values()):
return wrapped
else:
return aspreprocessor(_build_apply_annotations(signature))(wrapped)
def argument_canonicalizer(signature):
'''
Returns a function that converts arguments matching ``signature`` to
canonical positional and keyword arguments. If possible, an argument is
added to the list of positional arguments, otherwise to the keyword arguments
dictionary. The returned arguments include default values.
Parameters
----------
signature : :class:`inspect.Signature`
The signature of a function to generate canonical arguments for.
Returns
-------
:any:`callable`
A function that returns a :class:`tuple` of a :class:`tuple` of
positional arguments and a :class:`dict` of keyword arguments.
Examples
--------
Consider the following function.
>>> def f(a, b=4, *, c): pass
The ``argument_canonicalizer`` for ``f`` is generated as follows:
>>> canon = argument_canonicalizer(inspect.signature(f))
Calling ``canon`` with parameter ``b`` passed as keyword returns arguments
with parameter ``b`` as positional argument:
>>> canon(1, c=3, b=2)
((1, 2), {'c': 3})
When calling ``canon`` without parameter ``b`` the default value is added to
the positional arguments:
>>> canon(1, c=3)
((1, 4), {'c': 3})
'''
return _build_apply_annotations(inspect.Signature(parameters=[param.replace(annotation=param.empty) for param in signature.parameters.values()]))
def nutils_hash(data):
'''
Compute a stable hash of immutable object ``data``. The hash is not affected
by Python's hash randomization (see :meth:`object.__hash__`).
Parameters
----------
data
An immutable object of type :class:`bool`, :class:`int`, :class:`float`,
:class:`complex`, :class:`str`, :class:`bytes`, :class:`tuple`,
:class:`frozenset`, or :any:`Ellipsis` or :any:`None`, or the type
itself, or an object with a ``__nutils_hash__`` attribute.
Returns
-------
  20 :class:`bytes`
The hash of ``data``.
'''
try:
return data.__nutils_hash__
except AttributeError:
pass
t = type(data)
h = hashlib.sha1(t.__name__.encode()+b'\0')
if data is Ellipsis:
pass
elif data is None:
pass
elif any(data is dtype for dtype in (bool, int, float, complex, str, bytes, builtins.tuple, frozenset, type(Ellipsis), type(None))):
h.update(hashlib.sha1(data.__name__.encode()).digest())
elif any(t is dtype for dtype in (bool, int, float, complex)):
h.update(hashlib.sha1(repr(data).encode()).digest())
elif t is str:
h.update(hashlib.sha1(data.encode()).digest())
elif t is bytes:
h.update(hashlib.sha1(data).digest())
elif t is builtins.tuple:
for item in data:
h.update(nutils_hash(item))
elif t is frozenset:
for item in sorted(map(nutils_hash, data)):
h.update(item)
elif issubclass(t, io.BufferedIOBase) and data.seekable() and not data.writable():
pos = data.tell()
h.update(str(pos).encode())
data.seek(0)
    for chunk in iter(lambda: data.read(0x20000), b''):  # hash the stream in 128 KiB chunks
      h.update(chunk)
data.seek(pos)
elif t is types.MethodType:
h.update(nutils_hash(data.__self__))
h.update(nutils_hash(data.__name__))
else:
raise TypeError('unhashable type: {!r} {!r}'.format(data, t))
return h.digest()
class _CacheMeta_property:
'''
Memoizing property used by :class:`CacheMeta`.
'''
_self = object()
def __init__(self, prop, cache_attr):
assert isinstance(prop, property)
self.fget = prop.fget
self.cache_attr = cache_attr
self.__doc__ = prop.__doc__
def __get__(self, instance, owner):
if instance is None:
return self
try:
cached_value = getattr(instance, self.cache_attr)
except AttributeError:
value = self.fget(instance)
setattr(instance, self.cache_attr, value if value is not instance else self._self)
return value
else:
return cached_value if cached_value is not self._self else instance
def __set__(self, instance, value):
raise AttributeError("can't set attribute")
def __delete__(self, instance):
raise AttributeError("can't delete attribute")
def _CacheMeta_method(func, cache_attr):
'''
Memoizing method decorator used by :class:`CacheMeta`.
'''
_self = object()
orig_func = func
signature = inspect.signature(func)
if not hasattr(func, '__preprocess__') and len(signature.parameters) == 1 and next(iter(signature.parameters.values())).kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.POSITIONAL_ONLY):
def wrapper(self):
try:
cached_value = getattr(self, cache_attr)
value = self if cached_value is _self else cached_value
except AttributeError:
value = func(self)
assert hash(value), 'cannot cache function because the return value is not hashable'
setattr(self, cache_attr, _self if value is self else value)
return value
else:
# Peel off the preprocessors (see `aspreprocessor`).
preprocessors = []
while hasattr(func, '__preprocess__'):
preprocessors.append(func.__preprocess__)
func = func.__wrapped__
if not preprocessors or not getattr(preprocessors[-1], 'returns_canonical_arguments', False):
preprocessors.append(argument_canonicalizer(inspect.signature(func)))
def wrapper(*args, **kwargs):
self = args[0]
# Apply preprocessors.
for preprocess in preprocessors:
args, kwargs = preprocess(*args, **kwargs)
key = args[1:], tuple(sorted(kwargs.items()))
assert hash(key), 'cannot cache function because arguments are not hashable'
# Fetch cached value, if any, and return cached value if args match.
try:
cached_key, cached_value = getattr(self, cache_attr)
except AttributeError:
pass
else:
if cached_key == key:
return self if cached_value is _self else cached_value
value = func(*args, **kwargs)
assert hash(value), 'cannot cache function because the return value is not hashable'
setattr(self, cache_attr, (key, _self if value is self else value))
return value
wrapper.__name__ = orig_func.__name__
wrapper.__doc__ = orig_func.__doc__
wrapper.__signature__ = signature
return wrapper
# While we do not use `abc.ABCMeta` in `CacheMeta` itself, we will use it in
# many classes having `CacheMeta` as a meta(super)class. To avoid having to
# write `class MCls(CacheMeta, abc.ABCMeta): pass` everywhere, we simply derive
# from `abc.ABCMeta` here.
class CacheMeta(abc.ABCMeta):
'''
Metaclass that adds caching functionality to properties and methods listed in
the special attribute ``__cache__``. If an attribute is of type
:class:`property`, the value of the property will be computed at the first
attribute access and served from cache subsequently. If an attribute is a
method, the arguments and return value are cached and the cached value will
be used if a subsequent call is made with the same arguments; if not, the
cache will be overwritten. The cache lives in private attributes in the
instance. The metaclass supports the use of ``__slots__``. If a subclass
redefines a cached property or method (in the sense of this metaclass) of a
base class, the property or method of the subclass is *not* automatically
cached; ``__cache__`` should be used in the subclass explicitly.
Examples
--------
An example of a class with a cached property:
>>> class T(metaclass=CacheMeta):
... __cache__ = 'x',
... @property
... def x(self):
... print('uncached')
... return 1
The print statement is added to illustrate when method ``x`` (as defined
above) is called:
>>> t = T()
>>> t.x
uncached
1
>>> t.x
1
An example of a class with a cached method:
>>> class U(metaclass=CacheMeta):
... __cache__ = 'y',
... def y(self, a):
... print('uncached')
... return a
Again, the print statement is added to illustrate when the method ``y`` (as defined above) is
called:
>>> u = U()
>>> u.y(1)
uncached
1
>>> u.y(1)
1
>>> u.y(2)
uncached
2
>>> u.y(2)
2
'''
def __new__(mcls, name, bases, namespace, **kwargs):
# Wrap all properties that should be cached and reserve slots.
if '__cache__' in namespace:
cache = namespace['__cache__']
cache = (cache,) if isinstance(cache, str) else tuple(cache)
cache_attrs = []
for attr in cache:
# Apply name mangling (see https://docs.python.org/3/tutorial/classes.html#private-variables).
if attr.startswith('__') and not attr.endswith('__'):
attr = '_{}{}'.format(name, attr)
# Reserve an attribute for caching property values that is reasonably
# unique, by combining the class and attribute names. The following
# artificial situation will fail though, because both the base class
# and the subclass have the same name, hence the cached properties
# point to the same attribute for caching:
#
# Class A(metaclass=CacheMeta):
# __cache__ = 'x',
# @property
# def x(self):
# return 1
#
# class A(A):
# __cache__ = 'x',
# @property
# def x(self):
# return super().x + 1
# @property
# def y(self):
# return super().x
#
# With `a = A()`, `a.x` first caches `1`, then `2` and `a.y` will
# return `2`. On the other hand, `a.y` calls property `x` of the base
# class and caches `1` and subsequently `a.x` will return `1` from
# cache.
cache_attr = '_CacheMeta__cached_property_{}_{}'.format(name, attr)
cache_attrs.append(cache_attr)
if attr not in namespace:
raise TypeError('Attribute listed in __cache__ is undefined: {}'.format(attr))
value = namespace[attr]
if isinstance(value, property):
namespace[attr] = _CacheMeta_property(value, cache_attr)
elif inspect.isfunction(value) and not inspect.isgeneratorfunction(value):
namespace[attr] = _CacheMeta_method(value, cache_attr)
else:
raise TypeError("Don't know how to cache attribute {}: {!r}".format(attr, value))
if '__slots__' in namespace and cache_attrs:
# Add `cache_attrs` to the slots.
slots = namespace['__slots__']
slots = [slots] if isinstance(slots, str) else list(slots)
for cache_attr in cache_attrs:
assert cache_attr not in slots, 'Private attribute for caching is listed in __slots__: {}'.format(cache_attr)
slots.append(cache_attr)
namespace['__slots__'] = tuple(slots)
return super().__new__(mcls, name, bases, namespace, **kwargs)
class ImmutableMeta(CacheMeta):
def __new__(mcls, name, bases, namespace, *, version=0, **kwargs):
if not isinstance(version, int):
raise ValueError("'version' should be of type 'int' but got {!r}".format(version))
cls = super().__new__(mcls, name, bases, namespace, **kwargs)
# Since we redefine `__call__` here and `inspect.signature(cls)` looks at
# `cls.__signature__` and if absent the signature of `__call__`, we
# explicitly copy the signature of `<cls instance>.__init__` to `cls`.
cls.__signature__ = inspect.signature(cls.__init__.__get__(object(), object))
# Peel off the preprocessors (see `aspreprocessor`) and store the
# preprocessors and the uncovered init separately.
pre_init = []
init = cls.__init__
while hasattr(init, '__preprocess__'):
pre_init.append(init.__preprocess__)
init = init.__wrapped__
if not pre_init or not getattr(pre_init[-1], 'returns_canonical_arguments', False):
pre_init.append(argument_canonicalizer(inspect.signature(init)))
cls._pre_init = tuple(pre_init)
cls._init = init
cls._version = version
return cls
def __init__(cls, name, bases, namespace, *, version=0, **kwargs):
super().__init__(name, bases, namespace, **kwargs)
def __call__(*args, **kwargs):
cls = args[0]
# Use `None` as temporary `self` argument, apply preprocessors and
# remove the temporary `self`.
args = None, *args[1:]
for preprocess in cls._pre_init:
args, kwargs = preprocess(*args, **kwargs)
args = args[1:]
return cls._new(args, kwargs)
def _new(cls, args, kwargs):
self = cls.__new__(cls)
self._args = args
self._kwargs = kwargs
self._hash = hash(args + tuple((key, kwargs[key]) for key in sorted(kwargs)))
self._init(*args, **kwargs)
return self
class Immutable(metaclass=ImmutableMeta):
'''
Base class for immutable types. This class adds equality tests, traditional
hashing (:func:`hash`), nutils hashing (:func:`nutils_hash`) and pickling,
all based solely on the (positional) intialization arguments, ``args`` for
future reference. Keyword-only arguments are not supported. All arguments
should be hashable by :func:`nutils_hash`.
Positional and keyword initialization arguments are canonicalized
automatically (by :func:`argument_canonicalizer`). If the ``__init__``
method of a subclass is decorated with preprocessors (see
:func:`aspreprocessor`), the preprocessors are applied to the initialization
arguments and ``args`` becomes the preprocessed positional part.
Examples
--------
Consider the following class.
>>> class Plain(Immutable):
... def __init__(self, a, b):
... pass
Calling ``Plain`` with equivalent positional or keyword arguments produces
equal instances:
>>> Plain(1, 2) == Plain(a=1, b=2)
True
Passing unhashable values to ``Plain`` will fail:
>>> Plain([1, 2], [3, 4]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unhashable type: 'list'
This can be solved by adding and applying annotations to ``__init__``. The
following class converts its initialization arguments to :class:`tuple`
  automatically:
>>> class Annotated(Immutable):
... @apply_annotations
... def __init__(self, a:tuple, b:tuple):
... pass
Calling ``Annotated`` with either :class:`list`\\s of ``1, 2`` and ``3, 4``
or :class:`tuple`\\s gives equal instances:
>>> Annotated([1, 2], [3, 4]) == Annotated((1, 2), (3, 4))
True
'''
__slots__ = '__weakref__', '_args', '_kwargs', '_hash'
__cache__ = '__nutils_hash__',
def __reduce__(self):
return self.__class__._new, (self._args, self._kwargs)
def __hash__(self):
return self._hash
def __eq__(self, other):
return type(self) is type(other) and self._hash == other._hash and self._args == other._args and self._kwargs == other._kwargs
@property
def __nutils_hash__(self):
h = hashlib.sha1('{}.{}:{}\0'.format(type(self).__module__, type(self).__qualname__, type(self)._version).encode())
for arg in self._args:
h.update(nutils_hash(arg))
for name in sorted(self._kwargs):
h.update(nutils_hash(name))
h.update(nutils_hash(self._kwargs[name]))
return h.digest()
def __getstate__(self):
raise Exception('getstate should never be called')
def __setstate__(self, state):
raise Exception('setstate should never be called')
def __str__(self):
return '{}({})'.format(self.__class__.__name__, ','.join(str(arg) for arg in self._args))
class SingletonMeta(ImmutableMeta):
def __new__(mcls, name, bases, namespace, **kwargs):
cls = super().__new__(mcls, name, bases, namespace, **kwargs)
cls._cache = weakref.WeakValueDictionary()
return cls
def _new(cls, args, kwargs):
key = args + tuple((key, kwargs[key]) for key in sorted(kwargs))
try:
self = cls._cache[key]
except KeyError:
cls._cache[key] = self = super()._new(args, kwargs)
return self
class Singleton(Immutable, metaclass=SingletonMeta):
'''
Subclass of :class:`Immutable` that creates a single instance per unique set
of initialization arguments.
Examples
--------
Consider the following class.
>>> class Plain(Singleton):
... def __init__(self, a, b):
... pass
Calling ``Plain`` with equivalent positional or keyword arguments produces
one instance:
>>> Plain(1, 2) is Plain(a=1, b=2)
True
Consider the following class with annotations.
>>> class Annotated(Singleton):
... @apply_annotations
... def __init__(self, a:tuple, b:tuple):
... pass
Calling ``Annotated`` with either :class:`list`\\s of ``1, 2`` and ``3, 4``
or :class:`tuple`\\s gives a single instance:
>>> Annotated([1, 2], [3, 4]) is Annotated((1, 2), (3, 4))
True
'''
__slots__ = ()
__hash__ = Immutable.__hash__
__eq__ = object.__eq__
def strictint(value):
'''
Converts any type that is a subclass of :class:`numbers.Integral` (e.g.
:class:`int` and ``numpy.int64``) to :class:`int`, and fails otherwise.
Notable differences with the behavior of :class:`int`:
* :func:`strictint` does not convert a :class:`str` to an :class:`int`.
* :func:`strictint` does not truncate :class:`float` to an :class:`int`.
Examples
--------
>>> strictint(1), type(strictint(1))
(1, <class 'int'>)
>>> strictint(numpy.int64(1)), type(strictint(numpy.int64(1)))
(1, <class 'int'>)
>>> strictint(1.0)
Traceback (most recent call last):
...
ValueError: not an integer: 1.0
>>> strictint('1')
Traceback (most recent call last):
...
ValueError: not an integer: '1'
'''
if not isinstance(value, numbers.Integral):
raise ValueError('not an integer: {!r}'.format(value))
return builtins.int(value)
def strictfloat(value):
'''
Converts any type that is a subclass of :class:`numbers.Real` (e.g.
:class:`float` and ``numpy.float64``) to :class:`float`, and fails
otherwise. Notable difference with the behavior of :class:`float`:
* :func:`strictfloat` does not convert a :class:`str` to a :class:`float`.
Examples
--------
>>> strictfloat(1), type(strictfloat(1))
(1.0, <class 'float'>)
>>> strictfloat(numpy.float64(1.2)), type(strictfloat(numpy.float64(1.2)))
(1.2, <class 'float'>)
>>> strictfloat(1.2+3.4j)
Traceback (most recent call last):
...
ValueError: not a real number: (1.2+3.4j)
>>> strictfloat('1.2')
Traceback (most recent call last):
...
ValueError: not a real number: '1.2'
'''
if not isinstance(value, numbers.Real):
raise ValueError('not a real number: {!r}'.format(value))
return builtins.float(value)
def strictstr(value):
'''
Returns ``value`` unmodified if it is a :class:`str`, and fails otherwise.
Notable difference with the behavior of :class:`str`:
* :func:`strictstr` does not call ``__str__`` methods of objects to
automatically convert objects to :class:`str`\\s.
Examples
--------
Passing a :class:`str` to :func:`strictstr` works:
>>> strictstr('spam')
'spam'
Passing an :class:`int` will fail:
>>> strictstr(1)
Traceback (most recent call last):
...
ValueError: not a 'str': 1
'''
if not isinstance(value, str):
raise ValueError("not a 'str': {!r}".format(value))
return value
def _getname(value):
name = []
if hasattr(value, '__module__'):
name.append(value.__module__)
if hasattr(value, '__qualname__'):
name.append(value.__qualname__)
elif hasattr(value, '__name__'):
name.append(value.__name__)
else:
return str(value)
return '.'.join(name)
def _copyname(dst=None, *, src, suffix=''):
if dst is None:
return functools.partial(_copyname, src=src, suffix=suffix)
if hasattr(src, '__name__'):
dst.__name__ = src.__name__+suffix
if hasattr(src, '__qualname__'):
dst.__qualname__ = src.__qualname__+suffix
if hasattr(src, '__module__'):
dst.__module__ = src.__module__
return dst
class _strictmeta(type):
def __getitem__(self, cls):
def constructor(value):
if not isinstance(value, cls):
raise ValueError('expected an object of type {!r} but got {!r} with type {!r}'.format(cls.__qualname__, value, type(value).__qualname__))
return value
constructor.__qualname__ = constructor.__name__ = 'strict[{}]'.format(_getname(cls))
return constructor
def __call__(*args, **kwargs):
raise TypeError("cannot create an instance of class 'strict'")
class strict(metaclass=_strictmeta):
'''
Type checker. The function ``strict[cls](value)`` returns ``value``
unmodified if ``value`` is an instance of ``cls``, otherwise a
:class:`ValueError` is raised.
Examples
--------
The ``strict[int]`` function passes integers unmodified:
>>> strict[int](1)
1
Other types fail:
>>> strict[int]('1')
Traceback (most recent call last):
...
ValueError: expected an object of type 'int' but got '1' with type 'str'
'''
class _tuplemeta(type):
def __getitem__(self, itemtype):
@_copyname(src=self, suffix='[{}]'.format(_getname(itemtype)))
def constructor(value):
return builtins.tuple(map(itemtype, value))
return constructor
@staticmethod
def __call__(*args, **kwargs):
return builtins.tuple(*args, **kwargs)
class tuple(builtins.tuple, metaclass=_tuplemeta):
'''
Wrapper of :class:`tuple` that supports a user-defined item constructor via
the notation ``tuple[I]``, with ``I`` the item constructor. This is
shorthand for ``lambda items: tuple(map(I, items))``. The item constructor
should be any callable that takes one argument.
Examples
--------
A tuple with items processed with :func:`strictint`:
>>> tuple[strictint]((False, 1, 2, numpy.int64(3)))
(0, 1, 2, 3)
If the item constructor raises an exception, the construction of the
:class:`tuple` fails accordingly:
>>> tuple[strictint]((1, 2, 3.4))
Traceback (most recent call last):
...
ValueError: not an integer: 3.4
'''
__slots__ = ()
class _frozendictmeta(CacheMeta):
def __getitem__(self, keyvaluetype):
if not isinstance(keyvaluetype, builtins.tuple) or len(keyvaluetype) != 2:
raise RuntimeError("expected a 'tuple' of length 2 but got {!r}".format(keyvaluetype))
keytype, valuetype = keyvaluetype
@_copyname(src=self, suffix='[{},{}]'.format(_getname(keytype), _getname(valuetype)))
def constructor(arg):
if isinstance(arg, collections.abc.Mapping):
items = arg.items()
elif isinstance(arg, (collections.abc.MappingView, collections.abc.Iterable)):
items = arg
else:
raise ValueError('expected a mapping or iterable but got {!r}'.format(arg))
return self((keytype(key), valuetype(value)) for key, value in items)
return constructor
class frozendict(collections.abc.Mapping, metaclass=_frozendictmeta):
'''
An immutable version of :class:`dict`. The :class:`frozendict` is hashable
and both the keys and values should be hashable as well.
Custom key and value constructors can be supplied via the ``frozendict[K,V]``
notation, with ``K`` the key constructor and ``V`` the value constructor,
which is roughly equivalent to ``lambda *args, **kwargs: {K(k): V(v) for k, v
in dict(*args, **kwargs).items()}``.
Examples
--------
A :class:`frozendict` with :func:`strictstr` as key constructor and
:func:`strictfloat` as value constructor:
>>> frozendict[strictstr,strictfloat]({'spam': False})
frozendict({'spam': 0.0})
Similar but with non-strict constructors:
>>> frozendict[str,float]({None: '1.2'})
frozendict({'None': 1.2})
Applying the strict constructors to invalid data raises an exception:
>>> frozendict[strictstr,strictfloat]({None: '1.2'})
Traceback (most recent call last):
...
ValueError: not a 'str': None
'''
__slots__ = '__base', '__hash'
__cache__ = '__nutils_hash__',
def __new__(cls, base):
if isinstance(base, frozendict):
return base
self = object.__new__(cls)
self.__base = dict(base)
self.__hash = hash(frozenset(self.__base.items())) # check immutability and precompute hash
return self
@property
def __nutils_hash__(self):
h = hashlib.sha1('{}.{}\0'.format(type(self).__module__, type(self).__qualname__).encode())
for item in sorted(nutils_hash(k)+nutils_hash(v) for k, v in self.items()):
h.update(item)
return h.digest()
def __reduce__(self):
return frozendict, (self.__base,)
def __eq__(self, other):
if self is other:
return True
if type(other) is not type(self):
return False
if self.__base is other.__base:
return True
if self.__hash != other.__hash or self.__base != other.__base:
return False
# deduplicate
self.__base = other.__base
return True
__getitem__ = lambda self, item: self.__base.__getitem__(item)
__iter__ = lambda self: self.__base.__iter__()
__len__ = lambda self: self.__base.__len__()
__hash__ = lambda self: self.__hash
__contains__ = lambda self, key: self.__base.__contains__(key)
copy = lambda self: self.__base.copy()
__repr__ = __str__ = lambda self: '{}({})'.format(type(self).__name__, self.__base)
class _frozenmultisetmeta(CacheMeta):
def __getitem__(self, itemtype):
@_copyname(src=self, suffix='[{}]'.format(_getname(itemtype)))
def constructor(value):
return self(map(itemtype, value))
return constructor
class frozenmultiset(collections.abc.Container, metaclass=_frozenmultisetmeta):
'''
An immutable multiset_. A multiset is a generalization of a set: items may
occur more than once. Two multisets are equal if they have the same set of
items and the same item multiplicities.
A custom item constructor can be supplied via the notation
``frozenmultiset[I]``, with ``I`` the item constructor. This is shorthand
for ``lambda items: frozenmultiset(map(I, items))``. The item constructor
should be any callable that takes one argument.
.. _multiset: https://en.wikipedia.org/wiki/Multiset
Examples
--------
>>> a = frozenmultiset(['spam', 'bacon', 'spam'])
>>> b = frozenmultiset(['sausage', 'spam'])
The :class:`frozenmultiset` objects support ``+``, ``-`` and ``&`` operators:
>>> a + b
frozenmultiset(['spam', 'bacon', 'spam', 'sausage', 'spam'])
>>> a - b
frozenmultiset(['bacon', 'spam'])
>>> a & b
frozenmultiset(['spam'])
The order of the items is irrelevant:
>>> frozenmultiset(['spam', 'spam', 'eggs']) == frozenmultiset(['spam', 'eggs', 'spam'])
True
The multiplicities, however, are not:
>>> frozenmultiset(['spam', 'spam', 'eggs']) == frozenmultiset(['spam', 'eggs'])
False
'''
__slots__ = '__items', '__key'
__cache__ = '__nutils_hash__',
def __new__(cls, items):
if isinstance(items, frozenmultiset):
return items
self = object.__new__(cls)
self.__items = tuple(items)
self.__key = frozenset((item, self.__items.count(item)) for item in self.__items)
return self
@property
def __nutils_hash__(self):
h = hashlib.sha1('{}.{}\0'.format(type(self).__module__, type(self).__qualname__).encode())
for item in sorted('{:04d}'.format(count).encode()+nutils_hash(item) for item, count in self.__key):
h.update(item)
return h.digest()
def __and__(self, other):
'''
Return a :class:`frozenmultiset` with elements from the left and right hand
sides with strict positive multiplicity, where the multiplicity is the
minimum of the multiplicities of the left and right hand sides.
'''
items = list(self.__items)
isect = []
for item in other:
try:
items.remove(item)
except ValueError:
pass
else:
isect.append(item)
return frozenmultiset(isect)
def __add__(self, other):
'''
Return a :class:`frozenmultiset` with elements from the left and right hand
sides with a multiplicity equal to the sum of the left and right hand
sides.
'''
return frozenmultiset(self.__items + tuple(other))
def __sub__(self, other):
'''
Return a :class:`frozenmultiset` with elements from the left hand sides with
a multiplicity equal to the difference of the multiplicity of the left and
right hand sides, truncated to zero. Elements with multiplicity zero are
omitted.
'''
items = list(self.__items)
for item in other:
try:
items.remove(item)
except ValueError:
pass
return frozenmultiset(items)
__reduce__ = lambda self: (frozenmultiset, (self.__items,))
__hash__ = lambda self: hash(self.__key)
__eq__ = lambda self, other: type(other) is type(self) and self.__key == other.__key
__contains__ = lambda self, item: item in self.__items
__iter__ = lambda self: iter(self.__items)
__len__ = lambda self: len(self.__items)
__bool__ = lambda self: bool(self.__items)
isdisjoint = lambda self, other: not any(item in self.__items for item in other)
__repr__ = __str__ = lambda self: '{}({})'.format(type(self).__name__, list(self.__items))
class _frozenarraymeta(CacheMeta):
def __getitem__(self, dtype):
@_copyname(src=self, suffix='[{}]'.format(_getname(dtype)))
def constructor(value):
return self(value, dtype=dtype)
return constructor
class frozenarray(collections.abc.Sequence, metaclass=_frozenarraymeta):
'''
An immutable version (and drop-in replacement) of :class:`numpy.ndarray`.
Besides being immutable, the :class:`frozenarray` differs from
:class:`numpy.ndarray` in (in)equality tests. Given two :class:`frozenarray`
objects ``a`` and ``b``, the test ``a == b`` returns ``True`` if both arrays
are equal in their entirety, including dtype and shape, while the same test
with :class:`numpy.ndarray` objects would give a boolean array with
element-wise truth values.
A constructor with a predefined ``dtype`` argument can be generated via the
notation ``frozenarray[dtype]``. This is shorthand for ``lambda base:
frozenarray(base, dtype=dtype)``.
Parameters
----------
base : :class:`numpy.ndarray` or array-like
The array data.
dtype
The dtype of the array or ``None``.
copy : :class:`bool`
If ``base`` is a :class:`frozenarray` and the ``dtype`` matches or is
``None``, this argument is ignored. If ``base`` is a
:class:`numpy.ndarray` and the ``dtype`` matches or is ``None`` and
``copy`` is ``False``, ``base`` is stored as is. Otherwise ``base`` is
copied.
'''
__slots__ = '__base'
__cache__ = '__nutils_hash__', '__hash__'
@staticmethod
def full(shape, fill_value):
return frozenarray(numpy.lib.stride_tricks.as_strided(fill_value, shape, [0]*len(shape)), copy=False)
def __new__(cls, base, dtype=None, copy=True):
isstrict = dtype in (strictint, strictfloat)
if dtype is None:
pass
elif dtype == bool:
dtype = bool
elif dtype in (int, strictint):
dtype = int
elif dtype in (float, strictfloat):
dtype = float
elif dtype == complex:
dtype = complex
else:
raise ValueError('unsupported dtype: {!r}'.format(dtype))
if isinstance(base, frozenarray):
if dtype is None or dtype == base.dtype:
return base
base = base.__base
if isstrict:
if not isinstance(base, numpy.ndarray):
base = numpy.array(base)
if base.size == 0:
base = base.astype(dtype)
copy = False
if base.dtype == complex or base.dtype == float and dtype == int:
raise ValueError('downcasting {!r} to {!r} is forbidden'.format(base.dtype, dtype))
self = object.__new__(cls)
self.__base = numpy.array(base, dtype=dtype) if copy or not isinstance(base, numpy.ndarray) or dtype and dtype != base.dtype else base
self.__base.flags.writeable = False
return self
def __hash__(self):
return hash((self.__base.shape, self.__base.dtype, tuple(self.__base.flat[::self.__base.size//32+1]) if self.__base.size else ())) # NOTE special case self.__base.size == 0 necessary for numpy<1.12
@property
def __nutils_hash__(self):
h = hashlib.sha1('{}.{}\0{} {}'.format(type(self).__module__, type(self).__qualname__, self.__base.shape, self.__base.dtype.str).encode())
h.update(self.__base.tobytes())
return h.digest()
@property
def __array_struct__(self):
return self.__base.__array_struct__
def __reduce__(self):
return frozenarray, (self.__base, None, False)
def __eq__(self, other):
if self is other:
return True
if type(other) is not type(self):
return False
if self.__base is other.__base:
return True
if hash(self) != hash(other) or self.__base.dtype != other.__base.dtype or self.__base.shape != other.__base.shape or numpy.not_equal(self.__base, other.__base).any():
return False
# deduplicate
self.__base = other.__base
return True
def __lt__(self, other):
if not isinstance(other, frozenarray):
return NotImplemented
return self != other and (self.dtype < other.dtype
or self.dtype == other.dtype and (self.shape < other.shape
or self.shape == other.shape and self.__base.tolist() < other.__base.tolist()))
def __le__(self, other):
if not isinstance(other, frozenarray):
return NotImplemented
return self == other or (self.dtype < other.dtype
or self.dtype == other.dtype and (self.shape < other.shape
or self.shape == other.shape and self.__base.tolist() < other.__base.tolist()))
def __gt__(self, other):
if not isinstance(other, frozenarray):
return NotImplemented
return self != other and (self.dtype > other.dtype
or self.dtype == other.dtype and (self.shape > other.shape
or self.shape == other.shape and self.__base.tolist() > other.__base.tolist()))
def __ge__(self, other):
if not isinstance(other, frozenarray):
return NotImplemented
return self == other or (self.dtype > other.dtype
or self.dtype == other.dtype and (self.shape > other.shape
or self.shape == other.shape and self.__base.tolist() > other.__base.tolist()))
def __getitem__(self, item):
retval = self.__base.__getitem__(item)
return frozenarray(retval, copy=False) if isinstance(retval, numpy.ndarray) else retval
dtype = property(lambda self: self.__base.dtype)
shape = property(lambda self: self.__base.shape)
size = property(lambda self: self.__base.size)
ndim = property(lambda self: self.__base.ndim)
flat = property(lambda self: self.__base.flat)
T = property(lambda self: frozenarray(self.__base.T, copy=False))
__len__ = lambda self: self.__base.__len__()
__repr__ = lambda self: 'frozenarray'+self.__base.__repr__()[5:]
__str__ = lambda self: self.__base.__str__()
__add__ = lambda self, other: self.__base.__add__(other)
__radd__ = lambda self, other: self.__base.__radd__(other)
__sub__ = lambda self, other: self.__base.__sub__(other)
__rsub__ = lambda self, other: self.__base.__rsub__(other)
__mul__ = lambda self, other: self.__base.__mul__(other)
__rmul__ = lambda self, other: self.__base.__rmul__(other)
__truediv__ = lambda self, other: self.__base.__truediv__(other)
__rtruediv__ = lambda self, other: self.__base.__rtruediv__(other)
__floordiv__ = lambda self, other: self.__base.__floordiv__(other)
__rfloordiv__ = lambda self, other: self.__base.__rfloordiv__(other)
__pow__ = lambda self, other: self.__base.__pow__(other)
__int__ = lambda self: self.__base.__int__()
__float__ = lambda self: self.__base.__float__()
__abs__ = lambda self: self.__base.__abs__()
__neg__ = lambda self: self.__base.__neg__()
__invert__ = lambda self: self.__base.__invert__()
__or__ = lambda self, other: self.__base.__or__(other)
__ror__ = lambda self, other: self.__base.__ror__(other)
__and__ = lambda self, other: self.__base.__and__(other)
__rand__ = lambda self, other: self.__base.__rand__(other)
__xor__ = lambda self, other: self.__base.__xor__(other)
__rxor__ = lambda self, other: self.__base.__rxor__(other)
tolist = lambda self, *args, **kwargs: self.__base.tolist(*args, **kwargs)
copy = lambda self, *args, **kwargs: self.__base.copy(*args, **kwargs)
astype = lambda self, *args, **kwargs: self.__base.astype(*args, **kwargs)
take = lambda self, *args, **kwargs: self.__base.take(*args, **kwargs)
any = lambda self, *args, **kwargs: self.__base.any(*args, **kwargs)
all = lambda self, *args, **kwargs: self.__base.all(*args, **kwargs)
sum = lambda self, *args, **kwargs: self.__base.sum(*args, **kwargs)
min = lambda self, *args, **kwargs: self.__base.min(*args, **kwargs)
max = lambda self, *args, **kwargs: self.__base.max(*args, **kwargs)
prod = lambda self, *args, **kwargs: self.__base.prod(*args, **kwargs)
dot = lambda self, *args, **kwargs: self.__base.dot(*args, **kwargs)
argsort = lambda self, *args, **kwargs: self.__base.argsort(*args, **kwargs)
swapaxes = lambda self, *args, **kwargs: frozenarray(self.__base.swapaxes(*args, **kwargs), copy=False)
ravel = lambda self, *args, **kwargs: frozenarray(self.__base.ravel(*args, **kwargs), copy=False)
reshape = lambda self, *args, **kwargs: frozenarray(self.__base.reshape(*args, **kwargs), copy=False)
transpose = lambda self, *args, **kwargs: frozenarray(self.__base.transpose(*args, **kwargs), copy=False)
cumsum = lambda self, *args, **kwargs: frozenarray(self.__base.cumsum(*args, **kwargs), copy=False)
nonzero = lambda self, *args, **kwargs: frozenarray(self.__base.nonzero(*args, **kwargs), copy=False)
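# Hedged usage sketch (added for illustration; ``_frozenarray_example`` is a
# hypothetical helper, not part of the original module). It demonstrates the
# whole-array equality described in the class docstring: ``==`` yields a single
# boolean rather than an elementwise array, and equal arrays hash identically.
def _frozenarray_example():
    a = frozenarray([1, 2, 3], dtype=int)
    b = frozenarray(numpy.array([1, 2, 3]))
    assert a == b and hash(a) == hash(b)
    return a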
class _c_arraymeta(type):
def __getitem__(self, dtype):
def constructor(value):
if isinstance(value, numpy.core._internal._ctypes):
return value
if not isinstance(value, numpy.ndarray):
value = numpy.array(value, dtype=dtype)
if not value.flags.c_contiguous:
raise ValueError('Array is not contiguous.')
if value.dtype != dtype:
raise ValueError('Expected dtype {} but array has dtype {}.'.format(dtype, value.dtype))
return value.ctypes
constructor.__qualname__ = constructor.__name__ = 'c_array[{}]'.format(_getname(dtype))
return constructor
def __call__(*args, **kwargs):
raise TypeError("cannot create an instance of class 'c_array'")
class c_array(metaclass=_c_arraymeta):
'''
Converts an array-like object to a ctypes array with a specific dtype. The
function ``c_array[dtype](array)`` returns ``array`` unmodified if ``array``
is already a ctypes array. If ``array`` is a :class:`numpy.ndarray`, the
array is converted if the ``dtype`` is correct and the array is contiguous;
otherwise :class:`ValueError` is raised. Otherwise, ``array`` is first
converted to a contiguous :class:`numpy.ndarray` and then converted to ctypes
array. In the first two cases changes made to the ctypes array are reflected
by the ``array`` argument: both are essentially views of the same data. In
the third case, changes to either ``array`` or the returned ctypes array are
not reflected by the other.
'''
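# Hedged usage sketch (added for illustration; ``_c_array_example`` is a
# hypothetical helper, not part of the original module). A contiguous float64
# array passes the dtype and contiguity checks and its ctypes view is returned.
def _c_array_example():
    data = numpy.ascontiguousarray([1.0, 2.0, 3.0], dtype=numpy.float64)
    return c_array[numpy.float64](data)  # ctypes view of the same buffer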
class attributes:
'''
Dictionary-like container with attributes instead of keys, instantiated using
keyword arguments:
>>> A = attributes(foo=10, bar=True)
>>> A
attributes(bar=True, foo=10)
>>> A.foo
10
'''
def __init__(self, **args):
self.__dict__.update(args)
def __repr__(self):
return 'attributes({})'.format(', '.join(map('{0[0]}={0[1]!r}'.format, sorted(self.__dict__.items()))))
class unit:
'''
Framework for physical units.
The unit class provides a basic framework for specifying values with physical
units using readable notation such as ``2.5km/h``. The system ensures that
values are consistent with a measurement system derived from base units, but
it does impose or even preload one such system. Instead a derived class,
created using either :func:`unit.create` or class arguments, should specify
the units and scales relevant for the situation to which it is applied.
Once units are defined, the formal syntax for instantiating a quantity is:
.. code:: BNF
<quantity> ::= <number> <units> | <number> <operator> <units>
<number> ::= "" | <integer> | <integer> "." <integer>
; Numerical value, allowing for decimal fractions but not
; scientific notation. An empty number is equivalent to 1.
<units> ::= <unit> | <unit> <operator> <units>
<unit> ::= <prefix> <name> <power>
<prefix> ::= "" | "h" | "k" | "M" | "G" | "T" | "P" | "E" | "Z" | "Y"
| "d" | "c" | "m" | "μ" | "n" | "p" | "f" | "a" | "z" | "y"
; Single character prefix to indicate a multiple or fraction
; of the unit. All SI prefixes are supported except for deca.
; An empty prefix signifies no scaling.
<name> ::= <string>
; One of the defined units, case sensitive, containing Latin
; or Greek symbols.
<power> ::= "" | <integer>
; Integer power to which to raise the unit. An empty power is
; equivalent to 1.
<operator> ::= "*" | "/"
; Multiplication or division.
With the prefix and unit name sharing an alphabet there is potential for
ambiguities (is it mol or micro-ol?). These are resolved using the simple
logic that the first character is considered part of the unit if this unit
exists; otherwise it is considered a prefix.
'''
_words = re.compile('([a-zA-Zα-ωΑ-Ω]+)')
_prefix = dict(Y=1e24, Z=1e21, E=1e18, P=1e15, T=1e12, G=1e9, M=1e6, k=1e3, h=1e2,
d=1e-1, c=1e-2, m=1e-3, μ=1e-6, n=1e-9, p=1e-12, f=1e-15, a=1e-18, z=1e-21, y=1e-24)
class _pdict(dict):
'''
Minimal helper class for sparse addition, multiplication
'''
def __iadd__(self, other):
for key, value in other.items():
value += self.pop(key, 0)
if value:
self[key] = value
return self
def __mul__(self, other):
return {key: value*other for key, value in self.items()}
def __str__(self):
return ''.join(sorted('*/'[value<0] + key + (str(abs(value)) if abs(value) > 1 else '') for key, value in self.items())).lstrip('*') or 'dimensionless'
@classmethod
def create(*args, **units):
'''
Alternative constructor for backwards compatibility.
'''
return args[0](**units)
def __init__(*args, **units):
'''
Create new unit type.
The unit system is defined via variable keyword arguments, with every unit
specified either as a direct numerical value or as a string referencing
other units using the standard expression syntax. Ultimately every unit
should be resolvable to a numerical value by tracing its dependencies.
The following example defines a subset of the SI system. Note that we
cannot use prefixes on the receiving end of a definition for reasons of
ambiguity, hence the definition of a gram as 1/1000:
>>> SI = unit(m=1, s=1, g=1e-3, N='kg*m/s2', Pa='N/m2')
>>> SI('2km')
2000.0
>>> SI('2g')
0.002
Args
----
name : :class:`str` (optional, positional only)
Name of the new class object.
**units :
Unit definitions.
Returns
-------
:
The newly created (uninitiated) unit class.
'''
self, = args
def depth(name, d={}):
if name not in units:
name = name[1:] # strip prefix
if name not in d:
value = units.get(name)
d[name] = isinstance(value, str) and sum(map(depth, self._words.findall(value)), 1)
return d[name]
self._units = {}
for name in sorted(units, key=depth): # sort by dependency depth to establish resolve order
value = units[name]
self._units[name] = self._parse(value) if isinstance(value, str) else (value, self._pdict({name: 1}))
def __getitem__(self, unit):
'''
Create subclass of float with custom stringly loads, dumps methods.
'''
if unit[0] in '1234567890.*':
raise ValueError('unit cannot start with a numeral')
return type('unit:'+unit, (float,), dict(unit=unit, __stringly_loads__=classmethod(self._loads), __stringly_dumps__=classmethod(self._dumps)))
def __call__(self, s):
'''
Create subclass of float and instantiate.
'''
value, powers = self._parse(s)
return self[s.lstrip('1234567890.*')](value)
def _parse(self, s):
'''
Parse string into a tuple of float, _pdict.
'''
parts = self._words.split(s)
value = float(parts[0].rstrip('*/') or 1)
powers = self._pdict()
for i in range(1, len(parts), 2):
s = int(parts[i+1].rstrip('*/') or 1)
if parts[i-1].endswith('/'):
s = -s
name = parts[i]
if name not in self._units:
if name[0] not in self._prefix or name[1:] not in self._units:
raise ValueError('unknown unit: {}'.format(name))
v, p = self._units[name[1:]]
v *= self._prefix[name[0]]
else:
v, p = self._units[name]
value *= v**s
powers += p*s
return value, powers
def _loads(self, U, s):
uvalue, upowers = self._parse(U.unit)
value, powers = self._parse(s)
if powers != upowers:
raise ValueError('invalid unit: expected {}, got {}'.format(upowers, powers))
return value
def _dumps(self, U, v):
if not isinstance(v, (int,float)):
raise ValueError('can only dump numerical values as unit, got {!r}'.format(type(v)))
uvalue, upowers = self._parse(U.unit)
return _f2s(v/uvalue) + U.unit
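# Hedged usage sketch (added for illustration; ``_unit_example`` is a hypothetical
# helper, not part of the original module). It mirrors the SI doctest above.
def _unit_example():
    SI = unit(m=1, s=1, g=1e-3, N='kg*m/s2', Pa='N/m2')
    assert SI('2.5km') == 2500.0   # the 'k' prefix scales the metre by 1e3
    assert SI('9.81m/s2') == 9.81  # '/s2' divides by seconds squared
    return SI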
def _f2s(v):
'convert float to string without scientific notation'
s, sep, e = str(v).partition('e')
a, sep, b = s.partition('.')
pos = len(a) + int(e or 0)
s = (a + b).rstrip('0')
if pos >= len(s):
return s.ljust(pos, '0')
elif pos <= 0:
return '0.' + '0' * -pos + s
else:
return s[:pos] + '.' + s[pos:]
# vim:sw=2:sts=2:et
```
|
{
"source": "jesseVDwolf/ForumMediaAnalyzer",
"score": 2
}
|
#### File: ForumMediaAnalyzer/ForumMediaAnalyzer/MediaAnalyzer.py
```python
import re
import cv2
import json
import base64
import logging
import requests
import numpy as np
from datetime import datetime
import pytz
import gridfs
import pymongo
from pymongo import MongoClient
from pymongo.errors import ServerSelectionTimeoutError as MongoServerSelectionTimeoutError
import imagehash
from PIL import Image
from skimage.metrics import structural_similarity
class AnalyzeConditionsNotMetException(Exception):
"""
Raised when an error is encountered during execution of the run() function
"""
pass
class MediaAnalyzer(object):
"""
This class is used to analyze data generated by a MediaScraper object:
https://github.com/jesseVDwolf/ForumMediaScraper
It will retrieve data in batches using the MediaScraper's REST interface:
https://github.com/jesseVDwolf/ForumMediaScraperREST
"""
# taken from https://github.com/django/django/blob/stable/1.3.x/django/core/validators.py#L45
URL_VALIDATION_REGEX = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
MONGO_DEFAULT_URI = "mongodb://localhost:27017"
def __init__(self, scraper_rest_host: str="http://localhost:5000", log_level: int=logging.DEBUG,
document_retrieval_batch_size: int=5, mongo_uri: str=MONGO_DEFAULT_URI):
if re.match(MediaAnalyzer.URL_VALIDATION_REGEX, scraper_rest_host) is None:
raise ValueError('Invalid scraper_rest_host url: %s' % scraper_rest_host)
self.scraper_rest_host = scraper_rest_host
self.document_retrieval_batch_size = document_retrieval_batch_size
self.timezone = pytz.timezone('Europe/Berlin')
# create database related objects
self._mongo_client = MongoClient(mongo_uri)
self._mongo_database = self._mongo_client['9GagMedia']
self.gridfs = gridfs.GridFS(self._mongo_database)
self.logger = logging.getLogger(__name__)
self.logger.setLevel(log_level)
logging_args = {
"format": '%(asctime)s %(levelname)-8s %(message)s',
"level": logging.INFO,
"datefmt": '%Y-%m-%d %H:%M:%S'
}
logging.basicConfig(**logging_args)
if not self._mongo_database['Counter'].find_one():
self._mongo_database['Counter'].insert_one({'_id': 'OrderNum', 'val': 1})
def _get_tz_date(self):
return datetime.utcnow().replace(tzinfo=pytz.utc).astimezone(self.timezone)
@staticmethod
def _scale_images(image_one: np.ndarray, image_two: np.ndarray, scale_percent_dif: float=0.02):
# Scale the images so that they have the same
# dimensions. The bigger image will always be scaled down;
# It is considered bigger if it contains more pixels (approximated here by comparing width + height)
if image_one.shape == image_two.shape:
return image_one, image_two
# use aspect ratio to determine if images can be rescaled
*_, w1, h1 = cv2.boundingRect(image_one)
*_, w2, h2 = cv2.boundingRect(image_two)
if abs((float(w1) / h1) - (float(w2) / h2)) >= scale_percent_dif:
return None, None
if sum(image_one.shape[:2]) > sum(image_two.shape[:2]):
image_one = cv2.resize(
src=image_one,
dsize=(image_two.shape[1], image_two.shape[0]),
interpolation=cv2.INTER_CUBIC
)
else:
image_two = cv2.resize(
src=image_two,
dsize=(image_one.shape[1], image_one.shape[0]),
interpolation=cv2.INTER_CUBIC
)
return image_one, image_two
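# Worked example (comment added for clarity, not in the original source): aspect
# ratios of roughly 4:3 (~1.333) and 16:9 (~1.778) differ by ~0.444 >= 0.02, so
# such a pair is rejected with (None, None); two same-ratio crops of different
# size pass, and the one with the larger width + height is resized down.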
@staticmethod
def _mse(image_one: np.ndarray, image_two: np.ndarray):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((image_one.astype("float") - image_two.astype("float")) ** 2)
err /= float(image_one.shape[0] * image_one.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
@staticmethod
def _img_hash(image_one: np.ndarray, image_two: np.ndarray, func=imagehash.average_hash, cutoff: int=10):
# Use an image hashing algorithm to check for similarity between images
# Calculate the hashes of both images using one of the functions from
# the https://github.com/JohannesBuchner/imagehash project and subtract
# them from each other. A cutoff can be specified to account for
# little discrepancies
h1 = func(Image.fromarray(image_one))
h2 = func(Image.fromarray(image_two))
s = (h1 - h2) - cutoff
# return the similarity between images where the closer to 0 the better.
# taking into account the specified cutoff where s can not be a negative number
return int((abs(s)+s)/2)
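# Worked example (comment added for clarity, not in the original source): with
# the default cutoff of 10, a hash difference of 7 gives s = -3 and the method
# returns 0 (treated as similar), while a difference of 25 gives s = 15, which
# is returned as the dissimilarity score.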
def run(self):
try:
"""
Pre-run validation of resources on scraper rest interface and
the locally configured mongodb server
"""
r = requests.get(
url="%s/query" % self.scraper_rest_host,
params={'limit': 1, 'offset': 0}
)
r.raise_for_status()
self._mongo_client.server_info()
"""
Start processing. If posts have already been processed, use the ArticleId of the
last processed article to determine when to stop retrieving more data. Then use
different methods to determine similarity between images:
- image hashes
- mean squared error
- structural similarity measure
"""
last_article = self._mongo_database['Posts'].find_one(sort=[("OrderNum", pymongo.ASCENDING)])
run = self._mongo_database['Runs'].insert_one({
'StartProcessTime': self._get_tz_date(),
'EndProcessTime': None,
'PostsProcessed': 0,
'BatchesProcessed': 0
})
request_offset = 0
final_batch = False
last_article_found = False
posts_processed = 0
batches_processed = 0
while True:
resp = requests.get(url="%s/query" % self.scraper_rest_host, params={
'limit': self.document_retrieval_batch_size,
'offset': request_offset
})
resp.raise_for_status()
data = resp.json()
self.logger.debug('%s: Received new batch of data at %s using offset %d and limit %d' % (
str(run.inserted_id), self._get_tz_date().strftime("%Y-%m-%d %H:%M:%S"), request_offset, self.document_retrieval_batch_size))
if len(data['documents']) == 0:
self.logger.debug('%s: No more documents returned by %s using offset %d and limit %d' % (
str(run.inserted_id), self.scraper_rest_host, request_offset, self.document_retrieval_batch_size))
self.logger.info('%s: No more documents found. Finished %d batches' % (str(run.inserted_id), batches_processed))
break
if len(data['documents']) < self.document_retrieval_batch_size:
self.logger.debug('%s: No more data available from %s. Setting final batch' % (
str(run.inserted_id), self.scraper_rest_host))
final_batch = True
if len([doc for doc in data['documents'] if len(doc['Posts']) == 0]) == len(data['documents']):
self.logger.debug('%s: No posts found in documents at offset %d with limit %d' % (
str(run.inserted_id), request_offset, self.document_retrieval_batch_size))
self.logger.info('%s: No posts found in batch. Retrieving next batch' % str(run.inserted_id))
request_offset += self.document_retrieval_batch_size
batches_processed += 1
continue
for doc in [doc for doc in data['documents'] if len(doc['Posts']) != 0]:
if last_article:
if last_article['ArticleId'] == doc['StartPostId'] or last_article_found:
self.logger.debug('%s: Last article %s found at offset %d with limit %d' % (
str(run.inserted_id), str(last_article['ArticleId']), request_offset, self.document_retrieval_batch_size))
final_batch = True
break
self.logger.info('%s: %d posts found for processing in document %s' % (
str(run.inserted_id), len(doc['Posts']), doc['_id']))
processed_posts = self._mongo_database['Posts'].find({})
for post in doc['Posts']:
if last_article:
if last_article['ArticleId'] == post['ArticleId']:
self.logger.debug('%s: Last article %s found at offset %d with limit %d' % (
str(run.inserted_id), str(last_article['ArticleId']), request_offset, self.document_retrieval_batch_size))
last_article_found = True
break
im_s = str(post['MediaData'])
im_b = base64.b64decode(im_s.encode('utf-8'))
im_buff = np.asarray(bytearray(im_b), dtype=np.uint8)
im = cv2.imdecode(im_buff, cv2.IMREAD_GRAYSCALE)
media_id = self.gridfs.put(im_b)
md = {
"ArticleId": str(post['ArticleId']),
"RunId": run.inserted_id,
"PostProcessedTime": self._get_tz_date(),
"Dim": im.shape,
"MediaId": media_id,
"IsOriginal": True,
"RepostOff": None,
"Reposts": []
}
for pp in processed_posts:
if post['ArticleId'] == pp['ArticleId']:
# duplicates will always be exactly the same; skip them
# (workaround for a bug in the MediaScraper...)
continue
f = self.gridfs.get(pp['MediaId'])
im1_buff = np.asarray(bytearray(f.read(size=-1)), dtype=np.uint8)
im1 = cv2.imdecode(im1_buff, cv2.IMREAD_GRAYSCALE)
im0, im1 = self._scale_images(im, im1)
if not hasattr(im0, "shape"):
# images could not be scaled since difference in dimensions
# is too big. Must be unique based on this
continue
mse = self._mse(im0, im1)
ss = structural_similarity(im0, im1)
hs = self._img_hash(im0, im1)
# The hash similarity will determine if an image is even close to being
# similar to the processed image. The structural similarity measure will
# then decide if this is actually correct. A last check is done to make
# sure that it's not a meme that is posted with the same background but
# with different text using the very sensitive mse measure
if hs == 0:
if ss >= 0.65:
if not mse >= 2000.00 and pp['IsOriginal']:
# db image seems to be very similar to the processed image
md.update({"IsOriginal": False, "RepostOff": pp['_id'], "Reposts": None})
pp['Reposts'].append({
"ArticleId": md['ArticleId'],
"mse": mse,
"ssim": ss,
"hs": hs,
"certainty": 1
})
self._mongo_database['Posts'].replace_one({"_id": pp['_id']}, pp)
else:
# image background might be the same with different text
continue
else:
# structural similarity is too far off must be unique
continue
else:
# images are not similar at all
continue
self._mongo_database['Posts'].insert_one(md)
posts_processed += 1
if final_batch:
break
request_offset += self.document_retrieval_batch_size
batches_processed += 1
self.logger.info('%s: Finished final batch. %d posts processed' % (str(run.inserted_id), posts_processed))
self._mongo_database['Runs'].update_one({'_id': run.inserted_id}, {
"$set": {'PostsProcessed': posts_processed, 'EndProcessTime': self._get_tz_date()}
})
except requests.exceptions.RequestException as ree:
raise AnalyzeConditionsNotMetException({'message': ree})
except MongoServerSelectionTimeoutError as msste:
raise AnalyzeConditionsNotMetException({'message': msste})
except json.JSONDecodeError as je:
raise AnalyzeConditionsNotMetException({'message': je})
```
|
{
"source": "jesseVDwolf/ForumMediaScraperREST",
"score": 3
}
|
#### File: ForumMediaScraperREST/ForumMediaScraperREST/__init__.py
```python
import json
import base64
from pymongo.errors import AutoReconnect
from ForumMediaScraper.Scraper import ScraperConfig
from ForumMediaScraperREST.Controller import FlaskController
from flask import (
Flask,
request,
Response
)
config = ScraperConfig({
'WEBDRIVER_EXECUTABLE_PATH': './drivers/geckodriver-win.exe',
'MONGO_INITDB_ROOT_USERNAME': 'admin',
'MONGO_INITDB_ROOT_PASSWORD': '<PASSWORD>',
'SCRAPER_CREATE_LOGFILE': True,
'SCRAPER_HEADLESS_MODE': False,
'SCRAPER_MAX_SCROLL_SECONDS': 40,
'WEBDRIVER_BROWSER_EXECUTABLE_PATH': 'C:\\Program Files\\Mozilla Firefox\\firefox.exe'
})
app = Flask(__name__)
controller = FlaskController(app, config)
@app.route('/query', methods=['GET'])
def query():
status = 200
body = {'success': True, 'documents': []}
limit = request.args.get('limit') if request.args.get('limit') else 5
offset = request.args.get('offset') if request.args.get('offset') else 0
try:
pipeline = [
{"$sort": {"StartScrapeTime": -1}},
{"$skip": int(offset)},
{"$limit": int(limit)},
{"$lookup":
{
"from": "Posts",
"localField": "_id",
"foreignField": "RunId",
"as": "Posts"
}
}
]
cursor = controller.mongo_database['Runs'].aggregate(pipeline)
for run in cursor:
for post in run['Posts']:
file = controller.mongo_gridfs.get(post['MediaId'])
post['file'] = base64.b64encode(file.read(size=-1)).decode('utf-8')
run = controller.convert_objects(run)
body['documents'].append(run)
except AutoReconnect as MongoError:
app.logger.warning('Error reconnecting to the mongo database: {err}'.format(err=str(MongoError)))
body.update({'success': False, 'error': {'type': 'pymongo.errors.AutoReconnect', 'message': str(MongoError)}})
status = 500
return Response(response=json.dumps(body), status=status, content_type='application/json')
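# Illustrative request (comment added for clarity; the host and port are
# assumptions based on the defaults used elsewhere in this project):
#   curl "http://localhost:5000/query?limit=5&offset=0"
# returns {"success": true, "documents": [...]} where each post carries its
# media as a base64-encoded "file" field.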
@app.route('/config', methods=['GET', 'PUT'])
def config():
status = 200
response_body = {'success': True}
try:
if request.method == 'GET':
response_body['config'] = controller.load_config()
elif request.method == 'PUT':
request_body = request.get_json(silent=True)
if request_body is None:
raise Exception('Failed to parse json')
new_config = controller.put_config(request_body)
response_body['config'] = new_config
except Exception as err:
app.logger.warning('Request failed with reason: %s' % str(err))
status = 500
return Response(response=json.dumps(response_body), status=status, content_type='application/json')
if __name__ == '__main__':
app.run(use_reloader=False, port=80)
```
|
{
"source": "JesseVent/lstm-rapper",
"score": 3
}
|
#### File: lstm-rapper/app/views.py
```python
from app import app
from flask import render_template, request
from random import randint
from .forms import SonnetForm
from network.generate import GenerativeNetwork
net = GenerativeNetwork("app/static/sonnets.txt", "app/static/model.yaml", "app/static/weights.hdf5")
@app.route('/')
def index():
return render_template('index.html')
@app.route('/write', methods=['GET', 'POST'])
def sonnet():
if request.method == 'GET':
seed = net.make_seed()
seed_tag = tag_seed(seed)
sonnet_form = SonnetForm()
sonnet_form.seed.data = seed
sonnet_form.seed_tag.data = seed_tag
return render_template('intro.html',
title='ROBOT SHAKESPEARE',
form=sonnet_form)
if request.method == 'POST':
old_seed = request.form['seed']
old_seed_tag = request.form['seed_tag']
old_seed_phrase = request.form['seed_phrase']
if old_seed_phrase:
old_seed_tag = old_seed_phrase
old_seed = net.make_seed(old_seed_phrase)
message = format_sonnet(net.generate(old_seed))
# Make a new seed
seed = net.make_seed()
seed_tag = tag_seed(seed)
# Make the form
sonnet_form = SonnetForm()
sonnet_form.seed.data = seed
sonnet_form.seed_tag.data = seed_tag
sonnet_form.seed_phrase.data = ""
return render_template('sonnet.html',
title='ROBOT SHAKESPEARE',
message=message,
old_seed_tag=old_seed_tag,
form=sonnet_form)
def format_sonnet(text):
formatted = text.split("\n")
# The first and last line cut off in the middle, so we'll ditch them
formatted = formatted[1:len(formatted) - 1]
# Eliminate empty strings, strings that are just newlines, or other improper strings
formatted = [string for string in formatted if len(string) > 3]
return formatted
def tag_seed(seed):
# Grab a chunk of three words
word_list = seed.split()
i = randint(1, len(word_list) - 3)
bad_start_end = set(['on', 'of', 'from', "I", "O!", "and", "be", 'or', 'the', 'than', 'with', 'by'])
bad_start = set(['of'])
bad_end = set(['no', 'an', 'if'])
words = []
for i, word in enumerate(word_list[i:i + 3]):
if not word == "I" and not word == "O!":
word = word.strip("',.;-!:?").lower()
if i == 0 and word not in bad_start_end | bad_start:
words.append(word)
if i == 1:
words.append(word)
if i == 2 and word not in bad_start_end | bad_end:
words.append(word)
tag = " ".join(words)
return tag
```
|
{
"source": "JesseVent/spoken-word",
"score": 2
}
|
#### File: spoken-word/model/keras_gru.py
```python
import os
import numpy as np
np.random.seed(1969)
import tensorflow as tf
tf.set_random_seed(1969)
from scipy import signal
from glob import glob
import re
import pandas as pd
import gc
from scipy.io import wavfile
from keras import optimizers, losses, activations, models
from keras.layers import GRU, LSTM, Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, Conv3D, ConvLSTM2D
from keras.callbacks import TensorBoard
from keras.models import Sequential
from tqdm import tqdm
from sklearn.model_selection import GroupKFold
from python_speech_features import mfcc
from python_speech_features import delta
from python_speech_features import logfbank
L = 16000
legal_labels = 'yes no up down left right on off stop go silence unknown'.split()
root_path = r'../'
out_path = r'.'
model_path = r'.'
train_data_path = os.path.join(root_path, 'train', 'audio')
test_data_path = os.path.join(root_path, 'test', 'audio')
def list_wavs_fname(dirpath, ext='wav'):
print(dirpath)
fpaths = glob(os.path.join(dirpath, r'*/*' + ext))
pat = r'.+/(\w+)/\w+\.' + ext + '$'
labels = []
for fpath in fpaths:
r = re.match(pat, fpath)
if r:
labels.append(r.group(1))
pat = r'.+/(\w+\.' + ext + ')$'
fnames = []
for fpath in fpaths:
r = re.match(pat, fpath)
if r:
fnames.append(r.group(1))
return labels, fnames
def pad_audio(samples):
if len(samples) >= L: return samples
else: return np.pad(samples, pad_width=(L - len(samples), 0), mode='constant', constant_values=(0, 0))
def chop_audio(samples, L=16000, num=1000):
for i in range(num):
beg = np.random.randint(0, len(samples) - L)
yield samples[beg: beg + L]
def label_transform(labels):
nlabels = []
for label in labels:
if label == '_background_noise_':
nlabels.append('silence')
elif label not in legal_labels:
nlabels.append('unknown')
else:
nlabels.append(label)
return pd.get_dummies(pd.Series(nlabels))
labels, fnames = list_wavs_fname(train_data_path)
new_sample_rate=16000
y_train = []
x_train = np.zeros((64727,99,26),np.float32)
G = []
ix = 0
for label, fname in tqdm(zip(labels, fnames)):
sample_rate, samples = wavfile.read(os.path.join(train_data_path, label, fname))
samples = pad_audio(samples)
if len(samples) > 16000:
n_samples = chop_audio(samples)
else:
n_samples = [samples]
for samples in n_samples:
filter_banks = logfbank(samples)
filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)
x_train[ix,:,:] = filter_banks
y_train.append(label)
group = fname.split('_')[0]
G.append(group)
ix += 1
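# Note added for clarity (not in the original script): logfbank with its default
# 25 ms window and 10 ms step over 1 s of 16 kHz audio yields 99 frames of 26
# log filterbank energies, which is why x_train is preallocated as (64727, 99, 26).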
y_train = label_transform(y_train)
label_index = y_train.columns.values
y_train = y_train.values
y_train = np.array(y_train)
G = np.array(G)
del labels, fnames
gc.collect()
model = Sequential()
model.add(GRU(256,input_shape=(99,26)))
model.add(Dropout(0.5))
model.add(Dense(12, activation='softmax'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['categorical_accuracy'])
model.summary()
weights = model.get_weights()
NUM_FOLDS = 4
EPOCHS = 30
BATCH_SIZE = 256
BAGS = 16
kf = GroupKFold(n_splits=NUM_FOLDS)
shape = None
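# Note added for clarity (not in the original script): each bag first runs a
# 4-fold GroupKFold pass (grouped by the filename prefix stored in G) purely to
# find the epoch with the lowest mean validation loss, then retrains on the full
# training set for that number of epochs.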
for bag in range(BAGS):
fold = 0
val_loss = np.ones((EPOCHS,NUM_FOLDS),np.float32)
for train, val in kf.split(x_train,y_train,G):
model.set_weights(weights)
model.reset_states()
tensorboard = TensorBoard(log_dir='./logs/gru_fold_{}_bag_{}'.format(fold,bag))
history = model.fit(x_train[train], y_train[train], batch_size=BATCH_SIZE, validation_data=(x_train[val], y_train[val]), epochs=EPOCHS, shuffle=True, verbose=1, callbacks=[tensorboard])
val_loss[:,fold] = history.history['val_loss']
fold += 1
val_mean = np.mean(val_loss,axis=1)
best_loss = np.min(val_mean)
best_epoch = np.argmin(val_mean)
print('Best epoch: {} Best loss: {}'.format(best_epoch,best_loss))
model.set_weights(weights)
model.reset_states()
tensorboard = TensorBoard(log_dir='./logs/gru_bag_{}'.format(bag))
model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=best_epoch, shuffle=True, verbose=1, callbacks=[tensorboard])
model.save('./graph/gru_{}_{}.h5'.format(bag+1,best_loss))
def test_data_generator(batch=32):
fpaths = glob(os.path.join(test_data_path, '*wav'))
i = 0
for path in fpaths:
if i == 0:
imgs = []
fnames = []
i += 1
rate, samples = wavfile.read(path)
samples = pad_audio(samples)
filter_banks = logfbank(samples)
filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)
imgs.append(filter_banks)
fnames.append(path.split('/')[-1])
if i == batch:
i = 0
imgs = np.array(imgs)
yield fnames, imgs
if i > 0:  # only when a partial batch remains (avoids re-yielding the last full batch)
imgs = np.array(imgs)
yield fnames, imgs
return  # end the generator; raising StopIteration here becomes a RuntimeError under PEP 479
gc.collect()
index = []
results = []
probs = []
for fnames, imgs in tqdm(test_data_generator(batch=32)):
predicts = model.predict(imgs)
probs.extend(predicts)
predicts = np.argmax(predicts, axis=1)
predicts = [label_index[p] for p in predicts]
index.extend(fnames)
results.extend(predicts)
df = pd.DataFrame(columns=['fname', 'label'])
df['fname'] = index
df['label'] = results
df.to_csv(os.path.join(out_path, 'gru_sub_{}_{}.csv'.format(bag+1,best_loss)), index=False)
probs = np.array(probs)
np.save('./graph/gru_probs_{}.npy'.format(bag+1),probs)
```
#### File: spoken-word/model/spoken_word.py
```python
import os
import numpy as np
from scipy.fftpack import fft
from scipy.io import wavfile
from scipy import signal
from glob import glob
import re
import pandas as pd
import gc
from scipy.io import wavfile
from keras import optimizers, losses, activations, models
from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization
from sklearn.model_selection import train_test_split
import keras
L = 16000
legal_labels = 'yes no up down left right on off stop go silence unknown'.split()
#src folders
root_path = r'..'
out_path = r'.'
model_path = r'.'
train_data_path = os.path.join(root_path, 'input', 'train', 'audio')
test_data_path = os.path.join(root_path, 'input', 'test', 'audio')
def custom_fft(y, fs):
T = 1.0 / fs
N = y.shape[0]
yf = fft(y)
xf = np.linspace(0.0, 1.0/(2.0*T), N//2)
# FFT is symmetrical, so we take just the first half
# FFT is also complex, so we take just the magnitude (abs)
vals = 2.0/N * np.abs(yf[0:N//2])
return xf, vals
def log_specgram(audio, sample_rate, window_size=20,
step_size=10, eps=1e-10):
nperseg = int(round(window_size * sample_rate / 1e3))
noverlap = int(round(step_size * sample_rate / 1e3))
freqs, times, spec = signal.spectrogram(audio,
fs=sample_rate,
window='hann',
nperseg=nperseg,
noverlap=noverlap,
detrend=False)
return freqs, times, np.log(spec.T.astype(np.float32) + eps)
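# Worked example (comment added for clarity, not in the original script): with
# sample_rate=8000 and the defaults window_size=20 ms / step_size=10 ms,
# nperseg = round(20 * 8000 / 1e3) = 160 and noverlap = 80, so one second of
# resampled audio yields a (99, 81) log spectrogram, matching input_shape below.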
def list_wavs_fname(dirpath, ext='wav'):
print(dirpath)
fpaths = glob(os.path.join(dirpath, r'*/*' + ext))
pat = r'.+/(\w+)/\w+\.' + ext + '$'
labels = []
for fpath in fpaths:
r = re.match(pat, fpath)
if r:
labels.append(r.group(1))
pat = r'.+/(\w+\.' + ext + ')$'
fnames = []
for fpath in fpaths:
r = re.match(pat, fpath)
if r:
fnames.append(r.group(1))
return labels, fnames
def pad_audio(samples):
if len(samples) >= L: return samples
else: return np.pad(samples, pad_width=(L - len(samples), 0), mode='constant', constant_values=(0, 0))
def chop_audio(samples, L=16000, num=20):
for i in range(num):
beg = np.random.randint(0, len(samples) - L)
yield samples[beg: beg + L]
def label_transform(labels):
nlabels = []
for label in labels:
if label == '_background_noise_':
nlabels.append('silence')
elif label not in legal_labels:
nlabels.append('unknown')
else:
nlabels.append(label)
return pd.get_dummies(pd.Series(nlabels))
labels, fnames = list_wavs_fname(train_data_path)
new_sample_rate = 8000
y_train = []
x_train = []
for label, fname in zip(labels, fnames):
sample_rate, samples = wavfile.read(os.path.join(train_data_path, label, fname))
samples = pad_audio(samples)
if len(samples) > 16000:
n_samples = chop_audio(samples)
else: n_samples = [samples]
for samples in n_samples:
resampled = signal.resample(samples, int(new_sample_rate / sample_rate * samples.shape[0]))
_, _, specgram = log_specgram(resampled, sample_rate=new_sample_rate)
y_train.append(label)
x_train.append(specgram)
x_train = np.array(x_train)
x_train = x_train.reshape(tuple(list(x_train.shape) + [1]))
y_train = label_transform(y_train)
label_index = y_train.columns.values
y_train = y_train.values
y_train = np.array(y_train)
del labels, fnames
gc.collect()
input_shape = (99, 81, 1)
nclass = 12
inp = Input(shape=input_shape)
norm_inp = BatchNormalization()(inp)
img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(norm_inp)
img_1 = Convolution2D(8, kernel_size=2, activation=activations.relu)(img_1)
img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
img_1 = Dropout(rate=0.2)(img_1)
img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu)(img_1)
img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu)(img_1)
img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
img_1 = Dropout(rate=0.2)(img_1)
img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu)(img_1)
img_1 = MaxPooling2D(pool_size=(2, 2))(img_1)
img_1 = Dropout(rate=0.2)(img_1)
img_1 = Flatten()(img_1)
dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(img_1))
dense_1 = BatchNormalization()(Dense(128, activation=activations.relu)(dense_1))
dense_1 = Dense(nclass, activation=activations.softmax)(dense_1)
model = models.Model(inputs=inp, outputs=dense_1)
opt = optimizers.Adam()
model.compile(optimizer=opt, loss=losses.binary_crossentropy)
model.summary()
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=2017)
model.fit(x_train, y_train, batch_size=16, validation_data=(x_valid, y_valid), epochs=3, shuffle=True, verbose=2)
model.save(os.path.join(model_path, 'cnn.model'))
def test_data_generator(batch=16):
fpaths = glob(os.path.join(test_data_path, '*wav'))
i = 0
for path in fpaths:
if i == 0:
imgs = []
fnames = []
i += 1
rate, samples = wavfile.read(path)
samples = pad_audio(samples)
resampled = signal.resample(samples, int(new_sample_rate / rate * samples.shape[0]))
_, _, specgram = log_specgram(resampled, sample_rate=new_sample_rate)
imgs.append(specgram)
fnames.append(path.split('\\')[-1])
if i == batch:
i = 0
imgs = np.array(imgs)
imgs = imgs.reshape(tuple(list(imgs.shape) + [1]))
yield fnames, imgs
if i > 0:  # only when a partial batch remains (avoids re-yielding the last full batch)
imgs = np.array(imgs)
imgs = imgs.reshape(tuple(list(imgs.shape) + [1]))
yield fnames, imgs
return  # end the generator; raising StopIteration here becomes a RuntimeError under PEP 479
del x_train, y_train
gc.collect()
index = []
results = []
for fnames, imgs in test_data_generator(batch=32):
predicts = model.predict(imgs)
predicts = np.argmax(predicts, axis=1)
predicts = [label_index[p] for p in predicts]
index.extend(fnames)
results.extend(predicts)
df = pd.DataFrame(columns=['fname', 'label'])
df['fname'] = index
df['label'] = results
df.to_csv(os.path.join(out_path, 'sub.csv'), index=False)
```
|
{
"source": "JesseVermeulen123/conducthotline.com",
"score": 2
}
|
#### File: conducthotline.com/hotline/config.py
```python
import json
import os
import hotline.database
from hotline import injector
def _load_secrets():
secrets_file = os.environ.get("SECRETS_FILE", "secrets.json")
with open(secrets_file) as fh:
secrets = json.load(fh)
injector.set("secrets", secrets)
def _initialize_resources():
# Initialize the database, now that we have configuration.
hotline.database.initialize_db()
def load():
_load_secrets()
_initialize_resources()
```
#### File: database/migrations/0002_add_blocklist.py
```python
import peewee
from hotline.database import models
class CreateModels:
method = "create_tables"
args = [models.BlockList]
def run(self):
models.db.create_tables(self.args)
def migrate(migrator):
return [
migrator.add_column(
"auditlog", "reporter_number", peewee.TextField(null=True, index=False)
),
CreateModels(),
]
```
#### File: hotline/pages/webhandlers.py
```python
import os
import cmarkgfm
import flask
import flask.helpers
HERE = os.path.dirname(__file__)
CONTENT = os.path.join(HERE, "content")
blueprint = flask.Blueprint("pages", __name__, template_folder="templates")
@blueprint.route("/pages/<name>")
def view_page(name):
markdown_file = flask.safe_join(CONTENT, f"{name}.md")
if not os.path.exists(markdown_file):
flask.abort(404)
with open(markdown_file, "r") as fh:
content = cmarkgfm.markdown_to_html_with_extensions(
fh.read(), extensions=["table", "autolink", "strikethrough"]
)
# content = content.replace("<h1>", "<h1 class=\"title is-1 is-spaced\">")
# content = content.replace("<h2>", "<h2 class=\"subtitle is-2 is-spaced\">")
return flask.render_template("page.html", content=content)
```
#### File: JesseVermeulen123/conducthotline.com/noxfile.py
```python
import os
import nox
nox.options.sessions = ["format", "lint", "test"]
@nox.session(python="3.7")
def freeze(session):
session.install("pip-tools")
session.run("pip-compile", "--output-file", "requirements.txt", "requirements.in")
@nox.session(python="3.7")
def format(session):
session.install("black", "isort")
session.run("black", "hotline", "tests", "noxfile.py")
session.run("isort", "-rc", "hotline", "tests", "noxfile.py")
@nox.session(python="3.7")
def lint(session):
session.install("mypy", "flake8", "black")
session.run("black", "--check", "hotline", "tests")
session.run("flake8", "docuploader", "tests")
session.run("mypy", "hotline")
@nox.session(python="3.7")
def test(session):
session.install("-r", "requirements.txt")
session.install("-r", "requirements-test.txt")
session.run(
"pytest",
"--cov",
"hotline",
"--cov-report",
"term-missing",
"tests",
*session.posargs
)
@nox.session(python="3.7")
def cli(session):
session.install("-r", "requirements.txt")
env = {
# Workaround for https://github.com/pallets/werkzeug/issues/461
"PYTHONPATH": os.getcwd(),
"FLASK_ENV": "development",
"FLASK_APP": "hotline.__main__",
}
session.run("python", "-m", "flask", *session.posargs, env=env)
@nox.session(python="3.7")
def serve(session):
session.install("-r", "requirements.txt")
env = {
# Workaround for https://github.com/pallets/werkzeug/issues/461
"PYTHONPATH": os.getcwd(),
"FLASK_RUN_PORT": "8080",
"FLASK_ENV": "development",
"FLASK_APP": "hotline.__main__",
}
session.run("python", "-m", "flask", "run", env=env)
@nox.session(python="3.7")
def serve_prod(session):
session.install("-r", "requirements.txt")
session.run("gunicorn", "-b", ":8080", "hotline.__main__:app")
@nox.session(python="3.7")
def shell(session):
session.install("-r", "requirements.txt")
session.install("ipython")
session.run("ipython")
```
#### File: conducthotline.com/tests/test_chatroom.py
```python
from unittest import mock
from hotline import chatroom
def test_add_user_unique_constraint():
room = chatroom.Chatroom()
room.add_user(name="A", number="1234", relay="1")
room.add_user(name="B", number="5678", relay="2")
room.add_user(name="C", number="1234", relay="3")
assert len(room.users) == 2
def test_relay():
room = chatroom.Chatroom()
room.add_user(name="A", number="1234", relay="1")
room.add_user(name="B", number="5678", relay="2")
room.add_user(name="C", number="1111", relay="3")
send_message = mock.Mock(spec=["__call__"])
# A message from User A.
room.relay("1234", "meep", send_message=send_message)
send_message.assert_has_calls(
[
mock.call(to="5678", sender="2", message="A: meep"),
mock.call(to="1111", sender="3", message="A: meep"),
]
)
# A message from User B.
send_message.reset_mock()
room.relay("5678", "moop", send_message=send_message)
send_message.assert_has_calls(
[
mock.call(to="1234", sender="1", message="B: moop"),
mock.call(to="1111", sender="3", message="B: moop"),
]
)
def test_serialize_deserialize():
room = chatroom.Chatroom()
room.add_user(name="A", number="1234", relay="1")
room.add_user(name="B", number="5678", relay="2")
room.add_user(name="C", number="1111", relay="3")
roundtripped = room.deserialize(room.serialize())
assert list(roundtripped.users) == list(room.users)
```
|
{
"source": "jessevig/robustness-gym",
"score": 2
}
|
#### File: robustnessgym/active/mandoline.py
```python
from functools import partial
from types import SimpleNamespace
import numpy as np
from meerkat.tools.lazy_loader import LazyLoader
scipy_optimize = LazyLoader("scipy.optimize")
scipy_special = LazyLoader("scipy.special")
skmetrics = LazyLoader("sklearn.metrics.pairwise")
def Phi(D, edge_list: list = None):
"""Given an n x d matrix of (example, slices), calculate the potential
matrix.
Includes correlations modeled by the edges in the `edge_list`.
"""
if edge_list is not None:
pairwise_terms = (
D[np.arange(len(D)), edge_list[:, 0][:, np.newaxis]].T
* D[np.arange(len(D)), edge_list[:, 1][:, np.newaxis]].T
)
return np.concatenate([D, pairwise_terms], axis=1)
else:
return D
def log_partition_ratio(x, Phi_D_src, n_src):
"""Calculate the log-partition ratio in the KLIEP problem."""
return np.log(n_src) - scipy_special.logsumexp(Phi_D_src.dot(x))
def mandoline(
D_src,
D_tgt,
edge_list,
sigma=None,
):
"""
Mandoline solver.
Args:
D_src: (n_src x d) matrix of (example, slices) for the source distribution.
        D_tgt: (n_tgt x d) matrix of (example, slices) for the target distribution.
edge_list: list of edge correlations between slices that should be modeled.
sigma: optional parameter that activates RBF kernel-based KLIEP with scale
`sigma`.
Returns: SimpleNamespace that contains
opt: result of scipy.optimize
Phi_D_src: source potential matrix used in Mandoline
Phi_D_tgt: target potential matrix used in Mandoline
n_src: number of source samples
n_tgt: number of target samples
edge_list: the `edge_list` parameter passed as input
"""
# Copy and binarize the input matrices to -1/1
D_src, D_tgt = np.copy(D_src), np.copy(D_tgt)
if np.min(D_src) == 0:
D_src[D_src == 0] = -1
D_tgt[D_tgt == 0] = -1
# Edge list encoding dependencies between gs
if edge_list is not None:
edge_list = np.array(edge_list)
# Create the potential matrices
Phi_D_tgt, Phi_D_src = Phi(D_tgt, edge_list), Phi(D_src, edge_list)
# Number of examples
n_src, n_tgt = Phi_D_src.shape[0], Phi_D_tgt.shape[0]
def f(x):
obj = Phi_D_tgt.dot(x).sum() - n_tgt * scipy_special.logsumexp(Phi_D_src.dot(x))
return -obj
# Set the kernel
kernel = partial(skmetrics.rbf_kernel, gamma=sigma)
def llkliep_f(x):
obj = kernel(
Phi_D_tgt, x[:, np.newaxis]
).sum() - n_tgt * scipy_special.logsumexp(kernel(Phi_D_src, x[:, np.newaxis]))
return -obj
# Solve
if not sigma:
opt = scipy_optimize.minimize(
f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
)
else:
opt = scipy_optimize.minimize(
llkliep_f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
)
return SimpleNamespace(
opt=opt,
Phi_D_src=Phi_D_src,
Phi_D_tgt=Phi_D_tgt,
n_src=n_src,
n_tgt=n_tgt,
edge_list=edge_list,
)
def log_density_ratio(D, solved):
"""Calculate the log density ratio for a solved Mandoline run."""
Phi_D = Phi(D, None)
return Phi_D.dot(solved.opt.x) + log_partition_ratio(
solved.opt.x, solved.Phi_D_src, solved.n_src
)
def get_k_most_unbalanced_gs(D_src, D_tgt, k):
"""Get the top k slices that shift most between source and target
distributions.
Uses difference in marginals between each slice.
"""
marginal_diff = np.abs(D_src.mean(axis=0) - D_tgt.mean(axis=0))
differences = np.sort(marginal_diff)[-k:]
indices = np.argsort(marginal_diff)[-k:]
return list(indices), list(differences)
def weighted_estimator(weights, empirical_mat):
"""Calculate a weighted empirical mean over a matrix.
Calculates an unweighted mean if `weights` is None.
"""
if weights is None:
return np.mean(empirical_mat, axis=0)
return np.sum(weights[:, np.newaxis] * empirical_mat, axis=0)
def estimate_performance(
D_src,
D_tgt,
edge_list,
empirical_mat_list_src,
):
"""Estimate performance on a target distribution using slices from the
source and target data."""
# Run the solver
solved = mandoline(D_src, D_tgt, edge_list)
# Compute the weights on the source dataset
density_ratios = np.e ** log_density_ratio(solved.Phi_D_src, solved)
# Self-normalized importance weights
weights = density_ratios / np.sum(density_ratios)
all_estimates = []
for mat_src in empirical_mat_list_src:
# Estimates is a 1-D array of estimates for each mat e.g.
# each mat can correspond to a model's (n x 1) error matrix
weighted_estimates = weighted_estimator(weights, mat_src)
source_estimates = weighted_estimator(
np.ones(solved.n_src) / solved.n_src, mat_src
)
all_estimates.append(
SimpleNamespace(
weighted=weighted_estimates,
source=source_estimates,
)
)
return SimpleNamespace(
all_estimates=all_estimates,
solved=solved,
weights=weights,
)
```
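The module above estimates target-distribution performance by fitting a KLIEP-style density ratio over binary slice indicators and reweighting source examples. A minimal usage sketch, assuming the functions above are in scope; the slice matrices and the per-example correctness vector are synthetic:
```python
import numpy as np

rng = np.random.RandomState(0)

# Synthetic binary (example x slice) matrices for source and target data.
D_src = rng.binomial(1, 0.5, size=(1000, 3))
D_tgt = rng.binomial(1, 0.3, size=(500, 3))

# Per-example correctness of some model on the source data (n_src x 1).
correct_src = rng.binomial(1, 0.8, size=(1000, 1)).astype(float)

est = estimate_performance(
    D_src, D_tgt, edge_list=None, empirical_mat_list_src=[correct_src]
)
print(est.all_estimates[0].source)    # unweighted source-set accuracy
print(est.all_estimates[0].weighted)  # importance-weighted target estimate
```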
#### File: robustnessgym/core/operation.py
```python
from __future__ import annotations
import pathlib
from abc import ABC
from collections import defaultdict
from functools import partial
from typing import Callable, Dict, List, Optional, Union
from meerkat import AbstractColumn
from meerkat.mixins.identifier import IdentifierMixin
from meerkat.tools.identifier import Identifier
from robustnessgym.core.slice import SliceDataPanel as DataPanel
def tuple_to_dict(keys: List[str]):
def _tuple_to_dict(fn: callable):
def _wrapper(*args, **kwargs):
# Assume that if the output isn't a tuple,
# it can be converted to a tuple of length 1
output = fn(*args, **kwargs)
if not isinstance(output, tuple):
output = (output,)
return dict(zip(keys, output))
return _wrapper
return _tuple_to_dict
def stow(
dp: DataPanel,
ops: Dict[Operation, List[List[str]]],
):
"""Apply Operations in sequence.
Args:
dp (DataPanel): DataPanel
ops (Dict[Operation, List[List[str]]]):
Returns:
Updated DataPanel.
"""
# Remove Operations whose outputs are already in the DataPanel
for op, list_of_columns in list(ops.items()):
indices_to_remove = []
for i, columns in enumerate(list(list_of_columns)):
if op.exists(dp):
# Remove the columns at index i
indices_to_remove.append(i)
# Remove input columns for which the Operation was previously executed
for index in sorted(indices_to_remove, reverse=True):
columns = ops[op].pop(index)
print(f"skipped: {op.identifier} -> {columns}", flush=True)
# Remove the op entirely if list_of_columns is now empty
if not ops[op]:
ops.pop(op)
# Run the remaining Operations
for op, list_of_columns in ops.items():
for columns in list_of_columns:
dp = op(dp, columns=columns)
return dp
def lookup(
dp: DataPanel,
op: Union[type, Operation],
columns: List[str],
output_name: str = None,
) -> AbstractColumn:
"""Retrieve the outputs of an Operation from a DataPanel.
Args:
dp (DataPanel): DataPanel
op (Union[type, Operation]): subclass of Operation, or Operation object
columns (List[str]): list of input columns that Operation was applied to
output_name (Optional[str]): for an Operation with `num_outputs` > 1,
the name of the output column to lookup
Returns:
Output columns of the Operation from the DataPanel.
"""
# Operation identifier that should be retrieved
if isinstance(op, Operation):
op_name = str(op.identifier.name)
else:
op_name = str(Identifier(op.__name__))
# Identifiers for all columns in the DataPanel, grouped without input columns
# for Operation identifiers.
column_identifiers = defaultdict(list)
for col in dp.columns:
identifier = Identifier.parse(col)
column_identifiers[identifier.without("columns")].append(identifier)
# Search for the column group that best matches the Operation identifier
best_match, best_distance = None, 100000000
for identifier in column_identifiers:
# The prefix to match
prefix = str(identifier)
# Pick the key that best matches the cls name or instance identifier
if (
prefix.startswith(op_name)
and len(
prefix.replace(op_name, "").replace(
"" if output_name is None else output_name, ""
)
)
< best_distance
):
best_match = identifier
best_distance = len(
prefix.replace(op_name, "").replace(
"" if output_name is None else output_name, ""
)
)
# Get the best matched column group
identifier = best_match
if identifier is None:
raise AttributeError("Lookup failed.")
return dp[str(identifier(columns=columns))]
class Operation(ABC, IdentifierMixin):
"""Abstract base class for operations in Robustness Gym."""
# Path to a log directory
logdir: pathlib.Path = pathlib.Path.home() / "robustnessgym/operations/"
# Create a directory
logdir.mkdir(parents=True, exist_ok=True)
def __init__(
self,
identifier: Identifier = None,
output_names: List[str] = None,
process_batch_fn: Callable = None,
prepare_batch_fn: Callable = None,
**kwargs,
):
super(Operation, self).__init__(
identifier=identifier
if identifier
else Identifier(_name=self.__class__.__name__, **kwargs),
)
self._output_names = output_names
if process_batch_fn:
self.process_batch = process_batch_fn
if prepare_batch_fn:
self.prepare_batch = prepare_batch_fn
def __repr__(self):
return str(self.identifier)
@property
def num_outputs(self) -> int:
"""Number of output columns created by the Operation."""
return len(self.output_names) if self.output_names else 1
@property
def output_names(self) -> Optional[List[str]]:
"""Name of output columns created by the Operation."""
return self._output_names
@property
def output_identifiers(self) -> List[Identifier]:
if self.output_names:
return [self.identifier(output=name) for name in self.output_names]
return [self.identifier]
@property
def output_columns(self) -> List[str]:
return [str(identifier) for identifier in self.output_identifiers]
@property
def identifier(self) -> Identifier:
"""Name of the Operation."""
return self._identifier
@classmethod
def exists(cls, dp: DataPanel) -> bool:
"""Check if the outputs of the Operation are in `dp`.
Args:
dp: DataPanel
Returns:
bool: True if `dp` contains a column produced by `Operation`,
False otherwise
"""
# TODO: update this to use `Operation.outputs`
return any([key.startswith(cls.__name__) for key in dp.keys()])
def prepare_batch(
self,
dp: DataPanel,
columns: List[str],
*args,
**kwargs,
) -> None:
"""Preparation applied to the DataPanel `dp`.
This is provided as a convenience function that can be called by
`self.prepare`.
Args:
dp (DataPanel): DataPanel
columns (list): list of columns
*args: optional positional arguments
**kwargs: optional keyword arguments
"""
raise NotImplementedError("Implement `prepare_batch`.")
def prepare(
self,
dp: DataPanel,
columns: List[str],
batch_size: int = 32,
*args,
**kwargs,
) -> None:
"""Preparation that is applied before the Operation is applied.
Many Operations require a full pass over the DataPanel to precompute some
variables before the core operation can actually be applied e.g. to create a
Bag-of-Words representation, constructing a vocabulary to keep only
tokens that are frequently seen across the DataPanel.
Args:
dp (DataPanel): DataPanel
columns (list): list of columns
batch_size (int): batch size for `dp.map(...)`
*args: optional positional arguments
**kwargs: optional keyword arguments
"""
try:
dp.map(
function=partial(self.prepare_batch, columns=columns, *args, **kwargs),
input_columns=columns,
is_batched_fn=True,
batch_size=batch_size,
*args,
**kwargs,
)
except NotImplementedError:
return
def process_batch(
self,
dp: DataPanel,
columns: List[str],
**kwargs,
) -> tuple:
"""The core functionality of the Operation.
This is provided as a convenience function that can be called by
`self.process`.
Args:
dp (DataPanel): DataPanel
columns (list): list of columns
**kwargs: optional keyword arguments
Returns:
            Tuple of outputs, where each output is a sequence of values. The expected
order of the outputs is the same as the order of identifiers in
`self.outputs`.
"""
return NotImplemented
def process(
self,
dp: DataPanel,
columns: List[str],
batch_size: int = 32,
*args,
**kwargs,
) -> DataPanel:
"""Apply the Operation to a DataPanel.
Args:
dp (DataPanel): DataPanel
columns (list): list of columns
batch_size (int): batch size for `dp.update(...)`
*args: optional positional arguments
**kwargs: optional keyword arguments
"""
return dp.update(
tuple_to_dict(
keys=[str(ident(columns=columns)) for ident in self.output_identifiers]
)(partial(self.process_batch, columns=columns, *args, **kwargs)),
batch_size=batch_size,
is_batched_fn=True,
*args,
**kwargs,
)
def __call__(
self,
dp: DataPanel,
columns: List[str],
batch_size: int = 32,
**kwargs,
) -> DataPanel:
"""Apply the Operation to a DataPanel.
Args:
dp (DataPanel): DataPanel
columns (list): list of columns
batch_size (int):
**kwargs: optional keyword arguments
Returns:
An updated DataPanel, with additional output columns produced by
the Operation.
"""
if isinstance(dp, DataPanel):
assert len(set(columns) - set(dp.column_names)) == 0, (
f"All `columns` ({columns}) must be present and visible in `dp` ("
f"{list(dp.column_names)})."
)
if self.exists(dp):
return dp
# Prepare to apply the Operation to the DataPanel
self.prepare(
dp=dp,
columns=columns,
batch_size=batch_size,
**kwargs,
)
# Apply the Operation to the DataPanel
dp = self.process(
dp=dp,
columns=columns,
batch_size=batch_size,
**kwargs,
)
return dp
else:
return self(
dp=DataPanel(dp),
columns=columns,
batch_size=batch_size,
**kwargs,
)
```
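To make the `Operation` contract above concrete, here is a minimal sketch of a subclass; the operation itself (upper-casing a text column) and the column names are invented for illustration:
```python
from typing import List


class UppercaseOp(Operation):
    """Toy Operation that adds a single output column of upper-cased text."""

    def __init__(self):
        super().__init__(output_names=["upper"])

    def process_batch(self, dp, columns: List[str], **kwargs) -> tuple:
        # Return one sequence per entry in `output_names`, in order.
        return ([text.upper() for text in dp[columns[0]]],)


# dp = UppercaseOp()(dp, columns=["text"])          # appends the output column
# col = lookup(dp, UppercaseOp, columns=["text"])   # retrieve it afterwards
```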
#### File: robustnessgym/core/slice.py
```python
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Callable, Dict, List, Optional, Union
from meerkat import AbstractColumn, DataPanel
from robustnessgym.core.constants import CURATION, GENERIC, SUBPOPULATION
from robustnessgym.core.identifier import Id, Identifier
class SliceMixin:
"""Slice class in Robustness Gym."""
def __init__(self):
# A slice has a lineage
if self.identifier is None:
self.lineage = []
else:
self.lineage = [(str(self.__class__.__name__), self.identifier)]
# Set the category of the slice: defaults to 'curated'
self.category = CURATION
def add_to_lineage(self, category, identifier, columns=None):
"""Append to the lineage."""
# TODO (karan): add Identifier directly
if columns:
self.lineage.append((category, identifier, columns))
else:
self.lineage.append((category, identifier))
# Update the identifier
self._lineage_to_identifier()
def _add_op_to_lineage(self):
if self.node.last_parent is not None:
opnode, indices = self.node.last_parent
try:
fn = opnode.captured_args["function"]
except KeyError:
return
if opnode.ref().__name__ == "filter":
self.add_to_lineage(
SUBPOPULATION.capitalize(),
Id("Function", name=fn.__name__, mem=hex(id(fn))),
[],
)
self.category = SUBPOPULATION
else:
self.add_to_lineage(
GENERIC.capitalize(),
Id("Function", name=fn.__name__, mem=hex(id(fn))),
[],
)
def _lineage_to_identifier(self):
"""Synchronize to the current lineage by reassigning to
`self._identifier`."""
short_lineage = []
for entry in self.lineage:
if len(entry) == 3:
try:
columns = json.loads(entry[2])
except JSONDecodeError:
columns = entry[2]
short_lineage.append(str(entry[1]) + " @ " + str(columns))
else:
short_lineage.append(str(entry[1]))
# Assign the new lineage to the identifier
self._identifier = Identifier(_name=" -> ".join(short_lineage))
@property
def identifier(self):
"""Slice identifier."""
if self._identifier:
return self._identifier
if self.lineage:
self._lineage_to_identifier()
return self._identifier
return None
@identifier.setter
def identifier(self, value):
"""Set the slice's identifier."""
self._identifier = value
@classmethod
def _add_state_keys(cls) -> set:
"""List of attributes that describe the state of the object."""
return {
"lineage",
"category",
}
class SliceDataPanel(DataPanel, SliceMixin):
def __init__(self, *args, **kwargs):
super(SliceDataPanel, self).__init__(*args, **kwargs)
SliceMixin.__init__(self)
@classmethod
def _state_keys(cls) -> set:
state_keys = super(SliceDataPanel, cls)._state_keys()
state_keys.union(cls._add_state_keys())
return state_keys
def update(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
is_batched_fn: bool = False,
batch_size: Optional[int] = 1,
remove_columns: Optional[List[str]] = None,
num_workers: int = 0,
materialize: bool = True,
pbar: bool = False,
**kwargs,
) -> SliceDataPanel:
dp = super(SliceDataPanel, self).update(
function=function,
with_indices=with_indices,
input_columns=input_columns,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
remove_columns=remove_columns,
num_workers=num_workers,
materialize=materialize,
pbar=pbar,
**kwargs,
)
if isinstance(dp, SliceDataPanel):
dp._add_op_to_lineage()
return dp
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
is_batched_fn: bool = False,
batch_size: Optional[int] = 1,
drop_last_batch: bool = False,
num_workers: int = 0,
materialize: bool = True,
pbar: bool = False,
**kwargs,
) -> Optional[SliceDataPanel]:
dp = super(SliceDataPanel, self).filter(
function=function,
with_indices=with_indices,
input_columns=input_columns,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
num_workers=num_workers,
materialize=materialize,
pbar=pbar,
**kwargs,
)
if isinstance(dp, SliceDataPanel):
dp._add_op_to_lineage()
return dp
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
is_batched_fn: bool = False,
batch_size: Optional[int] = 1,
drop_last_batch: bool = False,
num_workers: int = 0,
output_type: type = None,
mmap: bool = False,
materialize: bool = True,
pbar: bool = False,
**kwargs,
) -> Optional[Union[Dict, List, AbstractColumn]]:
dp = super(SliceDataPanel, self).map(
function=function,
with_indices=with_indices,
input_columns=input_columns,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
num_workers=num_workers,
output_type=output_type,
mmap=mmap,
materialize=materialize,
pbar=pbar,
**kwargs,
)
if isinstance(dp, SliceDataPanel):
dp._add_op_to_lineage()
return dp
```
#### File: robustnessgym/core/testbench.py
```python
from __future__ import annotations
import json
import logging
import pathlib
from typing import Callable, Collection, Dict, List, Optional, Sequence, Union
import dill
import pandas as pd
import torch
from fuzzywuzzy import process
from tqdm import tqdm
from robustnessgym.core.constants import (
ATTACK,
AUGMENTATION,
CURATION,
GENERIC,
SUBPOPULATION,
)
from robustnessgym.core.identifier import Identifier
from robustnessgym.core.metrics import compute_metric, get_metric
from robustnessgym.core.model import Model
from robustnessgym.core.slice import SliceDataPanel as DataPanel
from robustnessgym.core.tools import persistent_hash
from robustnessgym.core.version import SemanticVersionerMixin
from robustnessgym.report.report import (
ClassDistributionColumn,
NumericColumn,
Report,
ScoreColumn,
)
from robustnessgym.tasks.schema import Schema
from robustnessgym.tasks.task import Task
logger = logging.getLogger(__name__)
class TestBench(SemanticVersionerMixin):
"""Class for test benches in Robustness Gym."""
def __init__(
self,
identifier: Union[str, Identifier],
task: Task = None,
slices: Collection[DataPanel] = None,
version: str = "0.0.1",
dataset_id: str = None,
class_names: Collection[str] = None,
):
# Call the superclass
super(TestBench, self).__init__(version=version)
# An identifier for the TestBench
self.identifier = identifier
# Set the task
self.task = task
if task is None:
self.task = Task()
# Create the collection of slices
self._slices = set()
self.slice_identifiers = set()
self._slice_table = {}
# Add slices if any
if slices:
self.add_slices(slices)
# The testbench has calculators
self.calculators = set()
# The testbench internally tracks metrics
self.metrics = {}
# The schema tells the testbench which columns to extract from the slices for
# evaluation
self.schema_type = "default"
self.dataset_id = dataset_id
self.class_names = class_names
@property
def slices(self):
"""Slices in the testbench."""
return self._slices
def __repr__(self):
return f"TestBench[{self.identifier}](slices={len(self.slices)})"
def _digest(self) -> str:
return json.dumps([str(sl) for sl in self.slices])
@classmethod
def from_dataset(
cls,
dp: DataPanel,
input_columns: List[str],
output_columns: List[str],
# prediction_columns: List[str],
# metrics: List[str],
) -> TestBench:
"""Create a TestBench from a dp."""
# Define the task
task = Task(
# Identifier
Identifier("Task", dp=str(dp.identifier)),
# Input and output schemas
*Schema.for_dataset(dp, input_columns, output_columns),
)
# Create the testbench
testbench = TestBench(
identifier=Identifier("TestBench", dp=str(dp.identifier)),
task=task,
slices=[dp],
)
# testbench.set_single_dataset_mode()
# testbench.set_prediction_columns(prediction_columns)
return testbench
def set_prediction_columns(self, prediction_columns: List[str]):
"""Set the list of columns that act as prediction columns."""
self.prediction_columns = prediction_columns
def set_single_dataset_mode(self):
"""All slices must be derived from the root dataset in single dataset
mode."""
self.single_dataset_mode = True
def add_calculators(self, calculators: List[Union[str, Callable]]):
"""Add a list of calculators."""
for calc in calculators:
if isinstance(calc, str):
self.calculators.add(get_metric(calc))
elif isinstance(calc, Callable):
self.calculators.add(calc)
else:
continue
def add_model(
self,
model: Model = None,
prediction_columns: List[str] = None,
identifier: Union[str, Identifier] = None,
):
if prediction_columns is not None:
assert model is None, "`model` must be None."
assert (
identifier is not None
), "A model `identifier` must be included with `prediction_columns`."
self.metrics[identifier] = {}
for sl in self.slices:
self.metrics[identifier][sl.identifier] = {}
for calc in self.calculators:
self.metrics[identifier][sl.identifier][calc.__name__] = calc(
*[sl[col] for col in prediction_columns],
*[sl[col] for col in self.task.output_schema.columns],
)
@classmethod
def for_dataset(
cls,
dataset: str,
task: Optional[Union[str, Task]] = None,
version: str = None,
):
"""Create a test bench for a dataset."""
inferred_task = None
if task is not None:
# Infer the task from the dataset
inferred_task = Task.lookup(dataset=dataset)()
# Check that the inferred task matches the task argument
if task is not None and task != inferred_task:
raise AssertionError(
f"DataPanel {dataset} is only compatible with {inferred_task}, "
f"not {task}."
)
return TestBench(
identifier=f"{dataset}-{task}-{version}",
task=inferred_task,
slices=[],
)
@classmethod
def for_task(
cls,
task: Union[str, Task],
version: str = None,
):
"""Create a testbench for a task."""
return TestBench(
identifier=f"{task}-{version}",
task=task,
slices=[],
)
def _human_readable_identifiers(self):
# Temporary function to generate human readable names
groups = {}
for ident in self.slice_identifiers:
if "->" in str(ident):
builder_ident = str(ident).split(" -> ")[-1]
builder_ident, cols = builder_ident.split(" @ ")
name = builder_ident.split("(")[0]
if name not in groups:
groups[name] = set()
groups[name].add((builder_ident, cols))
group_info = {}
for key, group in groups.items():
if len(group) == 1:
group_info[key] = "name"
else:
only_single_column = len(set([t[1] for t in group])) == 1
if only_single_column:
group_info[key] = "builder_ident"
else:
group_info[key] = "full"
ident_mapping = {}
for ident in self.slice_identifiers:
if "->" in str(ident):
builder_ident = str(ident).split(" -> ")[-1]
builder_ident, cols = builder_ident.split(" @ ")
name = builder_ident.split("(")[0]
if group_info[name] == "name":
new_ident = name
elif group_info[name] == "builder_ident":
new_ident = builder_ident
elif group_info[name] == "full":
new_ident = str(ident).split(" -> ")[-1]
if new_ident.startswith("NlpAugTransformation"):
new_ident = new_ident.split("NlpAugTransformation(pipeline=[")[
1
].split("])")[0]
else:
new_ident = str(ident).split("(")[0]
ident_mapping[ident] = new_ident
self.ident_mapping = ident_mapping
def add_slices(self, slices: Collection[DataPanel]) -> None:
"""Add slices to the testbench.
Args:
slices: collection of Slice objects
"""
if isinstance(slices, DataPanel):
slices = [slices]
# Add slices
for sl in slices:
            if not isinstance(sl, DataPanel):
                # Wrap plain panels/datasets as a SliceDataPanel
                sl = DataPanel(sl)
if isinstance(sl, DataPanel):
if sl.identifier not in self.slice_identifiers and len(sl) > 0:
# Add slices that aren't already present in the testbench and have
# non-zero length
self.slices.add(sl)
self.slice_identifiers.add(sl.identifier)
self._slice_table[sl.identifier] = sl
def evaluate(
self,
model: Model,
batch_size: int = 32,
coerce_fn: Callable = None,
input_columns: List[str] = None,
output_columns: List[str] = None,
) -> Dict:
"""Evaluate a model using the test bench and cache results.
Args:
model: model to evaluate
batch_size: batch size for inference
coerce_fn: function to coerce the model's outputs. Useful if the model's
outputs cannot directly be compared to the targets.
input_columns: columns for input schema. Required if task is None.
output_columns: columns for output schema. Required if task is None.
Returns: dict mapping slice identifiers to evaluation metrics.
"""
if self.task is None:
if input_columns is None or output_columns is None:
raise ValueError(
"Input and output columns required when no task specified."
)
else:
# Set the schema using the task
# TODO Is the remapping required when not using a task
self.set_schema("task")
input_columns = self.task.input_schema.columns
output_columns = self.task.output_schema.columns
# TODO(karan): Uncomment and fix this assert on the type of outputs that
# model(..) returns
# # Grab 2 examples from the first slice, run it through the model and check
# that the output is a dictionary
# output = model(dataset=DataPanel.from_batch(self.slices[0][:2]),
# input_keys=self.task.input_schema.keys(),
# output_keys=self.task.output_schema.keys(),
# batch_size=2,
# coerce_fn=coerce_fn)
# print(output)
# assert isinstance(output, Sequence) and isinstance(output[0], Mapping), \
# "model(..) must return a list of dictionaries. Each dictionary should
# map metric names to values."
# Store the model_metrics
if model.identifier not in self.metrics:
self.metrics[model.identifier] = {}
# Run the model on all the slices
# TODO(karan): For slices that are subpopulations, the same example can be in
# multiple slices
# and will be run through the model multiple times. Create a UnionDataPanel?
for sl in tqdm(self.slices):
if sl.identifier not in self.metrics[model.identifier]:
# Evaluate on the slice
# TODO Why not update existing results?
self.metrics[model.identifier][sl.identifier] = model.evaluate(
dataset=sl,
input_columns=input_columns,
output_columns=output_columns,
batch_size=batch_size,
coerce_fn=coerce_fn,
)
return self.metrics[model.identifier]
def add_predictions(
self,
model: Union[Model, str],
predictions: Dict[str, Union[Sequence, torch.Tensor]],
output_columns: List[str] = None,
num_classes=None,
metrics: List[str] = None,
) -> Dict:
"""Compute and cache metrics for pre-computed model predictions.
Args:
model: Model or model id
predictions: Map from slice id to sequence or torch Tensor of predictions
            metrics (optional): list of metrics. If None, use the metrics specified
                in the task.
output_columns (optional): names of output columns. Required if testbench
does not have associated task.
num_classes (optional): number of classes. Required if testbench does not
have associated task.
Returns:
computed metrics
"""
if self.task is None:
if output_columns is None:
raise ValueError(
"'output_columns' is required if testbench does not have "
"associated task."
)
if num_classes is None:
raise ValueError(
"'num_classes' is required if testbench does not have associated "
"task."
)
if metrics is None:
raise ValueError(
"'metrics' is required if testbench does not have associated task."
)
else:
output_columns = self.task.output_schema.columns
num_classes = self.task.output_schema.features[
list(self.task.output_schema.columns)[0]
].num_classes
if self.task.classification():
assert len(output_columns) == 1 # , "Only supports classification."
if metrics is None:
metrics = self.task.metrics
if len(output_columns) > 1:
raise NotImplementedError("Only single output column supported")
if isinstance(model, Model):
model = model.identifier
if model not in self.metrics:
self.metrics[model] = {}
for sl in tqdm(self.slices):
if sl.identifier not in self.metrics[model]:
# Evaluate on the slice
# TODO Why not update existing results?
# slice_predictions = predictions[sl.identifier]
evaluation_dict = {}
# Temporarily expose prediction columns
# sl.set_format(columns=output_columns()
# slice_predictions = predictions[sl.identifier]
# TODO Optimize
# labels = list(zip(*[sl[col] for col in output_columns]))
labels = sl[output_columns[0]]
for metric in metrics:
evaluation_dict[metric] = compute_metric(
metric=metric,
predictions=predictions[sl.identifier],
labels=labels,
num_classes=num_classes,
)
# sl.reset_format()
self.metrics[model][sl.identifier] = evaluation_dict
return evaluation_dict
def add_metrics(self, model: Union[Model, str], metrics: Dict[str, float]):
"""Cache pre-computed metrics for model.
Args:
model: Model or model id.
metrics: map from metric name to value
"""
if isinstance(model, Model):
model = model.identifier
self.metrics[model] = metrics
def create_report(
self,
model: Union[Model, str],
metric_ids: List[str] = None,
) -> Report:
"""Generate report from cached metrics for a model.
Args:
model: Model or model id. Metrics must have already been computed for
this model.
metric_ids (optional): list of metric ids to include in desired order.
If None, take metrics from sample slice.
Returns:
report
"""
if len(self.slices) == 0:
raise ValueError("Cannot create report for empty testbench")
if isinstance(model, Model):
model = model.identifier
if model not in self.metrics:
raise ValueError(
f"Metrics for model {model} have not been computed yet."
f" You must first execute one of "
"the following methods for this model: 'evaluate', "
"'add_predictions', 'add_metrics'"
)
# TODO(Jesse): Need a category for test set
model_metrics = self.metrics[model]
# TODO(Jesse): where to put this? Should only need to be called once
self._human_readable_identifiers()
if metric_ids is None:
sample_slice = list(self.slices)[0].identifier
metric_ids = list(model_metrics[sample_slice].keys())
sorted_metric_ids = sorted(
[
metric_id
for metric_id in metric_ids
if metric_id not in ("class_dist", "pred_dist")
]
)
if "class_dist" in metric_ids:
sorted_metric_ids.append("class_dist")
if "pred_dist" in metric_ids:
sorted_metric_ids.append("pred_dist")
metric_ids = sorted_metric_ids
# Populate columns
columns = []
for metric_id in metric_ids:
if metric_id in ("class_dist", "pred_dist"):
if self.task is None:
class_cds = None
else:
class_names = self.task.output_schema.features[
list(self.task.output_schema.columns)[0]
].names
class_cds = [name[0].upper() for name in class_names]
columns.append(ClassDistributionColumn(metric_id, class_cds))
else:
columns.append(
ScoreColumn(metric_id, min_val=0, max_val=1, is_0_to_1=True)
)
columns.append(NumericColumn("Size"))
category_names = {
GENERIC: "DataPanel",
SUBPOPULATION: "SubPop",
ATTACK: "Attack",
AUGMENTATION: "Augment",
CURATION: "Eval",
}
# Populate data
data = []
for sl in self.slices:
slice_name = self.ident_mapping[sl.identifier]
slice_size = len(sl)
slice_category = category_names.get(sl.category, sl.category.capitalize())
row = []
row.append(slice_category)
row.append(slice_name)
if sl.identifier not in model_metrics:
raise ValueError(
f"Metrics for model {model} and slice {sl.identifier}"
f"have not yet been computed."
)
slice_metrics = model_metrics[sl.identifier]
for metric_id in metric_ids:
row.append(slice_metrics[metric_id])
row.append(slice_size)
data.append(row)
# TODO(karan): generalize aggregation
# slice_metrics = tz.merge_with(np.mean, slice_metrics)
# Task-dependent model predictions
# TODO(karan): e.g. average class distribution predicted, figure out how to
# put this in
# Task-dependent sl information
# TODO(karan): e.g. class distribution
df = pd.DataFrame(data)
report = Report(
data=df, columns=columns, model_name=model, dataset_name=self.dataset_id
)
report.sort(
category_order=dict(
(cat, i)
for i, cat in enumerate(
[SUBPOPULATION, AUGMENTATION, CURATION, ATTACK, GENERIC]
)
)
)
return report
def set_schema(self, schema_type: str):
assert schema_type in {"default", "task"}
if self.schema_type == schema_type:
return
if schema_type == "task":
self._slices = {self.task.remap_schema(sl) for sl in self.slices}
self.schema_type = schema_type
elif schema_type == "default":
# TODO(karan): undo the schema standardization
raise NotImplementedError
def search(self, keyword: str, limit: int = 3):
return [
self._slice_table[t[0]]
for t in process.extract(keyword, self.slice_identifiers, limit=limit)
]
def save(self, path: str) -> None:
"""Save the current testbench to disk. This will save all slices in the
testbench to disk, as well as metrics and other metadata associated
with this testbench.
Args:
path: string path to the save directory
Returns: None
"""
# Path to the save directory
savedir = pathlib.Path(path) / f"{self.identifier}"
# Create a directory inside savedir for the slices
(savedir / "slices").mkdir(parents=True, exist_ok=True)
# Save all the slices
pbar = tqdm(self.slices)
for sl in pbar:
pbar.set_description(f"Saving slice {str(sl.identifier)[:100]}...")
sl.save_to_disk(
str(savedir / "slices" / str(persistent_hash(str(sl.identifier))))
)
# Save metrics
dill.dump(self.metrics, open(str(savedir / "metrics.dill"), "wb"))
# Save metadata
dill.dump(
{
"task": self.task,
"identifier": self.identifier,
"dataset_id": self.dataset_id,
"slices": [
(sl.identifier, sl.category, sl.lineage, len(sl))
for sl in self.slices
],
},
open(str(savedir / "metadata.dill"), "wb"),
)
# Save version info
with open(str(savedir / "version.dill"), "wb") as f:
f.write(self._dumps_version())
@classmethod
def available(cls, path: str) -> List[str]:
"""Check the list of available testbenches in a directory.
Args:
path: string path to a directory. The testbenches available inside this
directory will be returned.
Returns: list of available testbenches
"""
# Path to the save directory
savedir = pathlib.Path(path)
# Loop over the folders
testbench_identifiers = []
for maybe_testbench in savedir.glob("*"):
if (
maybe_testbench.is_dir()
and (maybe_testbench / "metadata.dill").exists()
):
testbench_identifiers.append(maybe_testbench.name)
return testbench_identifiers
@classmethod
def load_metrics(cls, path: str) -> dict:
"""Load metrics from disk."""
# Path to the save directory
savedir = pathlib.Path(path)
# Load metrics
return dill.load(open(str(savedir / "metrics.dill"), "rb"))
@classmethod
def load_metadata(cls, path: str) -> dict:
"""Load metrics from disk."""
# Path to the save directory
savedir = pathlib.Path(path)
# Load metrics
return dill.load(open(str(savedir / "metadata.dill"), "rb"))
@classmethod
def load(cls, path: str) -> TestBench:
"""Load a testbench from disk.
Args:
path: string path to the testbench directory
Returns:
"""
# Path to the save directory
savedir = pathlib.Path(path)
# Load all the slices
slices = []
for sl_path in tqdm(list((savedir / "slices").glob("*"))):
try:
slices.append(DataPanel.load_from_disk(str(sl_path)))
except FileNotFoundError:
continue
# Load metrics
metrics = dill.load(open(str(savedir / "metrics.dill"), "rb"))
# Load metadata
metadata = dill.load(open(str(savedir / "metadata.dill"), "rb"))
# Create the testbench
testbench = cls(
identifier=metadata["identifier"],
task=metadata["task"],
slices=slices,
)
# Set previously stored metrics
testbench.metrics = metrics
# Load version info
with open(str(savedir / "version.dill"), "rb") as f:
testbench._loads_version(f.read())
return testbench
```
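A hedged sketch of the bookkeeping workflow above, assuming `sl` is a `SliceDataPanel` built elsewhere; the model id and metric values are made up:
```python
tb = TestBench(identifier="sketch-testbench")

tb.add_slices([sl])                             # register one or more slices
tb.add_metrics("my-model", {"accuracy": 0.87})  # cache precomputed metrics
print(tb.metrics["my-model"])                   # {'accuracy': 0.87}

tb.save("./testbenches")                        # writes slices, metrics, metadata
# restored = TestBench.load("./testbenches/sketch-testbench")
```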
#### File: robustnessgym/ops/allen.py
```python
from typing import List
from meerkat.tools.lazy_loader import LazyLoader
from robustnessgym.core.operation import Operation
from robustnessgym.core.slice import SliceDataPanel as DataPanel
from robustnessgym.mixins.device import DeviceMixin
predictors = LazyLoader("allennlp.predictors")
class AllenPredictionOp(DeviceMixin, Operation):
def __init__(
self,
path: str,
device: str,
):
super(AllenPredictionOp, self).__init__(device=device)
self._predictor = predictors.Predictor.from_path(
archive_path=path, cuda_device=self.cuda_device
)
@property
def predictor(self):
return self._predictor
def process_batch(
self,
dp: DataPanel,
columns: List[str],
**kwargs,
) -> tuple:
return (
self.predictor.predict_batch_json(
[{"sentence": text} for text in dp[columns[0]]]
),
)
class AllenConstituencyParsingOp(AllenPredictionOp):
def __init__(self, device: str = None):
super(AllenConstituencyParsingOp, self).__init__(
path="https://storage.googleapis.com/allennlp-public-models/elmo"
"-constituency-parser-2020.02.10.tar.gz",
device=device,
)
def process_batch(
self,
dp: DataPanel,
columns: List[str],
**kwargs,
) -> tuple:
return (
[
p["trees"]
for p in self.predictor.predict_batch_json(
[{"sentence": text} for text in dp[columns[0]]]
)
],
)
class AllenDependencyParsingOp(AllenPredictionOp):
def __init__(self, device: str = None):
super(AllenDependencyParsingOp, self).__init__(
path="https://storage.googleapis.com/allennlp-public-models/"
"biaffine-dependency-parser-ptb-2020.04.06.tar.gz",
device=device,
)
class AllenSemanticRoleLabelingOp(AllenPredictionOp):
def __init__(self, device: str = None):
super(AllenSemanticRoleLabelingOp, self).__init__(
path="https://storage.googleapis.com/allennlp-public-models/bert-base-srl"
"-2020.03.24.tar.gz",
device=device,
)
```
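For reference, a hedged usage sketch of the ops above, using the `lookup` helper shown earlier; it needs `allennlp` installed and downloads a pretrained archive, so treat it as illustrative only:
```python
# dp: a SliceDataPanel with a "text" column, built elsewhere.
op = AllenConstituencyParsingOp()
dp = op(dp, columns=["text"])                    # adds a column of parse trees
trees = lookup(dp, AllenConstituencyParsingOp, ["text"])
print(trees[0])
```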
#### File: robustnessgym/scripts/qa.py
```python
import itertools
import pandas as pd
from meerkat import DataPanel
from meerkat.columns.prediction_column import ClassificationOutputColumn
from robustnessgym.core.model import Model
from robustnessgym.tasks.task import ExtractiveQuestionAnswering
def run(dataset_info, model_name, debug=False):
def fn(dp):
# TODO(karan): Uncomment & add correct code here.
# batch = {"col1": dp["context"].data, "col2": dp["question"].data}
# input_cols = ["context", "question"]
#
# input_batch = model.encode_batch(batch, input_cols)
# import cytoolz as tz
# import torch
# input_batch = tz.valmap(
# lambda v: torch.tensor(v).to(device=model.device),
# input_batch
# )
#
# with torch.no_grad():
# outputs = model.model(**input_batch)
#
# outputs = (torch.argmax(outputs.start_logits).item(),
# torch.argmax(outputs.end_logits).item())
#
# token_id = input_batch['input_ids'][0][outputs[0]: outputs[1]]
# answer = model.tokenizer.decode(token_id)
# return answer
pass
dataset_name, split = dataset_info
if isinstance(dataset_name, str):
dataset_name = (dataset_name,)
dp = DataPanel.load_huggingface(*dataset_name, split=split)
if debug:
dp = dp[:10]
model = Model.huggingface(
model_name,
task=ExtractiveQuestionAnswering(),
is_classifier=False,
)
out = dp.map(
fn,
batch_size=5,
is_batched_fn=True,
# output_type=ClassificationOutputColumn,
pbar=True,
)
out = ClassificationOutputColumn(logits=out)
del model
dirname = f"./{'_'.join(dataset_name)}-{model_name}"
out.write(dirname)
# TO LOAD:
# ClassificationOutputColumn.read(dirname)
return out
DATASETS = [
("squad", "validation"),
]
# 'squad' 'validation'
MODELS = [
"distilbert-base-cased-distilled-squad",
]
ds = DATASETS[0]
model = MODELS[0]
outs = []
for ds, model in itertools.product(DATASETS, MODELS):
try:
run(ds, model, debug=True)
outs.append(
{"dataset": str(ds), "model": model, "status": "Success", "Reason": None}
)
except Exception as e:
print(e)
outs.append(
{"dataset": str(ds), "model": model, "status": "Failed", "Reason": str(e)}
)
success = pd.DataFrame(outs)
```
#### File: robustnessgym/slicebuilders/slicebuilder_collection.py
```python
from __future__ import annotations
from typing import List
import cytoolz as tz
import numpy as np
import tqdm
from robustnessgym.core.constants import GENERIC
from robustnessgym.core.slice import SliceDataPanel as DataPanel
from robustnessgym.slicebuilders.slicebuilder import SliceBuilder
class SliceBuilderCollection(SliceBuilder):
"""Collection of Slice Builders."""
def __init__(self, slicebuilders: List[SliceBuilder], *args, **kwargs):
super(SliceBuilderCollection, self).__init__(
category=GENERIC,
identifiers=list(
tz.concat([slicebuilder.identifiers for slicebuilder in slicebuilders])
),
*args,
**kwargs,
)
# TODO(karan): some slicebuilders aren't compatible with each other (e.g.
# single column vs. multi column):
# add some smarter logic here to handle this
# Store the subpopulations
self.slicebuilders = slicebuilders
def __repr__(self):
# TODO(karan): format this nicely
return (
f"{self.__class__.__name__}("
f"{[str(slicebuilder) for slicebuilder in self.slicebuilders]})]"
)
def __call__(
self,
dp: DataPanel,
columns: List[str],
mask: List[int] = None,
store_compressed: bool = None,
store: bool = None,
*args,
**kwargs,
):
if mask:
raise NotImplementedError(
"Mask not supported for SliceBuilderCollection yet."
)
slices = []
slice_membership = []
# Apply each slicebuilder in sequence
for i, slicebuilder in tqdm.tqdm(enumerate(self.slicebuilders)):
# Apply the slicebuilder
dp, slices_i, slice_membership_i = slicebuilder(
batch_or_dataset=dp,
columns=columns,
mask=mask,
store_compressed=store_compressed,
store=store,
*args,
**kwargs,
)
# Add in the slices and slice membership
slices.extend(slices_i)
slice_membership.append(slice_membership_i)
slice_membership = np.concatenate(slice_membership, axis=1)
return dp, slices, slice_membership
```
#### File: robustnessgym/slicebuilders/subpopulation_collection.py
```python
from typing import List, Sequence
import cytoolz as tz
import numpy as np
from multiprocess.pool import Pool
from tqdm import tqdm
from robustnessgym.core.constants import SLICEBUILDERS
from robustnessgym.core.slice import SliceDataPanel as DataPanel
from robustnessgym.slicebuilders.subpopulation import Subpopulation
class SubpopulationCollection(Subpopulation):
def __init__(self, subpopulations: Sequence[Subpopulation], *args, **kwargs):
super(SubpopulationCollection, self).__init__(
identifiers=list(
tz.concat(
[subpopulation.identifiers for subpopulation in subpopulations]
)
),
*args,
**kwargs,
)
# TODO(karan): some subpopulations aren't compatible with each other (e.g.
# single column vs. multi column):
# add some smarter logic here to handle this
# Store the subpopulations
self.subpopulations = subpopulations
def __call__(
self,
dp: DataPanel,
columns: List[str],
num_proc: int = None,
*args,
**kwargs,
):
if not num_proc or num_proc == 1:
slices = []
slice_membership = []
# Apply each slicebuilder in sequence
for i, slicebuilder in tqdm(enumerate(self.subpopulations)):
# Apply the slicebuilder
slices_i, slice_membership_i = slicebuilder(
dp=dp,
columns=columns,
*args,
**kwargs,
)
# Add in the slices and slice membership
slices.extend(slices_i)
slice_membership.append(slice_membership_i)
else:
# TODO(karan): cleanup, make mp.Pool support simpler across the library
with Pool(num_proc) as pool:
slices, slice_membership = zip(
*pool.map(
lambda sb: sb(
dp=dp,
columns=columns,
*args,
**kwargs,
),
[slicebuilder for slicebuilder in self.subpopulations],
)
)
# Combine all the slices
slices = list(tz.concat(slices))
def _store_updates(batch, indices):
# Each Subpopulation will generate slices
for i, subpopulation in enumerate(self.subpopulations):
updates = subpopulation.construct_updates(
slice_membership=slice_membership[i][indices],
columns=columns,
)
batch = subpopulation.store(
batch=batch,
updates=updates,
)
return batch
if isinstance(dp, DataPanel):
dp = dp.map(
_store_updates,
with_indices=True,
batched=True,
)
for subpopulation in self.subpopulations:
# Update the DataPanel's history
dp.update_tape(
path=[SLICEBUILDERS, subpopulation.category],
identifiers=subpopulation.identifiers,
columns=columns,
)
# Combine all the slice membership matrices
slice_membership = np.concatenate(slice_membership, axis=1)
return slices, slice_membership
def apply(
self,
batch: DataPanel,
columns: List[str],
slice_membership: np.ndarray = None,
*args,
**kwargs,
) -> np.ndarray:
# Each Subpopulation will generate slices
for subpopulation, end_idx in zip(
self.subpopulations, np.cumsum([s.num_slices for s in self.subpopulations])
):
# Fill out the slice_membership
slice_membership[
:, end_idx - subpopulation.num_slices : end_idx
] = subpopulation.apply(
slice_membership=slice_membership[
:, end_idx - subpopulation.num_slices : end_idx
],
batch=batch,
columns=columns,
)
return slice_membership
# TODO(karan): add combinations for collections
```
#### File: slicebuilders/subpopulations/entity_frequency.py
```python
from __future__ import annotations
from collections import Counter
from typing import List, Tuple
import numpy as np
from robustnessgym.core.identifier import Identifier
from robustnessgym.core.slice import SliceDataPanel as DataPanel
from robustnessgym.slicebuilders.subpopulation import Subpopulation
class EntityFrequency(Subpopulation):
def __init__(self, entity_thresholds: List[Tuple[str, List[int]]], *args, **kwargs):
identifiers = []
for entity_type, thresholds in entity_thresholds:
for threshold in thresholds:
identifiers.append(
Identifier(
_name=self.__class__.__name__,
entity_type=entity_type,
threshold=threshold,
)
)
super(EntityFrequency, self).__init__(identifiers, *args, **kwargs)
if len(entity_thresholds) == 0:
raise ValueError("At least one entity type required")
for entity_type, _ in entity_thresholds:
if entity_type not in [
"PERSON",
"NORP",
"FAC",
"ORG",
"GPE",
"LOC",
"PRODUCT",
"EVENT",
"WORK_OF_ART",
"LAW",
"LANGUAGE",
"DATE",
"TIME",
"PERCENT",
"MONEY",
"QUANTITY",
"ORDINAL",
"CARDINAL",
]:
raise ValueError(f"Invalid entity type: {entity_type}")
# List of tuples, each of which contains an entity type and a list of
# associated frequency thresholds
self.entity_thresholds = entity_thresholds
def apply(
self,
batch: DataPanel,
columns: List[str],
slice_membership: np.ndarray = None,
*args,
**kwargs,
) -> np.ndarray:
if len(columns) != 1:
raise ValueError("Only one key allowed")
key = columns[0]
for i, cache_item in enumerate(batch["cache"]):
entities = cache_item["Spacy"][key]["ents"]
entity_types = [ent["label"] for ent in entities]
counts = Counter(entity_types)
slice_ndx = 0
for entity_type, thresholds in self.entity_thresholds:
for threshold in thresholds:
if counts[entity_type] >= threshold:
slice_membership[i, slice_ndx] = 1
slice_ndx += 1
return slice_membership
```
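A small sketch of constructing the subpopulation above; note that `apply()` expects a legacy batch layout with a `"cache"` column holding Spacy outputs, so only construction is shown:
```python
ef = EntityFrequency(entity_thresholds=[("PERSON", [1, 2]), ("ORG", [1])])
# One slice per (entity type, threshold) pair, in declaration order.
print([str(ident) for ident in ef.identifiers])
```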
#### File: slicebuilders/subpopulations/length.py
```python
from __future__ import annotations
from typing import Callable, List, Tuple, Union
import numpy as np
from robustnessgym.core.identifier import Identifier
from robustnessgym.core.operation import lookup
from robustnessgym.core.slice import SliceDataPanel as DataPanel
from robustnessgym.ops.spacy import SpacyOp
from robustnessgym.slicebuilders.subpopulations.score import ScoreSubpopulation
class NumTokensSubpopulation(ScoreSubpopulation):
"""Subpopulation based on token length."""
def __init__(
self,
intervals: List[Tuple[Union[int, float, str], Union[int, float, str]]],
reduction_fn: Callable = np.sum,
**kwargs,
):
super(NumTokensSubpopulation, self).__init__(
intervals=intervals,
identifiers=[
Identifier(
_name=self.__class__.__name__,
gte=interval[0],
lte=interval[1],
reduction_fn=reduction_fn,
)
for interval in intervals
],
**kwargs,
)
# Assign the reduction fn
self.reduction_fn = reduction_fn
def score(
self,
batch: DataPanel,
columns: List[str],
*args,
**kwargs,
) -> np.ndarray:
# Length of each example, for each column
try:
lengths = [
[len(doc) for doc in lookup(batch, SpacyOp, [col])] for col in columns
]
except AttributeError:
lengths = [[len(text.split()) for text in batch[col]] for col in columns]
# Reduction over column key axis
return self.reduction_fn(np.array(lengths), axis=0)
class NumCharsSubpopulation(ScoreSubpopulation):
"""Subpopulation based on character length."""
def __init__(
self,
intervals: List[Tuple[Union[int, float, str], Union[int, float, str]]],
reduction_fn: Callable = np.sum,
**kwargs,
):
super(NumCharsSubpopulation, self).__init__(
intervals=intervals,
identifiers=[
Identifier(
_name=self.__class__.__name__,
gte=interval[0],
lte=interval[1],
reduction_fn=reduction_fn,
)
for interval in intervals
],
**kwargs,
)
# Assign the reduction fn
self.reduction_fn = reduction_fn
def score(
self,
batch: DataPanel,
columns: List[str],
*args,
**kwargs,
) -> np.ndarray:
# Length of each example, for each column
lengths = [[len(text) for text in batch[col]] for col in columns]
# Reduction over column key axis
return self.reduction_fn(np.array(lengths), axis=0)
```
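A hedged usage sketch of the interval-based subpopulations above; the interval bounds and column name are illustrative:
```python
sp = NumTokensSubpopulation(intervals=[(0, 8), (9, 25), (26, 1000)])

# slices, membership = sp(dp, columns=["text"])
# `membership` is an (n_examples x 3) binary matrix, one column per interval;
# without a cached SpacyOp column, `score` falls back to whitespace tokenization.
```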
#### File: robustnessgym/slicebuilders/transformation.py
```python
from typing import Callable, List, Optional, Tuple
import numpy as np
from robustnessgym.core.constants import TRANSFORMATION
from robustnessgym.core.identifier import Identifier
from robustnessgym.core.slice import SliceDataPanel as DataPanel
from robustnessgym.slicebuilders.slicebuilder import SliceBuilder
class Transformation(SliceBuilder):
def __init__(
self,
num_transformed: int = None,
identifiers: List[Identifier] = None,
apply_fn: Callable = None,
category: str = None,
):
assert (
num_transformed if not identifiers else True
), "Must pass in num_transformed if no identifiers are given."
super(Transformation, self).__init__(
identifiers=[
Identifier(
_name=f"{self.__class__.__name__}-{i + 1}",
)
for i in range(num_transformed)
]
if not identifiers
else identifiers,
category=category if category else TRANSFORMATION,
apply_fn=apply_fn,
)
@property
def num_transformed(self):
return self.num_slices
def apply(
self,
batch: DataPanel,
columns: List[str],
skeleton_batches: List[DataPanel],
slice_membership: np.ndarray,
*args,
**kwargs,
) -> Tuple[List[DataPanel], np.ndarray]:
raise NotImplementedError
def process_batch(
self,
dp: DataPanel,
columns: List[str],
*args,
**kwargs,
) -> Tuple[List[DataPanel], Optional[np.ndarray]]:
# Determine the size of the batch
batch_size = len(dp[list(dp.keys())[0]])
# Construct the matrix of slice labels: (batch_size x num_slices)
slice_membership = np.ones((batch_size, self.num_slices), dtype=np.int32)
# Uncache the batch to construct the skeleton for transformed batches
skeleton_batches = [
DataPanel.uncached_batch(dp) for _ in range(self.num_slices)
]
# Set the index for the skeleton batches
for j, skeleton_batch in enumerate(skeleton_batches):
# skeleton_batch.update(
# lambda x: {'index': f"{x['index']}-{self.identifiers[j]}"}
# )
skeleton_batch["index"] = [
f"{idx}-{self.identifiers[j]}" for idx in skeleton_batch["index"]
]
# Apply the SliceBuilder's core functionality: use positional args
try:
transformed_batches, slice_membership = self.apply(
dp,
columns,
skeleton_batches,
slice_membership,
*args,
**kwargs,
)
        except TypeError:
            # Fall back to an apply() that does not accept skeleton batches
            transformed_batches, slice_membership = self.apply(
                dp, columns, *args, **kwargs
            )
# Remove transformed examples where slice_membership[i, :] = 0 before returning
transformed_batches = [
self.filter_batch_by_slice_membership(
batch=transformed_batch,
slice_membership=slice_membership[:, j : j + 1],
)[0]
for j, transformed_batch in enumerate(transformed_batches)
]
return transformed_batches, slice_membership
class SingleColumnTransformation(Transformation):
def single_column_apply(self, column_batch: List) -> List[List]:
        raise NotImplementedError(
            "Implement single_column_apply to use this transformation."
        )
def apply(
self,
batch: DataPanel,
columns: List[str],
skeleton_batches: List[DataPanel],
slice_membership: np.ndarray,
*args,
**kwargs,
) -> Tuple[List[DataPanel], np.ndarray]:
# Independently apply the transformation over the columns
for column in columns:
try:
# Apply
transformed_batch = self.single_column_apply(
column_batch=batch[column],
)
assert len(transformed_batch) == len(
batch[column]
), "Must output one list of augmentations per example."
# Store the transformed text in the skeleton batches
for i in range(slice_membership.shape[0]):
for j, transformed in enumerate(transformed_batch[i]):
skeleton_batches[j][column][i] = transformed
except: # noqa
# Unable to transform: set all slice membership labels to zero
slice_membership[:, :] = 0
return skeleton_batches, slice_membership
```
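To ground the `SingleColumnTransformation` hook above, a minimal sketch of a subclass; the lower-casing transformation and the column name are invented:
```python
class LowercaseTransformation(SingleColumnTransformation):
    """Toy transformation producing `num_transformed` lower-cased variants."""

    def __init__(self, num_transformed: int = 1):
        super().__init__(num_transformed=num_transformed)

    def single_column_apply(self, column_batch):
        # One list of variants per input example, `num_transformed` entries each.
        return [
            [text.lower() for _ in range(self.num_transformed)]
            for text in column_batch
        ]


# dp, slices, membership = LowercaseTransformation()(dp, columns=["text"])
```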
#### File: tests/core/test_decorators.py
```python
from unittest import TestCase
from robustnessgym.core.decorators import singlecolumn
class TestDecorators(TestCase):
def test_singlecolumn(self):
@singlecolumn
def apply(self, batch, columns):
print(columns)
apply(None, None, ["abc"])
with self.assertRaises(AssertionError):
apply(None, None, ["abc", "bcd"])
```
#### File: tests/core/test_tools.py
```python
from unittest import TestCase
from robustnessgym.core.tools import recmerge
class TestTools(TestCase):
def test_recmerge(self):
output = recmerge(
{"a": 2, "b": 3, "d": {"e": [1, 2, 3], "f": [3, 4, 5]}, "g": 17},
{"b": 12, "d": {"e": [1, 2, 3], "f": [3, 4]}},
{"a": 4, "d": {"f": [3]}},
)
self.assertEqual(
output, {"a": 4, "b": 12, "d": {"e": [1, 2, 3], "f": [3]}, "g": 17}
)
output = recmerge(
{"a": 2, "b": 3, "d": {"e": [1, 2, 3], "f": [3, 4, 5]}, "g": 17},
{"b": 12, "d": {"e": [1, 2, 3], "f": [3, 4]}},
{"a": 4, "d": {"f": [3]}},
merge_sequences=True,
)
self.assertEqual(
output,
{
"a": 4,
"b": 12,
"d": {"e": [1, 2, 3, 1, 2, 3], "f": [3, 4, 5, 3, 4, 3]},
"g": 17,
},
)
```
#### File: tests/ops/test_spacy.py
```python
from unittest import TestCase
from robustnessgym import lookup
from robustnessgym.ops import SpacyOp
from tests.testbeds import MockTestBedv0
class TestSpacy(TestCase):
def setUp(self):
self.testbed = MockTestBedv0()
def test_apply(self):
# Create the Spacy cached operation
spacy = SpacyOp()
# Apply it
dataset = spacy(self.testbed.dataset, ["text"])
print(dataset.column_names)
# Retrieve information to test
sentences = [doc.sents for doc in lookup(dataset, spacy, ["text"])]
tokens = [list(doc) for doc in lookup(dataset, spacy, ["text"])]
entities = [doc.ents for doc in lookup(dataset, spacy, ["text"])]
num_tokens = [len(list(doc)) for doc in lookup(dataset, spacy, ["text"])]
self.assertEqual(
sentences,
[
["The man is walking."],
["The man is running."],
["The woman is sprinting."],
["The woman is resting."],
["The hobbit is flying."],
["The hobbit is swimming."],
],
)
self.assertEqual(
tokens,
[
["The", "man", "is", "walking", "."],
["The", "man", "is", "running", "."],
["The", "woman", "is", "sprinting", "."],
["The", "woman", "is", "resting", "."],
["The", "hobbit", "is", "flying", "."],
["The", "hobbit", "is", "swimming", "."],
],
)
self.assertEqual(entities, [[], [], [], [], [], []])
self.assertEqual(num_tokens, [5, 5, 5, 5, 5, 5])
```
#### File: slicebuilders/transformations/test_eda.py
```python
import random
import unittest
from unittest import TestCase
import numpy as np
from robustnessgym.slicebuilders.transformations.eda import EasyDataAugmentation
from tests.testbeds import MockTestBedv0
@unittest.skip("Unable to control EDA randomness.")
class TestEasyDataAugmentation(TestCase):
def setUp(self):
self.testbed = MockTestBedv0()
def test_apply(self):
# Create the EDA SliceBuilder
eda = EasyDataAugmentation(num_transformed=3)
# Set the seed
random.seed(0)
np.random.seed(0)
for i, identifier in enumerate(eda.identifiers):
self.assertEqual(
str(identifier),
f"EasyDataAugmentation-{i + 1}(alpha_sr=0.1, alpha_ri=0.1, "
f"alpha_rs=0.1, p_rd=0.1)",
)
# Apply it
dataset, slices, slice_membership = eda(self.testbed.dataset, columns=["text"])
# All the sizes match up
self.assertEqual(len(dataset), len(self.testbed.dataset))
for sl in slices:
self.assertEqual(len(sl), len(self.testbed.dataset))
self.assertEqual(slice_membership.shape, (6, 3))
# Everything was transformed
self.assertTrue(np.all(slice_membership))
# Checking that the transformed text matches
self.assertEqual(
slices[0]["text"],
[
"the man is military man walking",
"the constitute man is running",
"the woman is sprinting",
"the woman is",
"the hobbit is flying",
"the hobbit is swimming",
],
)
# Dataset interaction history updated correctly
self.assertEqual(
len(dataset.fetch_tape(["slicebuilders", "transformation"]).history), 3
)
```
|
{
"source": "jessevogt/dask",
"score": 2
}
|
#### File: dask/dask/async.py
```python
from __future__ import absolute_import
import warnings
from . import local
_msg = ("`dask.async.{0}` has been moved to `dask.local.{0}`, please "
"update your imports")
def get_sync(*args, **kwargs):
warnings.warn(_msg.format('get_sync'))
return local.get_sync(*args, **kwargs)
def get_async(*args, **kwargs):
warnings.warn(_msg.format('get_async'))
return local.get_async(*args, **kwargs)
```
#### File: dask/dask/sizeof.py
```python
from __future__ import print_function, division, absolute_import
import sys
from .utils import Dispatch
try: # PyPy does not support sys.getsizeof
sys.getsizeof(1)
getsizeof = sys.getsizeof
except (AttributeError, TypeError): # Monkey patch
getsizeof = lambda x: 100
sizeof = Dispatch(name='sizeof')
@sizeof.register(object)
def sizeof_default(o):
return getsizeof(o)
@sizeof.register(list)
@sizeof.register(tuple)
@sizeof.register(set)
@sizeof.register(frozenset)
def sizeof_python_collection(seq):
return getsizeof(seq) + sum(map(sizeof, seq))
@sizeof.register_lazy("numpy")
def register_numpy():
import numpy as np
@sizeof.register(np.ndarray)
def sizeof_numpy_ndarray(x):
return int(x.nbytes)
@sizeof.register_lazy("pandas")
def register_pandas():
import pandas as pd
@sizeof.register(pd.DataFrame)
def sizeof_pandas_dataframe(df):
p = int(df.memory_usage(index=True).sum())
obj = int((df.dtypes == object).sum() * len(df) * 100)
if df.index.dtype == object:
obj += len(df) * 100
return int(p + obj) + 1000
@sizeof.register(pd.Series)
def sizeof_pandas_series(s):
p = int(s.memory_usage(index=True))
if s.dtype == object:
p += len(s) * 100
if s.index.dtype == object:
p += len(s) * 100
return int(p) + 1000
@sizeof.register(pd.Index)
def sizeof_pandas_index(i):
p = int(i.memory_usage())
obj = len(i) * 100 if i.dtype == object else 0
return int(p + obj) + 1000
@sizeof.register_lazy("scipy")
def register_spmatrix():
from scipy import sparse
@sizeof.register(sparse.dok_matrix)
def sizeof_spmatrix_dok(s):
return s.__sizeof__()
@sizeof.register(sparse.spmatrix)
def sizeof_spmatrix(s):
return sum(
sizeof(v) for v in s.__dict__.values()
)
```
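The sizeof dispatcher above falls back to sys.getsizeof and only pulls in the numpy/pandas/scipy estimators when those packages are available, via register_lazy. A rough usage sketch (byte counts are estimates and vary by platform):
```python
import numpy as np
from dask.sizeof import sizeof

sizeof(1)               # plain objects fall through to sizeof_default
sizeof([1, 2, 3, 4])    # container overhead plus the size of each element
sizeof(np.zeros(1000))  # ~8000 bytes, taken directly from ndarray.nbytes
```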
#### File: dask/tests/test_multiprocessing.py
```python
import multiprocessing
from operator import add
import pickle
import random
import numpy as np
import pytest
from dask import compute, delayed
from dask.context import set_options
from dask.multiprocessing import get, _dumps, _loads, remote_exception
from dask.utils_test import inc
def test_pickle_globals():
""" For the function f(x) defined below, the only globals added in pickling
should be 'np' and '__builtins__'"""
def f(x):
return np.sin(x) + np.cos(x)
assert set(['np', '__builtins__']) == set(
_loads(_dumps(f)).__globals__.keys())
def bad():
raise ValueError("12345")
def test_errors_propagate():
dsk = {'x': (bad,)}
try:
get(dsk, 'x')
except Exception as e:
assert isinstance(e, ValueError)
assert "12345" in str(e)
def test_remote_exception():
e = TypeError("hello")
a = remote_exception(e, 'traceback-body')
b = remote_exception(e, 'traceback-body')
assert type(a) == type(b)
assert isinstance(a, TypeError)
assert 'hello' in str(a)
assert 'Traceback' in str(a)
assert 'traceback-body' in str(a)
def make_bad_result():
return lambda x: x + 1
def test_unpicklable_results_generate_errors():
dsk = {'x': (make_bad_result,)}
try:
get(dsk, 'x')
except Exception as e:
# can't use type because pickle / cPickle distinction
assert type(e).__name__ in ('PicklingError', 'AttributeError')
class NotUnpickleable(object):
def __getstate__(self):
return ()
def __setstate__(self, state):
raise ValueError("Can't unpickle me")
def test_unpicklable_args_generate_errors():
a = NotUnpickleable()
def foo(a):
return 1
dsk = {'x': (foo, a)}
try:
get(dsk, 'x')
except Exception as e:
assert isinstance(e, ValueError)
dsk = {'x': (foo, 'a'),
'a': a}
try:
get(dsk, 'x')
except Exception as e:
assert isinstance(e, ValueError)
def test_reuse_pool():
pool = multiprocessing.Pool()
with set_options(pool=pool):
assert get({'x': (inc, 1)}, 'x') == 2
assert get({'x': (inc, 1)}, 'x') == 2
def test_dumps_loads():
with set_options(func_dumps=pickle.dumps, func_loads=pickle.loads):
assert get({'x': 1, 'y': (add, 'x', 2)}, 'y') == 3
def test_fuse_doesnt_clobber_intermediates():
d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 10, 'y')}
assert get(d, ['y', 'z']) == (2, 12)
def test_optimize_graph_false():
from dask.callbacks import Callback
d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 10, 'y')}
keys = []
with Callback(pretask=lambda key, *args: keys.append(key)):
get(d, 'z', optimize_graph=False)
assert len(keys) == 2
@pytest.mark.parametrize('random', [np.random, random])
def test_random_seeds(random):
def f():
return tuple(random.randint(0, 10000) for i in range(5))
N = 10
with set_options(get=get):
results, = compute([delayed(f, pure=False)() for i in range(N)])
assert len(set(results)) == N
```
|
{
"source": "jessevp07/lcoc-ldevs",
"score": 3
}
|
#### File: lcoc-ldevs/lcoc/afdc.py
```python
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
#private
import lcoc.readwrite as readwrite
class DCFastChargingLocator(object):
"""
Object for working with DCFC station location data downloaded from the
the Alternative Fuels Data Center (AFDC). The AFDC is a comprehensive
clearinghouse of information about advanced transportation technologies.
It is supported by the U.S. Department of Energy's Vehicle Technologies
Office.
Attributes
-----------
station_data:
pandas.DataFrame of public, active DCFC stations from the AFDC
prev_exists:
Boolean indicating whether this version of the dataset has been run previously
"""
def __init__(self, afdc_file=None):
# Download AFDC data
if afdc_file is not None:
self.station_data = pd.read_csv(afdc_file, low_memory=False)
else:
self.station_data = readwrite.read_afdc_data()
# Save copy of AFDC data (if unique) to data\\afdc
self.prev_exists = readwrite.write_afdc_data(self.station_data)
# Preprocessing for unique DCFC stations only
self.station_data = self.station_data[self.station_data['EV DC Fast Count'] > 0]
self.station_data = self.station_data.groupby(['Latitude', 'Longitude'])['EV DC Fast Count'].sum().reset_index()
def join_county_geoid(self):
"""
Function adds 'county_geoid' field to self.station_data by joining
station latitude/longitude to the county shapefile.
"""
# Add Point geometries
pts = []
for lon, lat in zip(self.station_data['Longitude'], self.station_data['Latitude']):
pt = Point(lon, lat)
pts.append(pt)
self.station_data['geom'] = pts
self.station_data = gpd.GeoDataFrame(self.station_data, geometry='geom')
# Spatial join
us_counties_gdf = gpd.read_file('data\\gis\\2017_counties\\tl_2017_us_county.shp')
self.station_data.crs = us_counties_gdf.crs
county_join_gdf = us_counties_gdf[['NAME', 'GEOID', 'STATEFP', 'COUNTYFP', 'geometry']]
self.station_data = gpd.sjoin(self.station_data, county_join_gdf, how='left', op='intersects')
# Clean up
self.station_data.rename(columns = {'NAME': 'county_name',
'GEOID': 'geoid',
'STATEFP': 'state_fips',
'COUNTYFP': 'county_fips'}, inplace=True)
self.station_data.drop(columns='index_right', inplace=True)
def aggregate_counties_to_csv(self, outfile='outputs\\county-dcfc-counts\\afdc_county_station_counts.csv'):
"""
Function counts the stations in each county and writes the counts to outfile.
"""
county_stations = self.station_data.groupby(['county_name',
'geoid',
'state_fips',
'county_fips'])['geom'].agg('count').reset_index()
county_stations.rename(columns={'geom': 'n_dcfc_stations'}, inplace=True)
# Add state abbrev
state_fips_cw = {1: 'AL',
2: 'AK',
4: 'AZ',
5: 'AR',
6: 'CA',
8: 'CO',
9: 'CT',
11: 'DC',
10: 'DE',
12: 'FL',
13: 'GA',
15: 'HI',
19: 'IA',
16: 'ID',
17: 'IL',
18: 'IN',
20: 'KS',
21: 'KY',
22: 'LA',
25: 'MA',
24: 'MD',
23: 'ME',
26: 'MI',
27: 'MN',
29: 'MO',
28: 'MS',
30: 'MT',
37: 'NC',
38: 'ND',
31: 'NE',
33: 'NH',
34: 'NJ',
35: 'NM',
32: 'NV',
36: 'NY',
39: 'OH',
40: 'OK',
41: 'OR',
42: 'PA',
44: 'RI',
45: 'SC',
46: 'SD',
47: 'TN',
48: 'TX',
49: 'UT',
51: 'VA',
50: 'VT',
53: 'WA',
55: 'WI',
54: 'WV',
56: 'WY'}
county_stations['State'] = county_stations['state_fips'].apply(lambda x: state_fips_cw[int(x)])
county_stations.to_csv(outfile, index=False)
print("Complete, {0} stations in {1} counties.".format(county_stations['n_dcfc_stations'].sum(), len(county_stations)))
def categorize_by_plugcnt(self):
"""
Categorize stations by plug count where: small = 1-3 plugs, medium =
4-14 plugs, large = 15+ plugs.
"""
def categorize(plugs):
if plugs <= 3:
category = 's'
elif 3 < plugs < 15:
category = 'm'
elif plugs >= 15:
category = 'l'
else:
category = None
return category
self.station_data['category'] = self.station_data['EV DC Fast Count'].apply(categorize)
```
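A hedged end-to-end sketch of the loader above; the CSV name is a placeholder, while the county shapefile and output locations are the paths hard-coded in the methods:
```python
# 'afdc_stations.csv' is a placeholder for a locally saved AFDC export.
locator = DCFastChargingLocator(afdc_file='afdc_stations.csv')
locator.join_county_geoid()          # spatial join against the 2017 county shapefile
locator.categorize_by_plugcnt()      # adds the 's'/'m'/'l' category column
locator.aggregate_counties_to_csv()  # writes county-level station counts
```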
|
{
"source": "jessew23/y2x",
"score": 3
}
|
#### File: jessew23/y2x/nicehash.py
```python
from datetime import datetime
from time import mktime
import uuid
import hmac
import requests
import json
from hashlib import sha256
import optparse
import sys
class public_api:
def __init__(self, host, verbose=False):
self.host = host
self.verbose = verbose
def request(self, method, path, query, body):
url = self.host + path
if query:
url += '?' + query
if self.verbose:
print(method, url)
s = requests.Session()
if body:
body_json = json.dumps(body)
response = s.request(method, url, data=body_json)
else:
response = s.request(method, url)
if response.status_code == 200:
return response.json()
elif response.content:
raise Exception(str(response.status_code) + ": " + response.reason + ": " + str(response.content))
else:
raise Exception(str(response.status_code) + ": " + response.reason)
def get_current_global_stats(self):
return self.request('GET', '/main/api/v2/public/stats/global/current/', '', None)
def get_global_stats_24(self):
return self.request('GET', '/main/api/v2/public/stats/global/24h/', '', None)
def get_active_orders(self):
return self.request('GET', '/main/api/v2/public/orders/active/', '', None)
def get_active_orders2(self):
return self.request('GET', '/main/api/v2/public/orders/active2/', '', None)
def buy_info(self):
return self.request('GET', '/main/api/v2/public/buy/info/', '', None)
def get_algorithms(self):
return self.request('GET', '/main/api/v2/mining/algorithms/', '', None)
def get_markets(self):
return self.request('GET', '/main/api/v2/mining/markets/', '', None)
def get_currencies(self):
return self.request('GET', '/main/api/v2/public/currencies/', '', None)
def get_multialgo_info(self):
return self.request('GET', '/main/api/v2/public/simplemultialgo/info/', '', None)
def get_exchange_markets_info(self):
return self.request('GET', '/exchange/api/v2/info/status', '', None)
def get_exchange_trades(self, market):
return self.request('GET', '/exchange/api/v2/trades', 'market=' + market, None)
def get_candlesticks(self, market, from_s, to_s, resolution):
return self.request('GET', '/exchange/api/v2/candlesticks', "market={}&from={}&to={}&resolution={}".format(market, from_s, to_s, resolution), None)
def get_exchange_orderbook(self, market, limit):
return self.request('GET', '/exchange/api/v2/orderbook', "market={}&limit={}".format(market, limit), None)
class private_api:
def __init__(self, host, organisation_id, key, secret, verbose=False):
self.key = key
self.secret = secret
self.organisation_id = organisation_id
self.host = host
self.verbose = verbose
def request(self, method, path, query, body):
xtime = self.get_epoch_ms_from_now()
xnonce = str(uuid.uuid4())
message = bytearray(self.key, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(str(xtime), 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(xnonce, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(self.organisation_id, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(method, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(path, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(query, 'utf-8')
if body:
body_json = json.dumps(body)
message += bytearray('\x00', 'utf-8')
message += bytearray(body_json, 'utf-8')
digest = hmac.new(bytearray(self.secret, 'utf-8'), message, sha256).hexdigest()
xauth = self.key + ":" + digest
headers = {
'X-Time': str(xtime),
'X-Nonce': xnonce,
'X-Auth': xauth,
'Content-Type': 'application/json',
'X-Organization-Id': self.organisation_id,
'X-Request-Id': str(uuid.uuid4())
}
s = requests.Session()
s.headers = headers
url = self.host + path
if query:
url += '?' + query
if self.verbose:
print(method, url)
if body:
response = s.request(method, url, data=body_json)
else:
response = s.request(method, url)
if response.status_code == 200:
return response.json()
elif response.content:
raise Exception(str(response.status_code) + ": " + response.reason + ": " + str(response.content))
else:
raise Exception(str(response.status_code) + ": " + response.reason)
def get_epoch_ms_from_now(self):
now = datetime.now()
now_ec_since_epoch = mktime(now.timetuple()) + now.microsecond / 1000000.0
return int(now_ec_since_epoch * 1000)
def algo_settings_from_response(self, algorithm, algo_response):
algo_setting = None
for item in algo_response['miningAlgorithms']:
if item['algorithm'] == algorithm:
algo_setting = item
if algo_setting is None:
raise Exception('Settings for algorithm not found in algo_response parameter')
return algo_setting
def get_accounts(self):
return self.request('GET', '/main/api/v2/accounting/accounts2/', '', None)
def get_accounts_for_currency(self, currency):
return self.request('GET', '/main/api/v2/accounting/account2/' + currency, '', None)
def get_withdrawal_addresses(self, currency, size, page):
params = "currency={}&size={}&page={}".format(currency, size, page)
return self.request('GET', '/main/api/v2/accounting/withdrawalAddresses/', params, None)
def get_withdrawal_types(self):
return self.request('GET', '/main/api/v2/accounting/withdrawalAddresses/types/', '', None)
def withdraw_request(self, address_id, amount, currency):
withdraw_data = {
"withdrawalAddressId": address_id,
"amount": amount,
"currency": currency
}
return self.request('POST', '/main/api/v2/accounting/withdrawal/', '', withdraw_data)
def get_my_active_orders(self, algorithm, market, limit):
ts = self.get_epoch_ms_from_now()
params = "algorithm={}&market={}&ts={}&limit={}&op=LT".format(algorithm, market, ts, limit)
return self.request('GET', '/main/api/v2/hashpower/myOrders', params, None)
def create_pool(self, name, algorithm, pool_host, pool_port, username, password):
pool_data = {
"name": name,
"algorithm": algorithm,
"stratumHostname": pool_host,
"stratumPort": pool_port,
"username": username,
"password": password
}
return self.request('POST', '/main/api/v2/pool/', '', pool_data)
def delete_pool(self, pool_id):
return self.request('DELETE', '/main/api/v2/pool/' + pool_id, '', None)
def get_my_pools(self, page, size):
return self.request('GET', '/main/api/v2/pools/', '', None)
def create_hashpower_order(self, market, type, algorithm, price, limit, amount, pool_id, algo_response):
algo_setting = self.algo_settings_from_response(algorithm, algo_response)
order_data = {
"market": market,
"algorithm": algorithm,
"amount": amount,
"price": price,
"limit": limit,
"poolId": pool_id,
"type": type,
"marketFactor": algo_setting['marketFactor'],
"displayMarketFactor": algo_setting['displayMarketFactor']
}
return self.request('POST', '/main/api/v2/hashpower/order/', '', order_data)
def cancel_hashpower_order(self, order_id):
return self.request('DELETE', '/main/api/v2/hashpower/order/' + order_id, '', None)
def refill_hashpower_order(self, order_id, amount):
refill_data = {
"amount": amount
}
return self.request('POST', '/main/api/v2/hashpower/order/' + order_id + '/refill/', '', refill_data)
def set_price_hashpower_order(self, order_id, price, algorithm, algo_response):
algo_setting = self.algo_settings_from_response(algorithm, algo_response)
price_data = {
"price": price,
"marketFactor": algo_setting['marketFactor'],
"displayMarketFactor": algo_setting['displayMarketFactor']
}
return self.request('POST', '/main/api/v2/hashpower/order/' + order_id + '/updatePriceAndLimit/', '',
price_data)
def set_limit_hashpower_order(self, order_id, limit, algorithm, algo_response):
algo_setting = self.algo_settings_from_response(algorithm, algo_response)
limit_data = {
"limit": limit,
"marketFactor": algo_setting['marketFactor'],
"displayMarketFactor": algo_setting['displayMarketFactor']
}
return self.request('POST', '/main/api/v2/hashpower/order/' + order_id + '/updatePriceAndLimit/', '',
limit_data)
def set_price_and_limit_hashpower_order(self, order_id, price, limit, algorithm, algo_response):
algo_setting = self.algo_settings_from_response(algorithm, algo_response)
price_data = {
"price": price,
"limit": limit,
"marketFactor": algo_setting['marketFactor'],
"displayMarketFactor": algo_setting['displayMarketFactor']
}
return self.request('POST', '/main/api/v2/hashpower/order/' + order_id + '/updatePriceAndLimit/', '',
price_data)
def get_my_exchange_orders(self, market):
return self.request('GET', '/exchange/api/v2/myOrders', 'market=' + market, None)
def get_my_exchange_trades(self, market):
return self.request('GET','/exchange/api/v2/myTrades', 'market=' + market, None)
def create_exchange_limit_order(self, market, side, quantity, price):
query = "market={}&side={}&type=limit&quantity={}&price={}".format(market, side, quantity, price)
return self.request('POST', '/exchange/api/v2/order', query, None)
def create_exchange_buy_market_order(self, market, quantity):
query = "market={}&side=buy&type=market&secQuantity={}".format(market, quantity)
return self.request('POST', '/exchange/api/v2/order', query, None)
def create_exchange_sell_market_order(self, market, quantity):
query = "market={}&side=sell&type=market&quantity={}".format(market, quantity)
return self.request('POST', '/exchange/api/v2/order', query, None)
def cancel_exchange_order(self, market, order_id):
query = "market={}&orderId={}".format(market, order_id)
return self.request('DELETE', '/exchange/api/v2/order', query, None)
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option('-b', '--base_url', dest="base", help="Api base url", default="https://api2.nicehash.com")
parser.add_option('-o', '--organization_id', dest="org", help="Organization id")
parser.add_option('-k', '--key', dest="key", help="Api key")
parser.add_option('-s', '--secret', dest="secret", help="Secret for api key")
parser.add_option('-m', '--method', dest="method", help="Method for request", default="GET")
parser.add_option('-p', '--path', dest="path", help="Path for request", default="/")
parser.add_option('-q', '--params', dest="params", help="Parameters for request")
parser.add_option('-d', '--body', dest="body", help="Body for request")
options, args = parser.parse_args()
private_api = private_api(options.base, options.org, options.key, options.secret)
params = ''
if options.params is not None:
params = options.params
try:
response = private_api.request(options.method, options.path, params, options.body)
except Exception as ex:
print("Unexpected error:", ex)
exit(1)
print(response)
exit(0)
```
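Both clients above share the same request plumbing: public_api sends unauthenticated requests, while private_api signs every call with the X-Time/X-Nonce/X-Auth headers built in request(). A short usage sketch (the organisation id, key and secret are placeholders):
```python
# Public endpoints need no credentials.
pub = public_api('https://api2.nicehash.com', verbose=True)
stats = pub.get_current_global_stats()

# Private endpoints are signed per-request with the HMAC-SHA256 scheme above.
priv = private_api('https://api2.nicehash.com', 'my-org-id', 'my-api-key', 'my-api-secret')
accounts = priv.get_accounts()
```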
|
{
"source": "jesseward/bbs.jesseward.com",
"score": 2
}
|
#### File: bbs.jesseward.com/modules/common.py
```python
from __future__ import division
import os
import math
# local
from x84.bbs import echo, showart, get_ini
from x84.bbs import getterminal, LineEditor
def decorate_menu_item(menu_item, colors):
""" Return menu item decorated. """
key_text = (u'{lb}{inp_key}{rb}'.format(
lb=colors['lowlight'](u'['),
rb=colors['lowlight'](u']'),
inp_key=colors['highlight'](menu_item.inp_key)))
# set the inp_key within the key_text if matching
if menu_item.text.startswith(menu_item.inp_key):
return menu_item.text.replace(menu_item.inp_key, key_text, 1)
# otherwise prefixed with space
return (u'{key_text} {menu_text}'.format(
key_text=key_text, menu_text=menu_item.text))
def render_menu_entries(term, top_margin, menu_items,
colors=None, max_cols=3, max_rowsp=2):
"""
Return all menu items rendered in decorated tabular format.
:param term: terminal instance returned by :func:`getterminal`.
:param int top_margin: the top-most row location to begin.
:param menu_items: any object containing attributes ``inp_key``
and ``text``.
:param dict colors: optional terminal attributes, containing
keys of ``highlight`` and ``lowlight``.
:param int max_cols: maximum number of columns rendered.
:param int max_row_spacing: maximum vertical row spacing.
:rtype: str
"""
# we take measured effects to do this operation much quicker when
# colored_menu_items is set False to accommodate slower systems
# such as the raspberry pi.
if colors is not None:
measure_width = term.length
else:
measure_width = str.__len__
colors = {}
colors['highlight'] = colors.get('highlight', lambda txt: txt)
colors['lowlight'] = colors.get('lowlight', lambda txt: txt)
# render all menu items, highlighting their action 'key'
rendered_menuitems = [decorate_menu_item(menu_item, colors)
for menu_item in menu_items]
# create a parallel array of their measurable width
column_widths = map(measure_width, rendered_menuitems)
# here, we calculate how many vertical sections of menu entries
# may be displayed in 80 columns or less -- and format accordingly
# so that they are left-adjusted in 1 or more tabular columns, with
# sufficient row spacing to pad out the full vertical height of the
# window.
#
# It's really just a bunch of math to make centered, tabular columns..
display_width = min(term.width, 80)
padding = max(column_widths) + 3
n_columns = min(max(1, int(math.floor(display_width / padding))), max_cols)
xpos = max(1, int(math.floor((term.width / 2) - (display_width / 2))))
xpos += int(math.floor((display_width - ((n_columns * padding))) / 2))
rows = int(math.ceil(len(rendered_menuitems) / n_columns))
height = int(math.ceil((term.height - 3) - top_margin))
row_spacing = min(max(1, min(3, int(math.floor(height / rows)))), max_rowsp)
column = 1
output = u''
for idx, item in enumerate(rendered_menuitems):
padding_left = term.move_x(xpos) if column == 1 and xpos else u''
padding_right = ' ' * (padding - column_widths[idx])
if idx == len(rendered_menuitems) - 1:
# last item, two newlines
padding_right = u'\r\n' * 2
elif column == n_columns:
# newline(s) on last column only
padding_right = u'\r\n' * row_spacing
column = 1 if column == n_columns else column + 1
output = u''.join((output, padding_left, item, padding_right))
return output
def waitprompt(term):
""" Display simple "press enter to continue prompt". """
echo(u''.join((
term.normal, '\r\n',
term.move_x(max(0, (term.width // 2) - 40)),
term.magenta('('), term.green('..'),
'press any key to continue', term.green('..'), term.magenta(')')
)))
term.inkey()
return
def display_banner(filepattern, vertical_padding=0, **kwargs):
"""
Start new screen and show artwork, centered.
:param str filepattern: file to display
:param int vertical_padding: number of blank lines to prefix art
:return: number of lines displayed
:rtype: int
Remaining parameters are inherited from :func:`showart`, such
as ``center`` and ``encoding``. By default, ``center`` is True.
"""
# This is unfortunate, we should use 'term' as first argument
term = getterminal()
kwargs['center'] = kwargs.get('center', True)
# move to bottom of screen, reset attribute
echo(term.move(term.height, 0) + term.normal)
# create a new, empty screen
echo(u'\r\n' * (term.height + 1))
# move to home, insert vertical padding
echo(term.home + (u'\r\n' * vertical_padding))
art_generator = showart(filepattern, **kwargs)
line_no = 0
for line_no, txt in enumerate(art_generator):
echo(txt)
# return line number
return line_no + vertical_padding
def prompt_pager(content, line_no=0, colors=None, width=None,
breaker=u'- ', end_prompt=True, **kwargs):
""" Display text, using a stop/continuous/next-page prompt.
:param iterable content: iterable of text contents.
:param int line_no: line number to offset beginning of pager.
:param dict colors: optional dictionary containing terminal styling
attributes, for keys ``'highlight'`` and
``'lowlight'``. When unset, yellow and green
are used.
:param int width: When set, text is left-justified-centered by width.
:param str breaker: repeated decoration for page breaks
:param bool end_prompt: Whether the 'press enter' prompt should be displayed at end.
:param kwargs: additional arguments to :func:`textwrap.wrap`
"""
# This is unfortunate, we should use 'term' as first argument
term = getterminal()
colors = colors or {
'highlight': term.yellow,
'lowlight': term.green
}
pager_prompt = (u'{bl}{s}{br}top, {bl}{c}{br}ontinuous, or '
u'{bl}{enter}{br} for next page {br} {bl}\b\b'
.format(bl=colors['lowlight'](u'['),
br=colors['lowlight'](u']'),
s=colors['highlight'](u's'),
c=colors['highlight'](u'c'),
enter=colors['highlight'](u'return')))
should_break = lambda line_no, height: line_no % (height - 3) == 0
def show_breaker():
if not breaker:
return u''
attr = colors['highlight']
breaker_bar = breaker * (min(80, term.width - 1) // len(breaker))
echo(attr(term.center(breaker_bar).rstrip()))
continuous = False
# we must parse the entire tree, so that we can avoid the needless
# call to show_breaker() on the final line.
result = []
for txt in content:
if txt.rstrip():
result.extend(term.wrap(txt, width, **kwargs))
else:
result.append(u'\r\n')
xpos = 0
if term.width:
xpos = max(0, int((term.width / 2) - width / 2))
for line_no, txt in enumerate(result):
if xpos:
echo(term.move_x(xpos))
echo(txt.rstrip() + term.normal + term.clear_eol + u'\r\n')
if (line_no and line_no != len(result) - 1
and not continuous
and should_break(line_no, term.height)):
show_breaker()
echo(u'\r\n')
if xpos:
echo(term.move_x(xpos))
echo(pager_prompt)
while True:
inp = LineEditor(1, colors=colors).read()
if inp is None or inp and inp.lower() in u'sqx':
# s/q/x/escape: quit
echo(u'\r\n')
return
if len(inp) == 1:
echo(u'\b')
if inp.lower() == 'c':
# c: enable continuous
continuous = True
break
elif inp == u'':
# return: next page
break
# remove pager
echo(term.move_x(0) + term.clear_eol)
if breaker:
# and breaker,
echo(term.move_up() + term.clear_eol)
if end_prompt:
show_breaker()
echo(u'\r\n')
if term.width > 80:
echo(term.move_x(max(0, (term.width // 2) - 40)))
echo(u'Press {enter}.'.format(
enter=colors['highlight'](u'enter')))
inp = LineEditor(0, colors=colors).read()
def prompt_input(term, key, sep_ok=u'::', width=None, colors=None):
""" Prompt for and return input, up to given width and colorscheme. """
colors = colors or {'highlight': term.yellow}
sep_ok = colors['highlight'](sep_ok)
echo(u'{sep} {key:<8}: '.format(sep=sep_ok, key=key))
return LineEditor(colors=colors, width=width).read() or u''
def coerce_terminal_encoding(term, encoding):
""" Coerce encoding of terminal to match session by CSI. """
echo(u'\r\n')
echo({
# ESC %G activates UTF-8 with an unspecified implementation
# level from ISO 2022 in a way that allows to go back to
# ISO 2022 again.
'utf8': u'\x1b%G',
# ESC %@ returns to ISO 2022 in case UTF-8 had been entered.
# ESC (U Sets character set G0 to codepage 437, such as on
# Linux vga console.
'cp437': u'\x1b%@\x1b(U',
}.get(encoding, u''))
# remove possible artifacts, at least, %G may print a raw G
echo(term.move_x(0) + term.clear_eol)
def show_description(term, description, color='white', width=80, **kwargs):
"""
Display text as given ``color``, left-adjusted ``width``.
:param str description: description text, may contain terminal attributes,
in which case ``color`` should be set to None.
:param str color: terminal color attribute name, may be None.
:param int width: left-adjusted width, if this is greater than the current
terminal's width, the terminal's width is used instead.
:param kwargs: all remaining keyword arguments are passed to the built-in
:class:`textwrap.TextWrapper`.
:rtype: int
:returns: number of lines written
"""
wide = min(width, term.width)
xpos = max(0, (term.width // 2) - (wide // 2))
lines = []
for line in unicode(description).splitlines():
if line.strip():
lines.extend(term.wrap(line, wide, **kwargs))
else:
lines.append(u'')
# output as a single string, reducing latency
outp = u''.join(
[getattr(term, color) if color else u''] +
[u''.join((
term.move_x(xpos) if xpos else u'',
txt.rstrip(),
term.clear_eol,
u'\r\n')) for txt in lines])
echo(outp)
return len(outp.splitlines())
def filesize(filename):
""" display a file's size in human-readable format """
size = float(os.stat(filename).st_size)
for scale in u'BKMGT':
if size < 1000 or scale == u'T':
if scale in u'BK':
# no precision for bytes or kilobytes
return (u'{size:d}{scale}'
.format(size=int(size), scale=scale))
# 2-decimal precision
return (u'{size:0.2f}{scale}'
.format(size=size, scale=scale))
size /= 1024
def display_prompt(term, colors):
""" Return string for displaying a system-wide command prompt. """
colors['lowlight'] = colors.get('lowlight', lambda txt: txt)
bbsname = get_ini(section='system', key='bbsname') or 'Unnamed'
xpos = 0
if term.width > 30:
xpos = max(5, int((term.width / 2) - (80 / 2)))
return (u'{xpos} Menu Option {colon} '.format(
xpos=term.move_x(xpos),
colon=(u'-->>')))
```
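decorate_menu_item only needs an object exposing inp_key and text plus two styling callables, so it can be exercised without a live x84 session; the MenuItem namedtuple and pass-through 'colors' below are illustrative:
```python
from collections import namedtuple

MenuItem = namedtuple('MenuItem', ['inp_key', 'text'])
plain = {'highlight': lambda txt: txt, 'lowlight': lambda txt: txt}

decorate_menu_item(MenuItem(u'b', u'bulletins'), plain)  # u'[b]ulletins'
decorate_menu_item(MenuItem(u'g', u'logoff'), plain)     # u'[g] logoff'
```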
#### File: bbs.jesseward.com/modules/logoff.py
```python
def main():
""" Main procedure. """
# pylint: disable=R0914,R0912
# Too many local variables
# Too many branches
from x84.bbs import getsession, getterminal, echo
from x84.bbs import disconnect, getch
session, term = getsession(), getterminal()
session.activity = 'logging off'
goodbye_msg = term.bold_blue(u'NO CARRIER\r\n')
echo(goodbye_msg)
# http://www.xfree86.org/4.5.0/ctlseqs.html
# Restore xterm icon and window title from stack.
echo(unichr(27) + u'[23;0t')
getch(1.5)
disconnect('+++')
```
|
{
"source": "jesseward/discogs-banner",
"score": 3
}
|
#### File: discogs-banner/discogs_banner/api_tools.py
```python
import logging
import requests
import shutil
import time
import os
from discogs_banner.discogs_wrapper import DiscogsWrapper
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('oauthlib').setLevel(logging.WARNING)
def fetch_images(config, images):
"""
Downloads and persists discogs thumbnail images to local disk.
:param config: ConfigParser object
:param images: List containing release id, release title and release
thumbnail.
"""
logger = logging.getLogger(__name__)
for image in images:
image_file_name = os.path.join(
config.get('discogs-banner', 'cache_directory'),
os.path.basename(image[2])
)
# if the file exists, do not overwrite and do not download
if os.path.isfile(image_file_name):
logger.info(u'skipping file={file_name}, already exists in cache.'.
format(file_name=image_file_name))
continue
# limit to 1 QPS to discogs API.
time.sleep(1)
response = requests.get(image[2], stream=True)
if response.status_code == 200:
logger.debug(u'Downloaded image. release-id={release},url={url}'.
format(release=image[0], url=image[2]))
with open(image_file_name, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
else:
logger.error(u'error response. http status code={code}, url={url}'.
format(code=response.status_code, url=image[2]))
def fetch_collection(config, user):
"""
Fetches a json object representing a users collection.
:param user: str representing a discogs user name
:return: a list containing release id, release title and release thumbnail
"""
logger = logging.getLogger(__name__)
max = 100
count = 0
collection = []
dw = DiscogsWrapper(config)
discogs_user = dw.discogs.user(user)
# ensure the target user has a valid collections folder and this
# collection has at least 20 releases, otherwise creating the banner isn't
# worthwhile.
if len(discogs_user.collection_folders) == 0 or len(
discogs_user.collection_folders[0].releases) < 20:
logger.error(u'User does not have a large enough collection')
raise LookupError
for rel in discogs_user.collection_folders[0].releases:
rel_id = rel.release.id
rel_title = rel.release.title
rel_thumb = rel.release.thumb
# ignore default "spacer" images, or an empty string..
if rel_thumb in ('spacer.gif', ''):
logger.warn(u'Ignoring {release} ({rid}) due to an empty thumbnail image'.
format(release=rel_title, rid=rel_id))
continue
# create a list datastructure for our results.
try:
logger.debug(u'Adding {id}->{title}->{thumb} as targets.'.format(id=
rel_id, title=rel_title, thumb=rel_thumb))
collection.append([rel_id, rel_title, rel_thumb])
except (requests.exceptions.SSLError, requests.exceptions.ConnectionError):
logger.error(u'Fetch Error at {rid}, skipping.'.format(rid=rel))
continue
if count == max:
break
count += 1
# attempt to avoid rate limiting by discogs.
time.sleep(1)
return collection
```
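A hedged driver for the two helpers above; config.ini is a placeholder and must provide the [discogs-banner] cache_directory plus the OAuth settings consumed by DiscogsWrapper:
```python
# Python 2 style to match the module above.
import ConfigParser

config = ConfigParser.ConfigParser()
config.read('config.ini')

releases = fetch_collection(config, 'some-discogs-user')  # [[id, title, thumb], ...]
fetch_images(config, releases)                            # caches thumbnails to disk
```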
|
{
"source": "jesseward/discogs-cli",
"score": 3
}
|
#### File: discogs_cli/ext/utils.py
```python
from __future__ import unicode_literals
from __future__ import print_function
import re
import six
import shlex
from prompt_toolkit.completion import Completion
from ..completions import META_LOOKUP
class TextUtils(object):
"""Utilities for parsing and matching text."""
def find_matches(self, word, collection, fuzzy):
"""Find all matches in collection for word.
:type word: str
:param word: The word before the cursor.
:type collection: iterable
:param collection: A collection of words to match.
:type fuzzy: bool
:param fuzzy: Determines whether to use fuzzy matching.
:rtype: generator
:return: Yields an instance of `prompt_toolkit.completion.Completion`.
"""
word = self._last_token(word).lower()
for suggestion in self._find_collection_matches(
word, collection, fuzzy):
yield suggestion
def get_tokens(self, text):
"""Parse out all tokens.
:type text: str
:param text: A string to be split into tokens.
:rtype: list
:return: A list of strings for each word in the text.
"""
if text is not None:
text = text.strip()
words = self._safe_split(text)
return words
return []
def _last_token(self, text):
"""Find the last word in text.
:type text: str
:param text: A string to parse and obtain the last word.
:rtype: str
:return: The last word in the text.
"""
if text is not None:
text = text.strip()
if len(text) > 0:
word = self._safe_split(text)[-1]
word = word.strip()
return word
return ''
def _fuzzy_finder(self, text, collection, case_sensitive=True):
"""Customized fuzzy finder with optional case-insensitive matching.
Adapted from: https://github.com/amjith/fuzzyfinder.
:type text: str
:param text: Input string entered by user.
:type collection: iterable
:param collection: collection of strings which will be filtered based
on the input `text`.
:type case_sensitive: bool
:param case_sensitive: Determines whether the find will be case
sensitive.
:rtype: generator
:return: Yields a list of suggestions narrowed down from `collections`
using the `text` input.
"""
suggestions = []
if case_sensitive:
pat = '.*?'.join(map(re.escape, text))
else:
pat = '.*?'.join(map(re.escape, text.lower()))
regex = re.compile(pat)
for item in collection:
if case_sensitive:
r = regex.search(item)
else:
r = regex.search(item.lower())
if r:
suggestions.append((len(r.group()), r.start(), item))
return (z for _, _, z in sorted(suggestions))
def _find_collection_matches(self, word, collection, fuzzy):
"""Yield all matching names in list.
:type word: str
:param word: The word before the cursor.
:type collection: iterable
:param collection: A collection of words to match.
:type fuzzy: bool
:param fuzzy: Determines whether to use fuzzy matching.
:rtype: generator
:return: Yields an instance of `prompt_toolkit.completion.Completion`.
"""
word = word.lower()
if fuzzy:
for suggestion in self._fuzzy_finder(word,
collection,
case_sensitive=False):
yield Completion(suggestion,
-len(word),
display_meta='display_meta')
else:
for name in sorted(collection):
if name.lower().startswith(word) or not word:
display = None
display_meta = None
if name in META_LOOKUP:
display_meta = META_LOOKUP[name]
yield Completion(name,
-len(word),
display=display,
display_meta=display_meta)
def _shlex_split(self, text):
"""Wrapper for shlex, because it does not seem to handle unicode in 2.6.
:type text: str
:param text: A string to split.
:rtype: list
:return: A list that contains words for each split element of text.
"""
if six.PY2:
text = text.encode('utf-8')
return shlex.split(text)
def _safe_split(self, text):
"""Safely splits the input text.
Shlex can't always split. For example, "\" crashes the completer.
:type text: str
:param text: A string to split.
:rtype: list
:return: A list that contains words for each split element of text.
"""
try:
words = self._shlex_split(text)
return words
except:
return text
```
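The fuzzy matcher above joins the query characters with '.*?' and then ranks candidates by match length and start position, so tighter and earlier matches sort first. The same idea in isolation (the sample collection is made up):
```python
import re

def fuzzy(text, collection):
    pattern = re.compile('.*?'.join(map(re.escape, text.lower())))
    scored = []
    for item in collection:
        match = pattern.search(item.lower())
        if match:
            scored.append((len(match.group()), match.start(), item))
    return [item for _, _, item in sorted(scored)]

print(fuzzy('art', ['artist', 'chart', 'label', 'a-r-t']))
# ['artist', 'chart', 'a-r-t'] -- shorter, earlier matches rank higher
```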
|
{
"source": "jesseward/harmonic-shuffle",
"score": 3
}
|
#### File: harmonic-shuffle/harmonic_shuffle/harmony.py
```python
import logging
logger = logging.getLogger(__name__)
class Harmony(object):
# numeric representation of the Circle of 5ths.
HARMONY = {
'G': 1,
'D': 2,
'A': 3,
'E': 4,
'B': 5,
'F#': 6,
'Gb': 6,
'Db': 7,
'C#': 7,
'Ab': 8,
'Eb': 9,
'Bb': 10,
'F': 11,
'C': 12,
'Em': 101,
'Bm': 102,
'F#m': 103,
'Gbm': 103,
'Dbm': 104,
'C#m': 104,
'G#m': 105,
'Ebm': 106,
'D#m': 106,
'A#m': 107,
'Bbm': 107,
'Fm': 108,
'Cm': 109,
'Gm': 110,
'Dm': 111,
'Am': 112,
}
def __init__(self, root_key):
"""
:param root_key: A string value representing the root key signature for the song.
"""
if root_key not in Harmony.HARMONY.keys():
raise LookupError('{key} is not recognized'.format(key=root_key))
self.root_key = root_key
self.root_key_value = Harmony.HARMONY[self.root_key]
# a list representing all compatible tone for a given root_key
self.harmonies = self._get_value(self.root_key_value) + self.down_shift() + self.up_shift() + self.minor()
def __repr__(self):
return '<Harmony key={0.root_key} value={0.root_key_value}>'.format(self)
@staticmethod
def _get_value(value):
""" performs a look-up of the HARMONY dictionary by value.
:parameter value: An integer representing a harmonic key
:return: A list of keys
:rtype list:
"""
return [note for note, fifth_value in Harmony.HARMONY.iteritems() if value == fifth_value]
def down_shift(self):
""" Fetches the next key(s) that represents a single tone downward
:return: A list representing a compatible key
:rtype list:
"""
# handle a roll over at position "1" on the wheel. in the case of 1 or 101 we down
# shift to 12 or 112
if self.root_key_value == 1:
down = Harmony._get_value(12)
elif self.root_key_value == 101:
down = Harmony._get_value(112)
else:
down = Harmony._get_value(self.root_key_value - 1)
return down
def up_shift(self):
""" Fetches the next key(s) that represents a single tone forward .
:return: A list representing a group of compatible keys
:rtype list:
"""
# handle a rollover at the apex of the wheel . when key_value is 12 or 112
# we shift forward to 1 (major) or 101 (minor)
if self.root_key_value == 12:
up = Harmony._get_value(1)
elif self.root_key_value == 112:
up = Harmony._get_value(101)
else:
up = Harmony._get_value(self.root_key_value + 1)
return up
def minor(self):
""" Fetches an adjacent key on the wheel (maj -> min or min -> maj).
:return: A list representing a group of compatible keys
:rtype list:
"""
# shift from major to minor
if self.root_key_value < 100:
return self._get_value(self.root_key_value + 100)
# otherwise shift minor to major.
else:
return self._get_value(self.root_key_value - 100)
```
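The harmonies list above collects the key itself, one step down and one step up the wheel, and the relative minor/major, which is the usual harmonic-mixing rule. For example:
```python
h = Harmony('C')    # C major sits at position 12 on the wheel
print(h)            # <Harmony key=C value=12>
print(h.harmonies)  # ['C', 'F', 'G', 'Am'] -- itself, down, up, relative minor
```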
|
{
"source": "jesseward/jellyfin-utilities",
"score": 3
}
|
#### File: jellyfin-utilities/manifest-generator/manifest.py
```python
import datetime
import json
import sys
import os
from typing import Tuple
from typing import List
class Manifest:
def __init__(self, manifest_file: str = None):
self.manifest = {}
self.manifest_file = manifest_file
if manifest_file:
self.manifest = self._read_manifest(manifest_file)
def _read_manifest(self, url: str):
with open(self.manifest_file) as fh:
return json.load(fh)
def applications(self) -> List[str]:
"""Returns the number of applications that exist within the manifest."""
return [a['name'] for a in self.manifest]
def versions(self, app_name: str) -> List[str]:
"""Returns the list of versions for a target application.
:param app_name: target application name to match"""
for a in self.manifest:
if a['name'] == app_name:
return a['versions']
def remove_application(self, app_name: str) -> None:
"""remove an application from the manifest
:param app_name: target application name to remove
"""
app_location = self._application_exists(app_name)
if self._application_exists(app_name) < 0:
raise LookupError(f'{app_name} does not exist in manifest.')
del self.manifest[app_location]
def add_application(self, guid: str, app_name: str, description: str, overview: str, owner: str, category: str):
if self._application_exists(app_name) >= 0:
raise LookupError(f'{app_name} already exists in manifest.')
self.manifest.append({
'guid': guid,
'name': app_name,
'description': description,
'overview': overview,
'owner': owner,
'category': category,
'versions': [],
})
def create(self, manifest_file: str, guid: str, app_name: str, description: str, overview: str,
owner: str, category: str) -> None:
"""create is called when building a new manifest file.
:param manifest_file: the location of the output file.
:param app_name: application target
:param version: version string
:param change_log: descriptive change log for version
:param target_abi: minimum supported Jellyfin ABI version
:param source_url: location of plugin zipfile
:param checksum: the md5 checksum of the plugin zip file
:param timestamp: addition timestamp.
"""
self.manifest_file = manifest_file
self.manifest = [{
'guid': guid,
'name': app_name,
'description': description,
'overview': overview,
'owner': owner,
'category': category,
'versions': [],
}]
def add_version(self, app_name: str, version: str, change_log: str, target_abi: str,
source_url: str, checksum: str, timestamp: str):
"""insert a new version into the manifest.
:param app_name: application target
:param version: version string
:param change_log: descriptive change log for version
:param target_abi: minimum supported Jellyfin ABI version
:param source_url: location of plugin zipfile
:param checksum: the md5 checksum of the plugin zip file
:param timestamp: addition timestamp.
"""
app_location, version_location = self._version_exists(app_name, version)
if version_location >= 0:
raise LookupError(f'{app_name}, {version} already exists')
self.manifest[app_location]['versions'].insert(0, {
'version': version,
'changelog': change_log,
'targetAbi': target_abi,
'sourceUrl': source_url,
'checksum': checksum,
'timestamp': timestamp})
def remove_version(self, app_name: str, version: str) -> None:
"""removes a version from the catalog.
:param app_name: the name of application to target
:param version: version identifier to target
"""
app_location, version_location = self._version_exists(app_name, version)
if version_location < 0:
raise LookupError(f'{app_name}, {version} does not exist, unable to remove')
del self.manifest[app_location]['versions'][version_location]
def _version_exists(self, app_name: str, version: str) -> Tuple[int, int]:
"""
Checks the manifest object to determine if the target app_name already has version information
present. If the app_name, version combination exists a tuple of the application, version index
locations is returned. -1 is returned if the application or version is _not_ found.
:param app_name: the name of application to target
:param version: the version identifier to look-up
"""
app_location = self._application_exists(app_name)
if app_location < 0:
raise LookupError(f'{app_name} does not exist in manifest')
for i, ver in enumerate(self.manifest[app_location]['versions']):
if ver['version'] == version:
return app_location, i
return app_location, -1
def _application_exists(self, app_name: str) -> int:
"""returns the index within the list if the application exists. -1 is return if the
target is _not_ found.
:param app_name: the name of the target application.
"""
for i, application in enumerate(self.manifest):
if app_name == application['name']:
return i
return -1
def close(self) -> None:
"""Persist the manifest metadata to disk."""
with open(self.manifest_file, 'w') as fh:
json.dump(self.manifest, fh)
if __name__ == '__main__':
# python manifest.py -f ../manifest.json -app LastFM application -desc "Scrobble LastFM plays with Jellyfin" \
# -ov "A plugin that scrobbles your Jellyfin music to LastFM" -owner "<NAME>" -cat "music" -guid "ASDFSDF"
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', help='Manifest file name')
parser.add_argument('-app', required=True, help='Application name')
parser.add_argument('-create', action='store_true', help='Create manifest file if doesn\'t exist')
subparsers = parser.add_subparsers(dest='command')
# application options
app_parser = subparsers.add_parser('application')
app_parser.add_argument('-desc', help='Application description')
app_parser.add_argument('-ov', help='Application overview')
app_parser.add_argument('-owner', help='Application owner')
app_parser.add_argument('-cat', help='Application category')
app_parser.add_argument('-guid', help='Plugin GUID')
# version options
version_parser = subparsers.add_parser('version')
version_parser.add_argument('-ver', help='Version string')
version_parser.add_argument('-cl', help='Version changelog')
version_parser.add_argument('-ck', help='Version checksum')
version_parser.add_argument('-abi', help='Version target abi')
version_parser.add_argument('-url', help='Version url')
version_parser.add_argument('-ts', help='Version timestamp', required=False,
default=datetime.datetime.utcnow().isoformat(timespec='seconds') + 'Z')
del_version = subparsers.add_parser('delete-version')
del_version.add_argument('-ver', help='version to remove from manifest')
subparsers.add_parser('delete-application')
args = parser.parse_args()
if not args.f:
parser.error('-f was not supplied.')
if args.create and not os.path.isfile(args.f):
with open(args.f, 'w') as fh:
json.dump([], fh)
try:
m = Manifest(manifest_file=args.f)
except FileNotFoundError:
sys.exit(f'[ERROR] unable to locate {args.f}, re-run with -create')
if args.command == 'delete-application':
m.remove_application(args.app)
if args.command == 'delete-version':
m.remove_version(args.app, args.ver)
if args.command == 'application':
try:
m.add_application(args.guid, args.app, args.desc, args.ov, args.owner, args.cat)
except LookupError as e:
sys.exit(f'[ERROR] Failed to add application. {e}')
if args.command == 'version':
try:
m.add_version(args.app, args.ver, args.cl, args.abi, args.url, args.ck, args.ts)
except LookupError as e:
sys.exit(f'[ERROR] Failed to insert new version. {e}')
m.close()
```
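Besides the argparse entry point, the Manifest class can be driven directly; a hedged sketch where the GUID, URL and checksum are placeholders:
```python
m = Manifest()
m.create('manifest.json', '00000000-0000-0000-0000-000000000000', 'LastFM',
         'Scrobble plays with Jellyfin', 'Scrobbles Jellyfin music to Last.fm',
         'owner', 'music')
m.add_version('LastFM', '1.0.0.0', 'initial release', '10.6.0.0',
              'https://example.com/lastfm_1.0.0.0.zip',
              'd41d8cd98f00b204e9800998ecf8427e', '2021-01-01T00:00:00Z')
m.close()  # persists manifest.json to disk
```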
|
{
"source": "jesseward/plex-lastfm-scrobbler",
"score": 2
}
|
#### File: plex-lastfm-scrobbler/plex_scrobble/plex_monitor.py
```python
import io
import logging
import os
import re
import time
import xml.etree.ElementTree as ET
import requests
import pylast
from .scrobble_cache import ScrobbleCache
def parse_line(log_line):
"""
Matches known audio metadata log entries entries against input (log_line)
:param log_line: a str containing a plex media server log line
:return: plex media server metadata id
:rtype: integer (or None)
"""
logger = logging.getLogger(__name__)
REGEX = [
# universal-transcoder
re.compile('.*GET\s\/music\/:\/transcode\/universal\/start\.mp3.*metadata%2F(\d+)\&.*'),
# stream based transcoder
re.compile('.*\sDEBUG\s-\sLibrary\sitem\s(\d+)\s\'.*\'\sgot\splayed\sby\saccount.*')
]
for regex in REGEX:
m = regex.match(log_line)
if m:
logger.info('Found played song and extracted library id "{l_id}" from plex log '.format(l_id=m.group(1)))
return m.group(1)
def fetch_metadata(l_id, config):
""" retrieves the metadata information from the Plex media Server api. """
logger = logging.getLogger(__name__)
url = '{url}/library/metadata/{l_id}'.format(url=config['plex-scrobble']['mediaserver_url'], l_id=l_id)
logger.info('Fetching library metadata from {url}'.format(url=url))
headers = None
if 'plex_token' in config.get('plex-scrobble', {}):
headers = {'X-Plex-Token': config['plex-scrobble']['plex_token']}
# fail if request is greater than 2 seconds.
try:
metadata = requests.get(url, headers=headers).text
except requests.exceptions.RequestException as e:
logger.error('error reading from {url} "{error}"'.format(url=url, error=e))
return False
tree = ET.fromstring(metadata)
track = tree.find('Track')
# BUGFIX: https://github.com/jesseward/plex-lastfm-scrobbler/issues/7
if track is None:
logger.info('Ignoring played item library-id={l_id}, could not determine audio library information.'.
format(l_id=l_id))
return False
# if present use originalTitle. This appears to be set if
# the album is various artist
artist = track.get('originalTitle')
if not artist:
artist = track.get('grandparentTitle')
song = track.get('title')
# BUGFIX : https://github.com/jesseward/plex-lastfm-scrobbler/issues/19
# add support for fetching album metadata from the track object.
album = track.get('parentTitle')
if not album:
logger.warning('unable to locate album name for library-id={l_id}'.format(
l_id=l_id))
album = None
if not all((artist, song)):
logger.warning('unable to retrieve metadata keys for library-id={l_id}'.
format(l_id=l_id))
return False
return {'title': song.encode('utf-8'), 'artist': artist.encode('utf-8'), 'album': album.encode('utf-8')}
def monitor_log(config):
logger = logging.getLogger(__name__)
st_mtime = False
last_played = None
user_name = config['lastfm']['user_name']
password = config['lastfm']['password']
api_key = config['lastfm']['api_key']
api_secret = config['lastfm']['api_secret']
cache_location = config['plex-scrobble']['cache_location']
try:
f = io.open(config['plex-scrobble']['mediaserver_log_location'], 'r', encoding='utf-8')
except IOError:
logger.error('Unable to read log-file {0}. Shutting down.'.format(config[
'plex-scrobble']['mediaserver_log_location']))
return
f.seek(0, 2)
try:
lastfm = pylast.LastFMNetwork(
api_key=api_key,
api_secret=api_secret,
username=user_name,
password_hash=pylast.md5(password))
except Exception as e:
logger.error('FATAL {0}. Aborting execution'.format(e))
os._exit(1)
while True:
time.sleep(.03)
# reset our file handle in the event the log file was not written to
# within the last 60 seconds. This is a very crude attempt to support
# the log file i/o rotation detection cross-platform.
if int(time.time()) - int(os.fstat(f.fileno()).st_mtime) >= 60:
if int(os.fstat(f.fileno()).st_mtime) == st_mtime:
continue
logger.debug('Possible log file rotation, resetting file handle (st_mtime={mtime})'.format(
mtime=time.ctime(os.fstat(f.fileno()).st_mtime)))
f.close()
try:
f = open(config['plex-scrobble']['mediaserver_log_location'])
except IOError:
logger.error('Unable to read log-file {0}. Shutting down.'.format(config[
'plex-scrobble']['mediaserver_log_location']))
return
f.seek(0, 2)
st_mtime = int(os.fstat(f.fileno()).st_mtime)
line = f.readline()
# read all new lines starting at the end. We attempt to match
# based on a regex value. If we have a match, extract the media file
# id and send it off to last.fm for scrobble.
if line:
played = parse_line(line)
if not played:
continue
# when playing via a client, log lines are duplicated (seen via iOS)
# this skips dupes. Note: will also miss songs that have been repeated
if played == last_played:
logger.warning('Dupe detection : {0}, not submitting'.format(last_played))
continue
metadata = fetch_metadata(played, config)
if not metadata:
continue
# submit to last.fm else we add to the retry queue.
try:
logger.info('Attempting to submit {0} - {1} to last.fm'.format(
metadata['artist'], metadata['title']))
lastfm.scrobble(timestamp=int(time.time()), **metadata)
except Exception as e:
logger.error(u'unable to scrobble {0}, adding to cache. error={1}'.format(
metadata, e))
cache = ScrobbleCache(api_key, api_secret, user_name, password,
cache_location=cache_location)
cache.add(metadata['artist'], metadata['title'], metadata['album'])
cache.close
last_played = played
```
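parse_line only has to recover the library item id from one of the two known log formats; a quick check against a made-up log line in the second (DEBUG) format:
```python
# Hypothetical Plex Media Server log line; the id 54321 is illustrative.
line = ("Jan 01, 2021 12:00:00.000 [0x7f] DEBUG - "
        "Library item 54321 'Some Track' got played by account 1!")
print(parse_line(line))  # prints '54321', ready to hand to fetch_metadata()
```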
|
{
"source": "jesseward/py64",
"score": 3
}
|
#### File: py64/loaders/t64.py
```python
import sys
import struct
#import petascii
import entries
from entries import Entry
def find_distances(items):
h, t = items[0], items[1:]
if len(t) == 0:
return []
else:
hh, tt = t[0], t[1:]
return [(hh[0] - h[0], h[1])] + find_distances(t)
class Loader(entries.Loader):
def __init__(self):
entries.Loader.__init__(self)
self.entries = []
self.offsets = []
def parse(self, stream, file_name):
beginning_pos = int(stream.tell())
stream.seek(0, 2)
end_pos = int(stream.tell())
stream.seek(0)
header_format = "<32s2BHHH24s"
header_size = struct.calcsize(header_format)
data = stream.read(header_size)
version = [0, 0]
reserved = 0
assert(len(data) == header_size)
#(magic, 0, 1, 1, 0, 1, 0, 0, 0, "->ZYRON'S PD<- ")
magic, version[0], version[1], max_files, cur_files, reserved, user_description = struct.unpack(header_format, data)
# assert(version[0] == 0) # whatever
assert(version[1] == 1)
# first string = "C64 tape image file", padded with $00.
magic = magic.rstrip("\0")
assert(magic == "C64 tape image file" or magic.startswith("C64S tape file") or magic.find("TAPE") > -1 or magic.find("tape") > -1)
user_description = user_description.rstrip("\0") #.decode("petascii") # they can't decide.
self.entries = []
# usually 30 entries.
self.entries = [self.parse_entry(stream) for i in range(cur_files)]
self.offsets = sorted(map(lambda entry: (entry.tape_pos, entry), self.entries))
self.offsets.append((end_pos, None))
for size, entry in find_distances(self.offsets):
entry.end_addr = entry.start_addr + size
self.current_entry_index = -1
self.stream = stream
return(self)
def parse_entry(self, stream):
format = "<BBHHHII16s"
size = struct.calcsize(format)
data = stream.read(size)
assert(len(data) == size)
B_used, file_type, start_addr, end_addr, reserved_a, tape_pos, reserved_b, file_name = struct.unpack(format, data)
# B_used > 1: memory snapshot
#file_type = { # 1541 file type
# 0x82: "PRG",
# 0x81: "SEQ",
# # etc. # !=0 => PRG
#}.get(file_type) or file_type
# end_addr == 0xc3c6 is by a faulty tool; TODO loading all entries, sort by ascending order of offset into T64 (+2 for load addr which is part of the file).
B_used, file_type, start_addr, end_addr, reserved_a, tape_pos, reserved_b, file_name = struct.unpack(format, data)
assert(B_used in [0,1])
file_name = file_name.rstrip(b"\x20") # b"\0")
# 0x53
#file_name = file_name.decode("petascii")
return(Entry(B_used = B_used > 0,
file_type = file_type,
start_addr = start_addr,
end_addr = 0, # unreliable
reserved_a = reserved_a,
tape_pos = tape_pos,
reserved_b = reserved_b,
file_name = file_name))
def find_next_entry(self, file_name):
file_name = file_name.rstrip(b"\xA0")
while self.current_entry_index < len(self.entries):
entry = self.entries[self.current_entry_index]
if file_name == "" or entry.file_name == file_name:
return(entry)
self.current_entry_index += 1
return(None)
def load_header(self, file_name):
#type_, file_name, start_addr, stop_addr, data = tape_loader.load_header(file_name)
self.current_entry_index += 1
print("loading header", file_name)
entry = self.find_next_entry(file_name)
return(entry)
#return(entry.file_type, entry.file_name, entry.start_addr, entry.end_addr)
def load_data(self, file_name):
print("loading data")
entry = self.find_next_entry(file_name)
self.stream.seek(entry.tape_pos)
data = self.stream.read(entry.end_addr - entry.start_addr)
#data = tape_loader.load_data(file_name)
return(data)
if __name__ == "__main__":
print(Loader().parse(open(sys.argv[1], "rb"), "F").entries)
```
#### File: py64/py64/memory.py
```python
class Memory(object):
def __init__(self):
self.B_can_write = True # in the instance because of ShedSkin
# def read_memory(self, address, size = 1):
# return 0xFF
#
# def write_memory(self, address, value, size):
# pass
```
#### File: py64/py64/palette.py
```python
colors = [
[0, 0, 0, 0xFF],
[255, 255, 255, 0xFF],
[116, 67, 53, 0xFF],
[124, 172, 186, 0xFF],
[123, 72, 144, 0xFF],
[100, 151, 79, 0xFF],
[63, 50, 133, 0xFF],
[191, 205, 122, 0xFF],
[123, 91, 47, 0xFF],
[79, 69, 0, 0xFF],
[163, 114, 101, 0xFF],
[80, 80, 80, 0xFF],
[120, 120, 120, 0xFF],
[164, 215, 142, 0xFF],
[120, 106, 189, 0xFF],
[159, 159, 159, 0xFF],
]
def get_RGBA32_pixel(index):
item = colors[index]
#return(item[3] | (item[2] << 8) | (item[1] << 16) | (item[0] << 24))
return(item[0] | (item[1] << 8) | (item[2] << 16) | (item[3] << 24))
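# Worked example (values taken from the table above): index 1 is
# [255, 255, 255, 0xFF], so get_RGBA32_pixel(1) == 255 | (255 << 8) | (255 << 16) | (0xFF << 24)
# == 0xFFFFFFFF, while index 0 (opaque black) packs to 0xFF000000.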
```
#### File: py64/py64/sprite.py
```python
import sys
SPRITE_COUNT = 8
#sprite = open(sys.argv[1], "rb").read()[2:]
WIDTH = 24
HEIGHT = 21
#def from_high_resolution_sprite(sprite, primary_color):
# result = []
# for cell in sprite:
# for item in [
# primary_color if (cell & (1 << (7 - column_i))) != 0
# else [0,0,0,0] for column_i in range(8)]:
# result += item
# return(result)
#def from_multi_color_sprite(sprite, primary_color, multicolor_0, multicolor_1):
# result = []
# #for i in range(8):
# # result += [0xFF,0,0,0xFF]
# masks = [0x03, 0x0C, 0x30, 0xC0]
# colors = [
# [0x00, 0x00, 0x00, 0x00],
# multicolor_0, # $D025
# primary_color, # $D027..$D02E
# multicolor_1, # $D026
# ]
# for cell in sprite:
# for item in reversed([colors[(cell & masks[column_i]) >> (column_i * 2)] for column_i in range(4)]):
# result += item * 2
# return(result)
#data = from_high_resolution_sprite(sprite)
#frame_size = 4 * 64 * 8 # WIDTH * 4 * HEIGHT + 4
#data = from_multi_color_sprite(sprite)
#def calculate_pixbuf_data(sprite_data, primary_color, B_multicolor, multicolor_0, multicolor_1):
# if B_multicolor:
# data = from_multi_color_sprite(map(ord, sprite_data), primary_color, multicolor_0, multicolor_1)
# else:
# data = from_high_resolution_sprite(map(ord, sprite_data), primary_color)
# return("".join(map(chr, data)))
```
#### File: py64/py64/vic_ii.py
```python
import sys
#import time
import memory
import sprite
from sprite import SPRITE_COUNT
import palette
import time
import screens
A_X_SPRITE_0 = 0x00
A_Y_SPRITE_0 = 0x01
A_X_SPRITE_1 = 0x02
A_Y_SPRITE_1 = 0x03
A_X_SPRITE_2 = 0x04
A_Y_SPRITE_2 = 0x05
A_X_SPRITE_3 = 0x06
A_Y_SPRITE_3 = 0x07
A_X_SPRITE_4 = 0x08
A_Y_SPRITE_4 = 0x09
A_X_SPRITE_5 = 0x0A
A_Y_SPRITE_5 = 0x0B
A_X_SPRITE_6 = 0x0C
A_Y_SPRITE_6 = 0x0D
A_X_SPRITE_7 = 0x0E
A_Y_SPRITE_7 = 0x0F
A_MSB_X = 0x10
A_CONTROL_1 = 0x11
A_RASTER_COUNTER = 0x12
A_LIGHT_PEN_X = 0x13
A_LIGHT_PEN_Y = 0x14
A_SPRITE_ENABLED = 0x15 # bits
A_CONTROL_2 = 0x16
A_SPRITE_Y_EXPANSION = 0x17
A_MEMORY_POINTERS = 0x18 # VM13..CB11, dummy bit (bits).
A_INTERRUPT_STATUS = 0x19
A_INTERRUPT_ENABLED = 0x1A
A_SPRITE_DATA_PRIORITY = 0x1B
A_SPRITE_MULTICOLOR = 0x1C
A_SPRITE_X_EXPANSION = 0x1D
A_SPRITE_SPRITE_COLLISION = 0x1E
A_SPRITE_DATA_COLLISION = 0x1F
A_BORDER_COLOR = 0x20
A_BACKGROUND_COLOR_0 = 0x21
A_BACKGROUND_COLOR_1 = 0x22
A_BACKGROUND_COLOR_2 = 0x23
A_BACKGROUND_COLOR_3 = 0x24
A_SPRITE_MULTICOLOR_0 = 0x25
A_SPRITE_MULTICOLOR_1 = 0x26
A_COLOR_SPRITE_0 = 0x27
A_COLOR_SPRITE_1 = 0x28
A_COLOR_SPRITE_2 = 0x29
A_COLOR_SPRITE_3 = 0x2A
A_COLOR_SPRITE_4 = 0x2B
A_COLOR_SPRITE_5 = 0x2C
A_COLOR_SPRITE_6 = 0x2D
A_COLOR_SPRITE_7 = 0x2E
class Settings(object):
def __init__(self): # VIC):
# self.VIC = VIC
self.raw_memory_pointers = 0
self.first_column = 0
self.mode = 0
self.first_row = 0
self.last_column = 0
self.last_row = 0
self.character_bitmaps_offset = 0 # that's wrong.
self.old_VIC_bank = -1
self.VIC_bank = 0
self.border_color = 0
self.background_color_0 = 0
self.background_color_1 = 0
self.background_color_2 = 0
self.background_color_3 = 0
self.video_offset = 0
self.viewport_row = 0
# the ones that have bit value= 1 are in the back.
self.sprite_priority = 0
self.sprite_enabled = 0 # bitmask
self.sprite_primary_color = SPRITE_COUNT * [0]
self.sprite_multicolor_enabled = 0 # bitmask
self.sprite_multicolor_0 = 0
self.sprite_multicolor_1 = 0
self.sprite_expand_horizontally = 0 # bitmask
self.sprite_expand_vertically = 0 # bitmask
self.sprite_X = SPRITE_COUNT * [0]
self.sprite_Y = SPRITE_COUNT * [0]
#self.controls = controls
#self.controls.handle_key_press("foo")
#self.controls.handle_key_release("bar")
#def repaint(self):
# pass
def unprepare(self):
self.old_VIC_bank = -1
class VIC_II(memory.Memory):
def __init__(self, C64, MMU, CIA2, char_ROM):
self.B_can_write = True # in the instance because of ShedSkin
self.MMU = MMU
self.C64 = C64
self.char_ROM = char_ROM
self.CIA2 = CIA2
self.B_active = True
self.B_bitmap = False
self.control_1 = 0
self.control_2 = 0
self.B_clip_address = False # FIXME default?
self.B_MCM = False # FIXME default?
self.props = Settings()
self.MSB_X = 0
#self.set_control_1(0) # FIXME default.
#self.set_control_2(0) # FIXME default.
palette.get_RGBA32_pixel(0)
self.screen = screens.Screen(self, CIA2)
self.screen.get_rendered_pixbuf() # ShedSkin
def increase_raster_position(self):
self.screen.increase_raster_position()
if (self.screen.raw_interrupt_status != 0 and
self.screen.B_enable_raster_interrupt):
self.C64.cause_interrupt()
return True
def unprepare(self):
self.props.unprepare()
def set_control_1(self, value):
self.control_1 = value
self.props.first_row = (0) + (51 if value & 8 else 55) # set: 25 lines
self.props.last_row = (0) + (250 if value & 8 else 246)
self.props.viewport_row = value & 7
value & 16 # DEN
self.B_bitmap = (value & 32) != 0 # BMM
self.B_clip_address = (value & 64) != 0 # ECM # bits 9 and 10 low.
#value & 128 # RST8 # TODO this is also used for multicolor bitmap mode
old_mode = self.props.mode
self.props.mode = (self.props.mode & 4) | ((value >> 5) & 3)
# self.props.bitmap_mode = (1 if self.B_bitmap else 0) + 2
# * (1 if self.B_clip_address else 0)
self.screen.breakpoint_raster_position = (
self.screen.breakpoint_raster_position & 0xFF) | (value & 128)
# if old_mode != self.props.mode:
# print("new mode is $%X" % self.props.mode)
# time.sleep(10)
def set_control_2(self, value):
self.control_2 = value
self.props.first_column = 24 if value & 8 else 31
self.props.last_column = 343 if value & 8 else 334
self.props.viewport_column = value & 7
self.B_MCM = (value & 16) != 0
old_mode = self.props.mode
self.props.mode = (self.props.mode & 3) | (4 if self.B_MCM else 0)
# if old_mode != self.props.mode:
# print("new mode is $%X" % self.props.mode)
#time.sleep(10)
# TODO 32, 64, 128
def set_memory_pointers(self, value):
self.raw_memory_pointers = value
value >>= 1
CB_13_12_11 = value & 0x7 # character bitmaps or bitmaps.
# TODO in bitmap mode, CB_13 only! (thus 2KiB/8KiB steps).
VM_13_12_11_10 = value >> 3 # video matrix (movable in 1KiB steps).
self.props.character_bitmaps_offset = (1 << 11) * CB_13_12_11
self.props.video_offset = (1 << 10) * VM_13_12_11_10
self.props.unprepare()
if False: # ShedSkin
code_color = self.VIC_read_memory(0, 1) # ShedSkin
character_data = self.load_chunk(0, 8 * 256) # ShedSkin
character_data = self.load_12_chunk(0, 8 * 256) # ShedSkin
def load_chunk(self, offset, size):
#address = VIC_bank_offset + offset
return [self.VIC_read_memory(offset + i, 1) for i in range(size)]
def load_12_chunk(self, offset, size):
#address = VIC_bank_offset + offset
return [self.VIC_read_memory(offset + i, 2) for i in range(size)]
def repaint(self):
self.props.VIC_bank = self.CIA2.VIC_bank
#self.props.repaint()
def read_color_RAM(self, address):
return self.MMU.read_memory(0xD800 + (address & 0x3FF))
def VIC_read_memory(self, address, size=1):
if (self.CIA2.VIC_bank & 1) == 0: # have Char ROM
if address >= 0x1000 and address < 0x2000:
assert size == 1, (
'VIC_II.VIC_read_memory: address within char ROM')
#| (self.read_color_RAM(address) << 8). follow below..
return self.char_ROM.read_memory(address & 0xFFF, size)
# Video_Matrix|Chargen|Sprite_Data_Pointers|Sprite_Data.
if self.B_clip_address:
            # FIXME does that also happen with char_ROM?
address = address &~ (1 << 9) &~ (1 << 10)
# FIXME return self. | (self.read_color_RAM(address) << 8)
        # TODO invalidate all the sprites once this changes
VIC_bank_offset = self.CIA2.VIC_bank * 16384
# assert(size == 2)
return self.MMU.read_memory((address & 0x3FFF)
| VIC_bank_offset, 1) | (((self.read_color_RAM(address)
& 0xFF) << 8) if size > 1 else 0)
def set_background_color_0(self, value):
self.props.background_color_0 = value & 15
def set_background_color_1(self, value):
self.props.background_color_1 = value & 15
def set_background_color_2(self, value):
self.props.background_color_2 = value & 15
def set_background_color_3(self, value):
self.props.background_color_3 = value & 15
def set_sprite_priority(self, value):
self.props.sprite_priority = value
def set_sprite_enabled(self, value):
self.props.sprite_enabled = value
def set_sprite_multicolor_0(self, value):
self.props.sprite_multicolor_0 = value
def set_sprite_multicolor_1(self, value):
self.props.sprite_multicolor_1 = value
def set_border_color(self, value):
self.props.border_color = value & 15
def set_sprite_primary_color(self, index, value):
self.props.sprite_primary_color[index] = value
def set_sprite_multicolor_enabled(self, value):
mask = self.props.sprite_multicolor_enabled ^ value
self.props.sprite_multicolor_enabled = value
def set_sprite_expand_horizontally(self, value):
mask = self.props.sprite_expand_horizontally ^ value
self.props.sprite_expand_horizontally = value
def set_sprite_expand_vertically(self, value):
mask = self.props.sprite_expand_vertically ^ value
self.props.sprite_expand_vertically = value
def set_sprite_X(self, index, value):
self.props.sprite_X[index] = value | (256 * ((self.MSB_X &
(1 << index)) and 1))
def set_MSB_X(self, value):
self.MSB_X = value
for index in range(8):
self.props.sprite_X[index] = (self.props.sprite_X[index]
& 0xFF) | (256 * ((self.MSB_X & (1 << index)) and 1))
def set_sprite_Y(self, index, value):
self.props.sprite_Y[index] = value
def read_memory(self, address, size=1):
assert size == 1, 'VIC_II.read_memory: size==1'
address = address & 0x3F
# TODO The registers $d01e and $d01f are automatically
# cleared on reading.
self.control_1 = (self.control_1 & 127) | (
(self.screen.client_raster_position & 0x100) >> 1)
slots = {
A_BORDER_COLOR: self.props.border_color,
A_BACKGROUND_COLOR_0: self.props.background_color_0,
A_BACKGROUND_COLOR_1: self.props.background_color_1,
A_BACKGROUND_COLOR_2: self.props.background_color_2,
A_BACKGROUND_COLOR_3: self.props.background_color_3,
A_RASTER_COUNTER: self.screen.client_raster_position & 0xFF,
A_X_SPRITE_0: self.props.sprite_X[0] & 0xFF,
A_Y_SPRITE_0: self.props.sprite_Y[0],
A_X_SPRITE_1: self.props.sprite_X[1] & 0xFF,
A_Y_SPRITE_1: self.props.sprite_Y[1],
A_X_SPRITE_2: self.props.sprite_X[2] & 0xFF,
A_Y_SPRITE_2: self.props.sprite_Y[2],
A_X_SPRITE_3: self.props.sprite_X[3] & 0xFF,
A_Y_SPRITE_3: self.props.sprite_Y[3],
A_X_SPRITE_4: self.props.sprite_X[4] & 0xFF,
A_Y_SPRITE_4: self.props.sprite_Y[4],
A_X_SPRITE_5: self.props.sprite_X[5] & 0xFF,
A_Y_SPRITE_5: self.props.sprite_Y[5],
A_X_SPRITE_6: self.props.sprite_X[6] & 0xFF,
A_Y_SPRITE_6: self.props.sprite_Y[6],
A_X_SPRITE_7: self.props.sprite_X[7] & 0xFF,
A_Y_SPRITE_7: self.props.sprite_Y[7],
A_MSB_X: self.MSB_X,
A_CONTROL_1: self.control_1,
#A_LIGHT_PEN_X = 0x13
#A_LIGHT_PEN_Y = 0x14
A_SPRITE_ENABLED: self.props.sprite_enabled,
A_CONTROL_2: self.control_2,
A_SPRITE_Y_EXPANSION: self.props.sprite_expand_vertically,
A_MEMORY_POINTERS: self.raw_memory_pointers,
A_INTERRUPT_STATUS: self.screen.raw_interrupt_status,
A_INTERRUPT_ENABLED: (
1 if self.screen.B_enable_raster_interrupt else 0) |
(2 if self.screen.B_enable_sprite_background_collision_interrupt
else 0) |
(4 if self.screen.B_enable_sprite_sprite_collision_interrupt
else 0),
A_SPRITE_DATA_PRIORITY: self.props.sprite_priority,
A_SPRITE_MULTICOLOR: self.props.sprite_multicolor_enabled,
A_SPRITE_X_EXPANSION: self.props.sprite_expand_horizontally,
#A_SPRITE_SPRITE_COLLISION = 0x1E
#A_SPRITE_DATA_COLLISION = 0x1F
#A_BACKGROUND_COLOR_1 = 0x22
#A_BACKGROUND_COLOR_2 = 0x23
#A_BACKGROUND_COLOR_3 = 0x24
A_SPRITE_MULTICOLOR_0: self.props.sprite_multicolor_0,
A_SPRITE_MULTICOLOR_1: self.props.sprite_multicolor_1,
A_COLOR_SPRITE_0: self.props.sprite_primary_color[0],
A_COLOR_SPRITE_1: self.props.sprite_primary_color[1],
A_COLOR_SPRITE_2: self.props.sprite_primary_color[2],
A_COLOR_SPRITE_3: self.props.sprite_primary_color[3],
A_COLOR_SPRITE_4: self.props.sprite_primary_color[4],
A_COLOR_SPRITE_5: self.props.sprite_primary_color[5],
A_COLOR_SPRITE_6: self.props.sprite_primary_color[6],
A_COLOR_SPRITE_7: self.props.sprite_primary_color[7],
}
return slots[address] if address in slots else 0xFF
def write_memory(self, address, value, size):
assert isinstance(value, int), (
'VIC_II.write_memory: value is an integer')
# TODO The registers $d01e and $d01f cannot be written.
address = address & 0x3F
value = (value)
# TODO 47 control registers.
# 34 for sprite control.
#print("VIC-II $%X := %r" % (address, value))
#time.sleep(5)
if address == A_CONTROL_1:
return self.set_control_1(value)
elif address == A_CONTROL_2:
return self.set_control_2(value)
elif address == A_MEMORY_POINTERS:
return self.set_memory_pointers(value)
elif address == A_BORDER_COLOR:
return self.set_border_color(value)
elif address == A_BACKGROUND_COLOR_0:
return self.set_background_color_0(value)
elif address == A_BACKGROUND_COLOR_1:
return self.set_background_color_1(value)
elif address == A_BACKGROUND_COLOR_2:
return self.set_background_color_2(value)
elif address == A_BACKGROUND_COLOR_3:
return self.set_background_color_3(value)
elif address == A_SPRITE_DATA_PRIORITY:
return self.set_sprite_priority(value)
elif address == A_SPRITE_ENABLED:
return self.set_sprite_enabled(value)
elif address == A_COLOR_SPRITE_0:
return self.set_sprite_primary_color(0, value)
elif address == A_COLOR_SPRITE_1:
return self.set_sprite_primary_color(1, value)
elif address == A_COLOR_SPRITE_2:
return self.set_sprite_primary_color(2, value)
elif address == A_COLOR_SPRITE_3:
return self.set_sprite_primary_color(3, value)
elif address == A_COLOR_SPRITE_4:
return self.set_sprite_primary_color(4, value)
elif address == A_COLOR_SPRITE_5:
return self.set_sprite_primary_color(5, value)
elif address == A_COLOR_SPRITE_6:
return self.set_sprite_primary_color(6, value)
elif address == A_COLOR_SPRITE_7:
return self.set_sprite_primary_color(7, value)
elif address == A_SPRITE_MULTICOLOR:
return self.set_sprite_multicolor_enabled(value)
elif address == A_SPRITE_MULTICOLOR_0:
return self.set_sprite_multicolor_0(value)
elif address == A_SPRITE_MULTICOLOR_1:
return self.set_sprite_multicolor_1(value)
elif address == A_SPRITE_X_EXPANSION:
return self.set_sprite_expand_horizontally(value)
elif address == A_SPRITE_Y_EXPANSION:
return self.set_sprite_expand_vertically(value)
elif address == A_RASTER_COUNTER:
self.screen.breakpoint_raster_position = (
self.screen.breakpoint_raster_position & 0x100) | value
elif address == A_INTERRUPT_STATUS:
self.screen.raw_interrupt_status = (
self.screen.raw_interrupt_status & (value ^ 0xFF))
elif address == A_INTERRUPT_ENABLED:
self.screen.B_enable_raster_interrupt = (value & 1) != 0
self.screen.B_enable_sprite_background_collision_interrupt = (
value & 2) != 0
self.screen.B_enable_sprite_sprite_collision_interrupt = (
value & 4) != 0
# TODO light pen
elif address < 0x10: # coordinates
if address & 1:
return self.set_sprite_Y(address >> 1, value)
else:
return self.set_sprite_X(address >> 1, value)
elif address == A_MSB_X:
return self.set_MSB_X(value)
#}.get(address) or ignore)(value)
"""
[$11]=$1B, [$16]=$8: hires text mode (global bg in $21).
[$11]=$1B, [$16]=216: multicolor text mode.
[$11]=$3B, [$16]=8: hires bitmap mode.
[$11]=$3B, [$16]=216: multicolor bitmap mode.
[$11]=$5B, [$16]=8: extended (background color) text mode.
[$16]=5: !!!
http://codebase64.org/doku.php?id=base:built_in_screen_modes
"""
# memory address $D02F (extra keys). Try to set to something
# else than $FF. If it works, it's a C128.
if __name__ == '__main__':
pass
```
#### File: py64/tests/test_opcodes.py
```python
import unittest
import py64.opcodes as op_code
class OpCodeTest(unittest.TestCase):
def setUp(self):
self.opcode = op_code.instruction_set[0x4C]
def test_instruction_set(self):
""" Verify integrity of instruction set data. """
self.assertTrue(self.opcode.instruction == 'JMP')
self.assertTrue(self.opcode.address_id == op_code.ABSOLUTE)
self.assertTrue(self.opcode.size == 3)
self.assertTrue(self.opcode.cycles == 3)
if __name__ == '__main__':
unittest.main()
```
#### File: py64/tests/test_prg_loader.py
```python
import unittest
from StringIO import StringIO
from py64.loaders.prg import Loader
class PrgTest(unittest.TestCase):
def setUp(self):
file_bytes = StringIO(
'\x01\x08\x0f\x08\xcf\x07\x9e\x32\x30\x36\x35\x20\x41\x42\x43\x00'
)
self.file_name = 'TestFileName.PRG'
self.loader = Loader()
self.loader.parse(file_bytes, self.file_name)
def test_prg_start_addr(self):
""" Is the starting address correctly read. """
self.assertTrue(self.loader.start_addr == 2049)
def test_prg_end_addr(self):
""" Is the end address correctly read. """
self.assertTrue(self.loader.end_addr == 2064)
def test_prg_size(self):
""" Is the program size correctly identified. """
self.assertTrue(self.loader.size == 16)
def test_file_type(self):
""" Is the file of type=prg. """
self.assertTrue(self.loader.FILE_TYPE == 0x82)
def test_prg_header_loader(self):
""" Verifiy header data. """
header = self.loader.load_header()
self.assertTrue(header.start_addr == 2049)
self.assertTrue(header.end_addr == 2064)
self.assertTrue(header.reserved_a == 0)
self.assertTrue(header.tape_pos == 0)
self.assertTrue(header.file_name == self.file_name)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jessewebb/csgo-icon-extractor",
"score": 3
}
|
#### File: jessewebb/csgo-icon-extractor/main.py
```python
import argparse
import csgo_icon_extractor
DEFAULT_ICONLIB = 'iconlib.swf'
DEFAULT_OUTPUT_DIR = 'csgo-icons'
def _parse_command_line_args():
parser = argparse.ArgumentParser(description='Extracts the CS:GO icon images from the icon lib SWF file.')
parser.add_argument('iconlib', nargs='?', default=DEFAULT_ICONLIB, help='the icon lib SWF file')
parser.add_argument('outdir', nargs='?', default=DEFAULT_OUTPUT_DIR, help='the directory to extract the icons into')
return parser.parse_args()
def main():
print('Running csgo-icon-extractor (version 1.0.0) ...')
args = _parse_command_line_args()
iconlib_file = args.iconlib
output_dir = args.outdir
print('Using configuration: iconlib={}, outdir={}'.format(iconlib_file, output_dir))
csgo_icon_extractor.verfiy_swt_tools_is_in_path()
csgo_icon_extractor.create_output_directory(output_dir)
object_set_details_list = csgo_icon_extractor.extract_object_set_details_list(iconlib_file)
for object_set_details in object_set_details_list:
if object_set_details.object_type in csgo_icon_extractor.SUPPORTED_ICON_TYPE_MAP:
icon_file_ext = csgo_icon_extractor.SUPPORTED_ICON_TYPE_MAP[object_set_details.object_type]
csgo_icon_extractor.extract_icon_set(iconlib_file, object_set_details, icon_file_ext, output_dir)
print('Extracted {} {}(s)'.format(len(object_set_details.ids), object_set_details.object_type))
    print('Icon extraction complete!')
if __name__ == "__main__":
main()
```
|
{
"source": "JesseWeinstein/discard",
"score": 2
}
|
#### File: discard/discard/cli.py
```python
import sys
import datetime
from pathlib import Path
import click
from discard import Discard
from discard import reader
def require_token(ctx):
if ctx.obj['token'] is None:
        raise click.ClickException('Please pass a token using -t or the DISCORD_TOKEN environment variable')
@click.group()
@click.option('-t', '--token', help='Bot or user token.',
envvar='DISCORD_TOKEN')
@click.option('-U', '--is-user-account', default=False, is_flag=True, help='Log in as a user account.')
@click.option('-o', '--output-dir', default=Path('out/'), help='Output directory, out/ by default.',
type=click.Path(file_okay=False, writable=True))
@click.option('--after', help="Datetime after which to retrieve history (UTC)", type=click.DateTime())
@click.option('--before', help="Datetime before which to retrieve history (UTC)", type=click.DateTime())
@click.option('--no-scrub', default=False, is_flag=True, help='Do not scrub token from logged data.')
@click.option('--gzip', default=False, is_flag=True, help='Save logs compressed with gzip.')
@click.pass_context
def cli(ctx, **kwargs):
ctx.ensure_object(dict)
ctx.obj.update(kwargs)
ctx.obj['output_dir'] = Path(ctx.obj['output_dir'])
ctx.obj['command'] = sys.argv
@cli.command(help="Only log in and fetch profile information.")
@click.pass_context
def profile(ctx):
require_token(ctx)
discard = Discard(mode="profile", **ctx.obj)
discard.run()
@cli.command(help="Archive one or multiple channels.")
@click.argument('channel_id', required=True, nargs=-1, type=int)
@click.pass_context
def channel(ctx, channel_id):
require_token(ctx)
discard = Discard(mode="channel", channel_id=channel_id, **ctx.obj)
discard.run()
@cli.command(help="Archive one or multiple guilds.")
@click.argument('guild_id', required=True, nargs=-1, type=int)
@click.pass_context
def guild(ctx, guild_id):
require_token(ctx)
discard = Discard(mode="guild", guild_id=guild_id, **ctx.obj)
discard.run()
@cli.command(help="Read a channel log.")
@click.argument('path', required=True, type=click.Path(file_okay=False))
@click.pass_context
def read(ctx, path):
reader.read_chat(path)
@cli.command(help="Output a summary and audit for a directory of runs.")
@click.argument('path', required=True, type=click.Path(file_okay=False))
@click.option('--json', default=False, is_flag=True, help='Output machine readable summary.')
@click.pass_context
def summary(ctx, path, json):
reader.summary(path, as_json=json)
if __name__ == '__main__':
cli()
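# Usage sketch (invocations inferred from the click commands above; assumes the
# package is importable as `discard` so this module runs via `python -m discard.cli`):
#   DISCORD_TOKEN=... python -m discard.cli profile
#   python -m discard.cli -t <token> --gzip --after 2021-01-01 channel 123456789012345678
#   python -m discard.cli summary out/ --json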
```
#### File: discard/discard/discard.py
```python
import sys
import os
import logging
import datetime
import datetime
import json
import traceback
import copy
import random
import string
import gzip
import asyncio
from pathlib import Path
from collections.abc import Iterable
import discord
from tqdm import tqdm
__version__ = "0.3.3"
PBAR_UPDATE_INTERVAL = 100
PBAR_MINIMUM_MESSAGES = 1000 # Minimum number of messages to begin showing a progress bar for
# There are a few websocket events that we need to log (like GUILD_CREATE),
# but also a few we didn't ask for that we want to do without (pings, typing notifications, new messages),
# at least until a realtime mode is introduced. For this purpose we use a blacklist.
WS_EVENT_BLACKLIST = [None, 'TYPING_START', 'MESSAGE_CREATE', 'MESSAGE_UPDATE', 'MESSAGE_REACTION_ADD']
class NotFoundError(Exception):
pass
class DiscardClient(discord.Client):
def __init__(self, *args, discard=None, **kwargs):
super().__init__(*args, **kwargs)
self.discard = discard
self.is_user_account = self.discard.is_user_account
self.exception = None
# monkeypatch discord.py request function to log
request_func = self.http.request
async def request_func_wrapped(route, *, files=None, **kwargs):
datetime_start = datetime.datetime.now(datetime.timezone.utc)
response = await request_func(route, files=files, **kwargs) # XXX await?
datetime_end = datetime.datetime.now(datetime.timezone.utc)
discard.log_http_request(route, kwargs, response, datetime_start, datetime_end)
return response
self.http.request = request_func_wrapped
# Override the default run method in order to preserve KeyboardInterrupt
def run(self, *args, **kwargs):
loop = self.loop
try:
loop.run_until_complete(self.start(*args, **kwargs))
except KeyboardInterrupt:
self.exception = sys.exc_info()
finally:
loop.close()
async def on_ready(self):
if self.discard.mode == 'profile':
print(f'We have logged in as {self.user.name} (id {self.user.id})')
if not self.is_user_account:
# Fetch self using the HTTP API (not supported for user accounts)
user = await self.fetch_user(self.user.id)
print(f"Fetched user: {user}")
else:
# Fetch own profile using the HTTP API (not supported for bot accounts)
profile = await self.fetch_user_profile(self.user.id)
print(f"Fetched profile: {profile}")
elif self.discard.mode == 'channel':
for channel_id in self.discard.channel_ids:
channel = self.get_channel(channel_id)
if channel is None:
raise NotFoundError(f"Channel not found: {channel_id}")
await self.archive_channel(channel)
elif self.discard.mode == 'guild':
for guild_id in self.discard.guild_ids:
guild = self.get_guild(guild_id)
if guild is None:
raise NotFoundError(f"Guild not found: {guild_id}")
await self.archive_guild(guild)
else:
raise ValueError(f"Unknown mode: {self.discard.mode}")
# Quit
await self.close()
async def archive_channel(self, channel: discord.abc.GuildChannel):
print(f"Processing channel: {channel}")
self.discard.start_channel(channel)
# XXX is it a good idea for userbots to do this?
#await self.fetch_channel(channel.id)
num_messages = 0
oldest_message = None
newest_message = None
message = None
pbar = None
# before and after datetimes must be timezone-naive in UTC (why not timezone-aware UTC?)
async for message in channel.history(after=self.discard.after, before=self.discard.before, limit=None,
oldest_first=True):
if oldest_message is None:
oldest_message = message
expected_timedelta = (self.discard.before or datetime.datetime.now()) - oldest_message.created_at
for reaction in message.reactions:
# Fetch the users who reacted
async for user in reaction.users():
pass
if num_messages % PBAR_UPDATE_INTERVAL == 0 and num_messages > PBAR_MINIMUM_MESSAGES:
timedelta = message.created_at - oldest_message.created_at
if pbar is None:
pbar = tqdm(total=expected_timedelta.days, initial=timedelta.days, unit="day", miniters=1)
else:
diff = timedelta.days - pbar.n
if diff:
pbar.update(diff)
num_messages += 1
if pbar:
pbar.update(expected_timedelta.days - pbar.n)
pbar.close()
newest_message = message
self.discard.end_channel(channel, num_messages, oldest_message, newest_message)
async def archive_guild(self, guild: discord.Guild):
print(f"Processing guild: {guild}")
self.discard.start_guild(guild)
# XXX is it a good idea for userbots to do this?
await self.fetch_guild(guild.id)
await guild.fetch_channels()
await guild.fetch_roles()
await guild.fetch_emojis()
channels = []
for channel in guild.text_channels:
if channel.permissions_for(guild.me).read_messages:
channels.append(channel)
print(f"{len(channels)} accessible channels...")
for channel in channels:
await self.archive_channel(channel)
self.discard.end_guild(guild, len(channels))
async def on_socket_raw_send(self, payload):
self.discard.log_ws_send(payload)
async def on_socket_response(self, msg):
self.discard.log_ws_recv(msg)
async def on_error(self, event_method, *args, **kwargs):
# Reraising the exception doesn't close the connection,
# so we save it and raise it outside.
# TODO some errors would be best logged but kept non-fatal to still
# fetch the most data possible.
# Have an option for that.
self.exception = sys.exc_info()
await self.close()
class Discard():
def __init__(self, token, mode, output_dir, command=None, channel_id=None, guild_id=None,
is_user_account=False, no_scrub=False, before=None, after=None,
gzip=False):
self.token = token
self.mode = mode
self.command = command
self.channel_ids = channel_id
if not isinstance(self.channel_ids, Iterable):
self.channel_ids = [self.channel_ids]
self.guild_ids = guild_id
if not isinstance(self.guild_ids, Iterable):
self.guild_ids = [self.guild_ids]
self.is_user_account = is_user_account
self.no_scrub = no_scrub
self.output_dir_root = output_dir
self.client = None
if (before and not isinstance(before, datetime.datetime)) or (after and not isinstance(after, datetime.datetime)):
raise TypeError("before and after must be datetime objects")
self.before = before
self.after = after
self.gzip = gzip
self.client = DiscardClient(discard=self)
def start(self):
self.datetime_start = datetime.datetime.now(datetime.timezone.utc)
self.ident = ''.join([random.choice(string.ascii_lowercase + string.digits) for i in range(24)])
self.datetime_end = None
self.finished = False
self.completed = False
self.errors = False
self.exception = None
self.traceback = None
self.num_http_requests = 0
self.num_ws_packets = 0
self.num_messages = 0
self.num_guild_messages = 0
self.profile = None
self.run_directory = self.datetime_start.strftime('%Y%m%dT%H%M%S_'+self.mode)
if not self.before and not self.after:
self.run_directory += '_full'
self.output_directory = self.output_dir_root / Path(self.run_directory)
if os.path.exists(self.output_directory):
self.run_directory += "_" + self.ident[0:5]
self.output_directory = self.output_dir_root / Path(self.run_directory)
if os.path.exists(self.output_directory):
raise RuntimeError("Fatal: Run directory already exists")
os.makedirs(self.output_directory)
self.write_meta_file()
self.open_request_file('run.jsonl')
def open_request_file(self, filepath):
filepath = Path(filepath)
if len(filepath.parts) > 1:
os.makedirs(self.output_directory / filepath.parts[0], exist_ok=True)
if self.gzip:
filepath = filepath.with_name(filepath.name + '.gz')
if os.path.exists(self.output_directory / filepath):
raise RuntimeError("Request file already exists")
open_func = gzip.open if self.gzip else open
self.request_file = open_func(self.output_directory / filepath, 'wt')
def end(self):
self.request_file.close()
self.finished = True
self.datetime_end = datetime.datetime.now(datetime.timezone.utc)
self.write_meta_file()
def run(self):
self.start()
try:
self.client.run(self.token, bot=not self.is_user_account)
if self.client.exception:
t, v, tb = self.client.exception
raise v.with_traceback(tb)
except BaseException as ex:
self.errors = True
self.exception = type(ex).__name__ + f": {ex}"
self.traceback = traceback.format_exc()
self.end()
raise
self.completed = True
print("Completed")
self.end()
def write_meta_file(self):
obj = {
'client': {
'name': 'discard',
'version': __version__,
'discord.py_version': discord.__version__
},
'command': self.command,
'settings': {
'mode': self.mode,
'token': self.token if self.no_scrub else None,
'is_user_account': self.is_user_account,
'output_dir': str(self.output_dir_root),
'after': self.after.isoformat() if self.after else None,
'before': self.before.isoformat() if self.before else None,
'no_scrub': self.no_scrub,
'gzip': self.gzip
},
'run': {
'datetime_start': self.datetime_start.isoformat(),
'datetime_end': self.datetime_end.isoformat() if self.datetime_end else None,
'run_directory': self.run_directory,
'ident': self.ident,
'completed': self.completed,
'finished': self.finished,
'errors': self.errors,
'exception': self.exception,
'traceback': self.traceback,
},
'summary': {
'num_http_requests': self.num_http_requests,
'num_ws_packets': self.num_ws_packets,
'num_messages': self.num_messages
},
'user': None
}
if self.client and self.client.user:
obj['user'] = {
'id': self.client.user.id,
'name': self.client.user.name,
'discriminator': self.client.user.discriminator,
'bot': self.client.user.bot
}
with open(self.output_directory / Path('run.meta.json'), 'w') as f:
json.dump(obj, f, indent=4, ensure_ascii=False)
def start_channel(self, channel):
self.request_file.close()
self.num_guild_messages = 0
guild_id = channel.guild.id
self.open_request_file(f'{guild_id}/{channel.id}.jsonl')
def end_channel(self, channel, num_messages, oldest_message, newest_message):
# This information is intentionally minimalistic. It's supposed to be
# a human-readable summary, not a resource. Logged requests contain all data.
obj = {
'channel': {
'id': channel.id,
'name': channel.name,
'type': str(channel.type)
},
'summary': {
'num_messages': num_messages,
'oldest_message': None,
'newest_message': None
}
}
if oldest_message is not None:
obj['summary']['oldest_message'] = {
'id': oldest_message.id,
'timestamp': oldest_message.created_at.isoformat() # TODO these need to be converted to UTC!
}
if newest_message is not None:
obj['summary']['newest_message'] = {
'id': newest_message.id,
'timestamp': newest_message.created_at.isoformat()
}
with open(self.output_directory / Path(f'{channel.guild.id}/{channel.id}.meta.json'), 'w') as f:
json.dump(obj, f, indent=4, ensure_ascii=False)
self.num_messages += num_messages
self.num_guild_messages += num_messages
def start_guild(self, guild):
self.request_file.close()
self.open_request_file(f'{guild.id}/guild.jsonl')
def end_guild(self, guild, num_channels):
obj = {
'guild': {
'id': guild.id,
'name': guild.name,
},
'summary': {
'num_channels': num_channels,
'num_messages': self.num_guild_messages
}
}
with open(self.output_directory / Path(f'{guild.id}/guild.meta.json'), 'w') as f:
json.dump(obj, f, indent=4, ensure_ascii=False)
def log_http_request(self, route, kwargs, response, datetime_start, datetime_end):
obj = {
'type': 'http',
'datetime_start': datetime_start.isoformat(),
'datetime_end': datetime_end.isoformat(),
'request': {
'method': route.method,
'url': route.url,
},
'response': {
'data': response
}
}
if 'params' in kwargs:
obj['request']['params'] = kwargs['params']
json.dump(obj, self.request_file, ensure_ascii=False)
self.request_file.write('\n')
self.num_http_requests += 1
def log_ws_send(self, data):
now = datetime.datetime.now()
obj = {
'type': 'ws',
'datetime': now.isoformat(),
'direction': 'send',
'data': data,
}
if not self.no_scrub and self.token in data:
obj['data'] = data.replace(self.token, '[SCRUBBED]')
obj['scrubbed'] = True
json.dump(obj, self.request_file, ensure_ascii=False)
self.request_file.write('\n')
self.num_ws_packets += 1
def log_ws_recv(self, data):
if 't' in data:
if data['t'] in WS_EVENT_BLACKLIST:
return
now = datetime.datetime.now()
obj = {
'type': 'ws',
'datetime': now.isoformat(),
'direction': 'recv',
'data': data
}
json.dump(obj, self.request_file, ensure_ascii=False)
self.request_file.write('\n')
self.num_ws_packets += 1
```
|
{
"source": "jessewmc/azure-functions-python-library",
"score": 3
}
|
#### File: azure-functions-python-library/tests/test_meta.py
```python
from typing import Mapping, List
import unittest
import datetime
from azure.functions import meta
class TestMeta(unittest.TestCase):
def test_parsed_datetime_none(self):
parsed = self._parse_datetime(None)
self.assertEqual(parsed, None)
def test_parse_datetime_empty(self):
parsed = self._parse_datetime('')
self.assertEqual(parsed, None)
def test_utc_datetime_no_fraction_parse(self):
parsed = self._parse_datetime('2018-12-12T03:16:34Z')
self.assertEqual(str(parsed), '2018-12-12 03:16:34+00:00')
self.assertEqual(parsed.tzinfo, datetime.timezone.utc)
def test_utc_datetime_parse(self):
parsed = self._parse_datetime('2018-12-12T03:16:34.2191Z')
self.assertEqual(str(parsed), '2018-12-12 03:16:34.219100+00:00')
def test_utc_datetime_neg_tz_parse(self):
parsed = self._parse_datetime('2018-12-12T03:16:34.2191-00:00')
self.assertEqual(str(parsed), '2018-12-12 03:16:34.219100+00:00')
def test_too_fractional_utc_datetime_parse(self):
parsed1 = self._parse_datetime('2018-12-12T03:16:34.2191989Z')
self.assertEqual(str(parsed1), '2018-12-12 03:16:34.219198+00:00')
parsed2 = self._parse_datetime('9999-12-31T23:59:59.9999999+00:00')
self.assertEqual(str(parsed2), '9999-12-31 23:59:59.999999+00:00')
def test_local_datetime_no_fraction_parse(self):
parsed = self._parse_datetime('2018-12-12T03:16:34')
self.assertEqual(str(parsed), '2018-12-12 03:16:34')
def test_local_datetime_parse(self):
parsed = self._parse_datetime('2018-12-12T03:16:34.2191')
self.assertEqual(str(parsed), '2018-12-12 03:16:34.219100')
def test_too_fractional_local_datetime_parse(self):
parsed1 = self._parse_datetime('2018-08-07T23:17:57.4610506')
self.assertEqual(str(parsed1), '2018-08-07 23:17:57.461050')
parsed2 = self._parse_datetime('9999-12-31T23:59:59.9999999')
self.assertEqual(str(parsed2), '9999-12-31 23:59:59.999999')
def test_parsed_timedelta_none(self):
parsed = self._parse_timedelta(None)
self.assertIsNone(parsed)
def test_parsed_timedelta_empty(self):
parsed = self._parse_timedelta('')
self.assertIsNone(parsed)
def test_parse_timedelta_seconds(self):
# Zeros
parsed = self._parse_timedelta('0')
self.assertEqual(parsed.seconds, 0)
# Single Digit
parsed = self._parse_timedelta('3')
self.assertEqual(parsed.seconds, 3)
# Double Digit
parsed = self._parse_timedelta('56')
self.assertEqual(parsed.seconds, 56)
parsed = self._parse_timedelta('678')
self.assertEqual(parsed.seconds, 678)
def test_parse_timedelta_minutes_seconds(self):
# Single Digits Zeros
parsed = self._parse_timedelta('0:0')
self.assertEqual(parsed.seconds, 0)
# Single Digits
parsed = self._parse_timedelta('3:4')
self.assertEqual(parsed.seconds, 3 * 60 + 4)
# Double Digits Zeros
parsed = self._parse_timedelta('00:00')
self.assertEqual(parsed.seconds, 0)
# Double Digits
parsed = self._parse_timedelta('34:56')
self.assertEqual(parsed.seconds, 34 * 60 + 56)
def test_parse_timedelta_hours_minutes_seconds(self):
# Single Digits Zeros
parsed = self._parse_timedelta('0:0:0')
self.assertEqual(parsed.seconds, 0)
# Single Digits
parsed = self._parse_timedelta('3:4:5')
self.assertEqual(parsed.seconds, 3 * 3600 + 4 * 60 + 5)
# Double Digits Zeros
parsed = self._parse_timedelta('00:00:00')
self.assertEqual(parsed.seconds, 0)
# Double Digits
parsed = self._parse_timedelta('12:34:56')
self.assertEqual(parsed.seconds, 12 * 3600 + 34 * 60 + 56)
def test_parse_utc_datetime_failure(self):
malformed_utc = '2018-12-12X03:16:34.219289Z'
with self.assertRaises(ValueError) as context:
self._parse_datetime(malformed_utc)
self.assertIn(malformed_utc, str(context.exception))
def test_parse_local_datetime_failure(self):
malformed_local = '2018-12-12X03:16:34.219289'
with self.assertRaises(ValueError) as context:
self._parse_datetime(malformed_local)
self.assertIn(malformed_local, str(context.exception))
def test_datum_single_level_python_value(self):
        datum = meta.Datum(value=None, type="int")
self.assertEqual(datum.python_value, None)
self.assertEqual(datum.python_type, type(None))
datum = meta.Datum(value=1, type=None)
self.assertEqual(datum.python_value, None)
self.assertEqual(datum.python_type, type(None))
datum = meta.Datum(value=b"awesome bytes", type="bytes")
self.assertEqual(datum.python_value, b"awesome bytes")
self.assertEqual(datum.python_type, bytes)
datum = meta.Datum(value="awesome string", type="string")
self.assertEqual(datum.python_value, 'awesome string')
self.assertEqual(datum.python_type, str)
datum = meta.Datum(value=42, type="int")
self.assertEqual(datum.python_value, 42)
self.assertEqual(datum.python_type, int)
datum = meta.Datum(value=43.2103, type="double")
self.assertEqual(datum.python_value, 43.2103)
self.assertEqual(datum.python_type, float)
def test_datum_collections_python_value(self):
class DatumCollectionString:
def __init__(self, *args: List[str]):
self.string = args
datum = meta.Datum(value=DatumCollectionString("string 1", "string 2"),
type="collection_string")
self.assertListEqual(datum.python_value, ["string 1", "string 2"])
self.assertEqual(datum.python_type, list)
class DatumCollectionBytes:
def __init__(self, *args: List[bytes]):
self.bytes = args
datum = meta.Datum(value=DatumCollectionBytes(b"bytes 1", b"bytes 2"),
type="collection_bytes")
self.assertListEqual(datum.python_value, [b"bytes 1", b"bytes 2"])
self.assertEqual(datum.python_type, list)
class DatumCollectionSint64:
def __init__(self, *args: List[int]):
self.sint64 = args
datum = meta.Datum(value=DatumCollectionSint64(1234567, 8901234),
type="collection_sint64")
self.assertListEqual(datum.python_value, [1234567, 8901234])
self.assertEqual(datum.python_type, list)
def test_datum_json_python_value(self):
# None
datum = meta.Datum(value='null',
type="json")
self.assertEqual(datum.python_value, None)
self.assertEqual(datum.python_type, type(None))
# Int
datum = meta.Datum(value='123',
type="json")
self.assertEqual(datum.python_value, 123)
self.assertEqual(datum.python_type, int)
# Float
datum = meta.Datum(value='456.789',
type="json")
self.assertEqual(datum.python_value, 456.789)
self.assertEqual(datum.python_type, float)
# String
datum = meta.Datum(value='"string in json"',
type="json")
self.assertEqual(datum.python_value, "string in json")
self.assertEqual(datum.python_type, str)
# List
datum = meta.Datum(value='["a", "b", "c"]',
type="json")
self.assertListEqual(datum.python_value, ["a", "b", "c"])
self.assertEqual(datum.python_type, list)
# Object
datum = meta.Datum(value='{"name": "awesome", "value": "cool"}',
type="json")
self.assertDictEqual(datum.python_value, {
"name": "awesome",
"value": "cool"})
self.assertEqual(datum.python_type, dict)
# Should ignore Newlines and Spaces
datum = meta.Datum(value='{ "name" : "awesome",\n "value": "cool"\n}',
type="json")
self.assertDictEqual(datum.python_value, {
"name": "awesome",
"value": "cool"})
self.assertEqual(datum.python_type, dict)
def _parse_datetime(self, datetime_str):
return meta._BaseConverter._parse_datetime(datetime_str)
def _parse_timedelta(self, timedelta_str):
return meta._BaseConverter._parse_timedelta(timedelta_str)
```
|
{
"source": "jessexknight/homeworks",
"score": 3
}
|
#### File: homeworks/src/build.py
```python
import os, re, shutil, sys
class Template:
'''
Structure for storing a string, or list of strings, and replacing specified
keys in the string(s) with dynamic values.
'''
def __init__(self,src):
self.src = src
self.load_content()
def get_content(self):
return self.content
def load_content(self):
'''
Load the intial content for this template from the associated file.
'''
with open(self.src,'r') as f:
self.content = f.read()
def get_sub_content(self, dic):
'''
Get self.content and do some substitutions without modifying this instance.
'''
# check for same broadcast size, if any
# i.e. do we need to write the same value to several keys?
argsizes = list(set([listlen(value) for key,value in dic.iteritems()]))
assert len(argsizes) <= 2
N = max(argsizes)
# initial copying for broadcast
content = [self.content[:] for i in range(N)]
for key, value in dic.iteritems():
# broadcast the value if singular
            if listlen(value) == 1:
value = [value for i in range(N)]
# write the substitutions
for i in range(N):
content[i] = content[i].replace(make_key(key),value[i])
content = ''.join(content)
return content
def set_sub_content(self, keys):
'''
Overwrite self.content in this instance with some substitutions.
'''
self.content = self.get_sub_content(keys)
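# Usage sketch (file name and keys invented for the example): for a template file
# nav.html containing '<a href="__href__">__title__</a>',
# Template('nav.html').get_sub_content({'href': 'about.php', 'title': 'About'})
# returns '<a href="about.php">About</a>'; if one key maps to a list of N values,
# the content is copied N times and scalar keys are broadcast into every copy.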
def make_key(str):
'''
Make a standard key (for finding and substituting the key with some value).
'''
return '__'+str+'__'
def find_keys(str):
'''
Find any keys in the string.
Return full keys (formatted) and also truncated base keys.
'''
keys = re.findall(make_key('(.*?)'),str)
base = [k.split(':')[0] for k in keys]
return keys,base
def error(msg):
print " ERROR: "+msg+"\n See -h option for help."
sys.exit(1)
def update(msg):
print " + "+msg
def vupdate(msg,verbose=False):
if verbose:
print " | "+msg
def listlen(obj):
'''
Hack-ish workaround for inconsistency of strings / lists of strings.
'''
if isinstance(obj,list):
return len(obj)
elif isinstance(obj,str):
return 1
elif isinstance(obj,unicode):
return 1
else:
return 0
def make_filename(name,ext=True):
name = name.lower().replace(' ','-')
if ext:
return name+'.php'
else:
return name
def make_pagename(name):
return name.replace('index','home').capitalize()
def get_templates(dirname):
T = {}
for (path,dirs,files) in os.walk(dirname):
for f in files:
pagename = os.path.splitext(f)[0]
T.update({pagename:Template(os.path.join(path,f))})
return T
if __name__ == "__main__":
# meta-data
out = '../web'
portfolio = '../web/portfolio'
pagelist = ['index','about','portfolio','testimonials','links','contact']
# collecting templates
pages = get_templates('pages')
tmp = {'parts': get_templates('templates/parts'),
'nav' : get_templates('templates/nav')}
navul = ''
portul = ''
# building the nav-bar (portfolio)
for (path,dirs,files) in os.walk(portfolio):
for name in dirs:
sub = {'href' : make_filename('portfolio')+'#'+make_filename(name,False),
'title': make_pagename(name)}
portul += tmp['nav']['li-li'].get_sub_content(sub)
# building the nav-bar (pages)
for name in pagelist:
sub = {'href' : make_filename(name),
'title': make_pagename(name)}
if name in ['portfolio']:
sub.update({'li-ul':portul})
navli = tmp['nav']['li-ul']
else:
navli = tmp['nav']['li']
navul += navli.get_sub_content(sub)
tmp['parts']['nav'].set_sub_content({'li':''.join(navul)})
# writing pages from parts
for name, page in pages.iteritems():
sub = {k:t.get_sub_content({'title':make_pagename(name)})
for k,t in tmp['parts'].iteritems()}
print('Building '+make_filename(name))
with open(os.path.join(out,make_filename(name)),'w') as f:
f.write(page.get_sub_content(sub))
# copying scripts
for (path,dirs,files) in os.walk('scripts'):
for f in files:
ftype = os.path.splitext(f)[1][1:]
src = os.path.join(path,f)
dst = os.path.join(out,ftype.replace('php',''),f)
shutil.copyfile(src,dst)
```
|
{
"source": "jessex/shrtn",
"score": 3
}
|
#### File: jessex/shrtn/shrtn.py
```python
import database as db
import sys, re
from urlparse import urlparse
ALPHABET = "abcdefghijkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789" #no oO0lI1
OURDOMAIN = "http://shr.tn/" #our imaginary domain for our imaginary site
re_short = re.compile(OURDOMAIN + "[a-kmnp-zA-HJ-NP-Z2-9]+$") #matches our URLs
re_end = re.compile("[.][^/]+$") #for checking the end of a url
# ****************************** HELPER FUNCTIONS ******************************
def setup_db():
"""Establishes a connection to our database and creates our url table if it
does not yet exist. Returns the connection to the database file."""
conn = db.setup_sql(db.MYLOCATION)
if conn == None: #Could not establish connection, so quit
sys.exit()
if not db.table_exists(db.MYTABLE, conn): #create table if not yet created
db.create_table(db.MYTABLE, conn)
return conn
def is_valid_short(url):
"""Takes in a url and determines if it is a valid shortened url."""
return not (not re_short.match(url))
def standardize_url(url):
"""Takes in a url and returns a clean, consistent format. For example:
example.com, http://example.com, example.com/ all are http://example.com/
Returns None if the url is somehow invalid."""
if is_valid_short(url): #will not shorten one of our already shortened URLs
return None
parts = urlparse(url, "http") #default scheme is http if omitted
if parts[0] != "http" and parts[0] != "https": #scheme was not http(s)
return None
#url appears valid at this point, proceed with standardization
standard = parts.geturl()
#work-around for bug in urlparse
if standard.startswith("http:///") or standard.startswith("https:///"):
standard = standard.replace("///", "//", 1) #get rid of extra slash
if not standard.endswith("/"): #does not end with '/'...
if re_end.findall(standard): #...but ends with .something...
if parts[0] == "http":
bound = 7
elif parts[0] == "https":
bound = 8
if standard.rfind("/", bound) == -1: #...and contains no other '/'
return standard + "/" #append a '/'
return standard
# ******************************* CORE FUNCTIONS *******************************
def shorten_url(url, conn):
"""Takes in a standard url and returns a shortened version."""
url = standardize_url(url)
if url is None: #tried to shorten invalid url
return None
#get the id for this url (whether new or otherwise)
id = db.search_url(url, db.MYTABLE, conn)
if not id: #url not yet inserted into database
id = db.insert_url(url, db.MYTABLE, conn) #insert and get its id
code = convert_to_code(id)
return "%s%s" % (OURDOMAIN, code)
def lengthen_url(url, conn):
"""Takes in one of our shortened URLs and returns the correct long url."""
#isolate code from shortened url
if not is_valid_short(url): #url was not constructed properly
return "%s404" % OURDOMAIN
code = url[14:] #just the code, ie. h7K9g0
id = resolve_to_id(code) #convert shortened code to id
long = db.search_id(id, db.MYTABLE, conn)
if not long: #id was not found in database
return "%s404" % OURDOMAIN #issue 404
return long #url to perform 301 re-direct on
def convert_to_code(id, alphabet=ALPHABET):
"""Converts a decimal id number into a shortened URL code. Use the id of the
row in the database with the entered long URL."""
if id <= 0: #invalid codes (autoincrement is always 1 or higher)
return alphabet[0]
base = len(alphabet) #base to convert to (56 for our standard alphabet)
chars = []
while id:
chars.append(alphabet[id % base])
id //= base
chars.reverse() #moved right to left, so reverse order
return ''.join(chars) #convert stored characters to single string
def resolve_to_id(code, alphabet=ALPHABET):
"""Converts the shortened URL code back to an id number in decimal form. Use
the id to query the database and lookup the long URL."""
base = len(alphabet)
size = len(code)
id = 0
for i in range(0, size): #convert from higher base back to decimal
id += alphabet.index(code[i]) * (base ** (size-i-1))
return id
```
|
{
"source": "JesseXu117/rqalpha_futu",
"score": 2
}
|
#### File: mod/rqalpha_mod_futu/futu_broker_hk.py
```python
from rqalpha.interface import AbstractBroker
from rqalpha.const import DEFAULT_ACCOUNT_TYPE
from rqalpha.events import EVENT, Event
from rqalpha.model.order import *
from rqalpha.model.base_position import Positions
from rqalpha.model.portfolio import Portfolio
from rqalpha.model.trade import *
from rqalpha.utils.i18n import gettext as _
from .futu_utils import *
from time import sleep
from threading import Thread
from futuquant import OpenHKTradeContext
import six  # used by _init_portfolio; not guaranteed to be re-exported by the star imports above
class FUTUBrokerHK(AbstractBroker):
"""
    The FUTUBrokerHK object connects to FUTU's Hong Kong paper-trading and live-trading environments.
    Design notes:
    1. The account's starting cash must be set in the rqalpha config (config.base.stock_starting_cash)
    and is not synchronized with the FUTU account: this keeps long-run return metrics stable and also
    limits how much of the real FUTU account balance a strategy script can claim.
    2. Positions are synchronized from the FUTU account once at initialization; afterwards the state is
    maintained entirely inside rqalpha, so the positions recorded by the strategy may differ from the
    user's actual FUTU account.
    3. After an order is placed or cancelled, the script periodically polls its status in the FUTU
    environment and publishes the corresponding events, so there may be some delay.
"""
def __init__(self, env, mod_config):
self._env = env
self._mod_config = mod_config
self._portfolio = None
self._open_order = []
self._env.event_bus.add_listener(EVENT.PRE_BEFORE_TRADING, self._pre_before_trading)
self._env.event_bus.add_listener(EVENT.PRE_AFTER_TRADING, self._pre_after_trading)
        # Create the FUTU API trade context and set its parameters
self._trade_context = OpenHKTradeContext(self._mod_config.api_svr.ip, self._mod_config.api_svr.port)
        self._trade_envtype = 1  # FUTU trading envtype: 0 = live, 1 = paper trading
if IsRuntype_RealTrade():
self._trade_envtype = 0
thread_order_check = Thread(target=self._thread_order_check)
thread_order_check.setDaemon(True)
thread_order_check.start()
def get_portfolio(self):
"""
        Return the portfolio. The framework calls this at initialization to obtain the portfolio,
        which contains the account info, net value, units, etc.
:return: Portfolio
"""
if self._portfolio is not None:
return self._portfolio
self._portfolio = self._init_portfolio()
if not self._portfolio._accounts:
raise RuntimeError("accout config error")
return self._portfolio
def submit_order(self, order):
"""
        Submit an order. In the current version, RQAlpha creates the :class:`~Order` object and then
        submits it to the Broker through this interface.
        TBD: have the Broker create and return the Order instead?
"""
print("FUTUBrokerHK.submit_order:{}".format(order))
if order.type == ORDER_TYPE.MARKET:
raise RuntimeError("submit_order not support ORDER_TYPE.MARKET")
account = self._get_account(order.order_book_id)
self._env.event_bus.publish_event(Event(EVENT.ORDER_PENDING_NEW, account=account, order=order))
order.active()
        # Send the order request through the FUTU API
futu_order_side = 0 if order.side == SIDE.BUY else 1
        futu_order_type = 0  # HK enhanced limit order
ret_code, ret_data = self._trade_context.place_order(order.price, order.quantity, order.order_book_id,
futu_order_side, futu_order_type, self._trade_envtype)
# 事件通知
if ret_code != 0:
order.mark_rejected("futu api req err:{} ".format(ret_code))
self._env.event_bus.publish_event(Event(EVENT.ORDER_CREATION_REJECT, account=account, order=order))
else:
futu_order_id = ret_data.loc[0, 'orderid']
self._open_order.append((futu_order_id, order))
self._env.event_bus.publish_event(Event(EVENT.ORDER_CREATION_PASS, account=account, order=order))
sleep(0.1)
self._check_open_orders(futu_order_id)
def cancel_order(self, order):
"""
        Cancel an order.
        :param order: the order to cancel
:type order: :class:`~Order`
"""
account = self._get_account(order.order_book_id)
futu_order_id = self._get_futu_order_id(order)
if futu_order_id is None:
return
        # Check the order status once immediately
self._check_open_orders(futu_order_id)
if order.is_final():
return
self._env.event_bus.publish_event(Event(EVENT.ORDER_PENDING_CANCEL, account=account, order=order))
        ret_code, ret_data = self._trade_context.set_order_status(0, futu_order_id, self._trade_envtype)  # 0 = cancel
if ret_code != 0:
self._env.event_bus.publish_event(Event(EVENT.ORDER_CANCELLATION_REJECT, account=account, order=order))
else:
sleep(0.1)
            self._check_open_orders(futu_order_id)  # re-check the status right after submitting the request
def get_open_orders(self, order_book_id=None):
"""
[Required]
        Return the currently open (not yet fully filled) orders.
:return: list[:class:`~Order`]
"""
if order_book_id is None:
            return [order for __, order in self._open_order]
        else:
            return [order for __, order in self._open_order if order.order_book_id == order_book_id]
def _pre_before_trading(self, event):
print("broker before_trading")
def _pre_after_trading(self, event):
        # Clear any unfinished orders at market close
for __, order in self._open_order:
order.mark_rejected(_(u"Order Rejected: {order_book_id} can not match. Market close.").format(
order_book_id=order.order_book_id
))
account = self._env.get_account(order.order_book_id)
self._env.event_bus.publish_event(Event(EVENT.ORDER_UNSOLICITED_UPDATE, account=account, order=order))
        self._open_order = []
print("broker after_trading")
def _check_open_orders(self, futu_order_id=None):
if len(self._open_order) == 0:
return
ret_code, pd_data = self._trade_context.order_list_query('', self._trade_envtype)
if ret_code != 0:
return
ft_orders = []
if futu_order_id is not None:
ft_orders.append(futu_order_id)
else:
for (fid, __) in self._open_order:
ft_orders.append(fid)
for fid in ft_orders:
pd_find = pd_data[pd_data.orderid == fid]
if len(pd_find) != 1:
continue
order = self._get_order_by_futu_id(fid)
account = self._get_account(order.order_book_id)
if order is None:
continue
            ct_amount = 0  # futures only: distinguishes closing today's position from older positions
            price = order.avg_price  # average price across partial fills
trade = Trade.__from_create__(
order_id=order.order_id,
price=price,
amount=0,
side=order.side,
position_effect=order.position_effect,
order_book_id=order.order_book_id,
frozen_price=order.frozen_price,
close_today_amount=ct_amount,
commission=0.,
tax=0., trade_id=None
)
trade._commission = 0
trade._tax = 0
row = pd_find.iloc[0]
ft_status = int(row['status'])
            if ft_status == 2 or ft_status == 3:  # partially filled | fully filled
qty_deal_last = order.quantity - order.unfilled_quantity
qty_deal_new = int(row['dealt_qty'])
                if qty_deal_last == qty_deal_new:  # filled quantity unchanged since the last check
continue
trade._amount = qty_deal_new - qty_deal_last
order.fill(trade)
self._env.event_bus.publish_event(Event(EVENT.TRADE, account=account, trade=trade, order=order))
if ft_status == 3:
self._remove_open_order_by_futu_id(fid)
            elif ft_status == 5:  # order placement failed
self._env.event_bus.publish_event(Event(EVENT.ORDER_CREATION_REJECT, account=account, order=order))
self._remove_open_order_by_futu_id(fid)
            elif ft_status == 6:  # 6 = cancelled
order.mark_cancelled(_(u"{order_id} order has been cancelled by user.").format(order_id=order.order_id))
self._env.event_bus.publish_event(Event(EVENT.ORDER_CANCELLATION_PASS, account=account, order=order))
self._remove_open_order_by_futu_id(fid)
            elif ft_status == 4 or ft_status == 7:  # 4 = expired  7 = deleted
reason = _(u"Order Cancelled: code = {order_book_id} ft_status = {ft_status} ").format(
order_book_id=order.order_book_id, ft_status=ft_status)
order.mark_rejected(reason)
self._env.event_bus.publish_event(Event(EVENT.ORDER_CREATION_REJECT, account=account, order=order))
self._remove_open_order_by_futu_id(fid)
else:
                pass  # 8 = waiting for market open  21 = sent locally  22 = sent locally, server rejected (no order created)  23 = sent locally, timed out waiting for the server reply
def _get_futu_positions(self, env):
StockPosition = env.get_position_model(DEFAULT_ACCOUNT_TYPE.STOCK.name)
positions = Positions(StockPosition)
ret, pd_data = self._trade_context.position_list_query(self._trade_envtype)
if ret != 0:
return None
for i in range(len(pd_data)):
row = pd_data.iloc[i]
code_str = str(row['code'])
pos_state = {}
pos_state['order_book_id'] = code_str
pos_state['quantity'] = int(row['qty'])
pos_state['avg_price'] = float(row['cost_price'])
pos_state['non_closable'] = 0
pos_state['frozen'] = int(row['qty']) - int(row['can_sell_qty'])
pos_state['transaction_cost'] = 0
item = positions.get_or_create(code_str)
item.set_state(pos_state)
return positions
def _init_portfolio(self):
accounts = {}
config = self._env.config
start_date = config.base.start_date
total_cash = 0
for account_type, stock_starting_cash in six.iteritems(config.base.accounts):
if account_type == DEFAULT_ACCOUNT_TYPE.STOCK.name:
# stock_starting_cash = config.base.accounts
if stock_starting_cash == 0:
raise RuntimeError(_(u"stock starting cash can not be 0, using `--stock-starting-cash 1000`"))
                all_positions = self._get_futu_positions(self._env)
                if all_positions is None:
                    raise RuntimeError("_init_portfolio fail")
                StockAccount = self._env.get_account_model(DEFAULT_ACCOUNT_TYPE.STOCK.name)
                accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name] = StockAccount(stock_starting_cash, all_positions)
total_cash += stock_starting_cash
else:
raise NotImplementedError
return Portfolio(start_date, 1, total_cash, accounts)
def _get_account(self, order_book_id):
# account = self._env.get_account(order_book_id)
# for debug
account = self._env.portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name]
return account
def _get_futu_order_id(self, order):
for fid, order_item in self._open_order:
if order_item is order:
return fid
return None
def _get_order_by_futu_id(self, futu_order_id):
for fid, order_item in self._open_order:
if futu_order_id == fid:
return order_item
return None
def _remove_open_order_by_futu_id(self, futu_order_id):
order = self._get_order_by_futu_id(futu_order_id)
if order is not None:
self._open_order.remove((futu_order_id, order))
def _thread_order_check(self):
while True:
if len(self._open_order) == 0:
print("broker:_thread_order_check None")
sleep(5)
else:
self._check_open_orders()
sleep(1)
```
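The status codes that `_check_open_orders` branches on are only documented in its inline comments. Collected into a single mapping they are easier to scan; this is a reference sketch reconstructed from those comments, and the numeric codes are assumed to follow the Futu trading API unchanged:

```python
# Futu order status codes as interpreted by _check_open_orders above
FT_ORDER_STATUS = {
    2: "partially filled",
    3: "fully filled",
    4: "expired",
    5: "order placement failed",
    6: "cancelled",
    7: "deleted",
    8: "waiting for market open",
    21: "sent locally",
    22: "sent locally, server reported the order failed (no order created)",
    23: "sent locally, timed out waiting for the server response",
}
```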
#### File: mod/rqalpha_mod_futu/mod.py
```python
from .rqalpha_simulate_broker import RQSimulateBroker
from .futu_event_source import *
from .futu_broker_hk import FUTUBrokerHK
from .futu_market_state import FUTUMarketStateSource
from .futu_data_source import FUTUDataSource, DataCache
from .futu_position import FUTUStockPosition
from .futu_utils import *
from rqalpha.interface import AbstractMod
from rqalpha.const import DEFAULT_ACCOUNT_TYPE
from futuquant import OpenQuoteContext
class FUTUMod(AbstractMod):
_futu_mod = None
_data_cache = ''
def __init__(self):
FUTUMod._futu_mod = self
self._env = None
self._mod_config = None
self._quote_context = None
@classmethod
def get_instance(cls):
return FUTUMod._futu_mod
def start_up(self, env, mod_config):
self._env = env
self._mod_config = mod_config
self._data_cache = DataCache()
        # the user's strategy script must be configured not to load mod_sys_simulation
if self._env.config.mod.sys_simulation.enabled or self._env.broker or self._env.event_source:
            raise RuntimeError("Please add this config to your strategy script: {'mod': {'sys_simulation': {'enabled': False}}}")
        # check the market config parameter: one strategy script targets exactly one market
        CheckFutuMarketConfig()
        # there are three run types: backtest, live trading, and simulated (paper) trading
        # the futu api integration only supports live trading for HK/US stocks and simulation for HK stocks
        CheckRunTypeConfig()
        # initialize the api quote context object
self._quote_context = self._init_quote_context()
        # replace the key components
self._set_broker()
self._set_data_source()
self._set_event_source()
self._env.set_position_model(DEFAULT_ACCOUNT_TYPE.STOCK.name, FUTUStockPosition)
print(">>> FUTUMod.start_up")
def tear_down(self, success, exception=None):
print(">>> FUTUMod.tear_down")
pass
def _set_broker(self):
if IsRuntype_Backtest():
config_broker = self._mod_config.rqalpha_broker_config
self._env.set_broker(RQSimulateBroker(self._env, config_broker))
elif IsRuntype_RealtimeStrategy():
            if IsFutuMarket_HKStock():  # HK stock real-time strategy
broker = FUTUBrokerHK(self._env, self._mod_config)
self._env.set_broker(broker)
            elif IsFutuMarket_USStock():  # US stock real-time strategy
raise RuntimeError("_set_broker no impl")
else:
raise RuntimeError("_set_broker err param")
def _set_event_source(self):
if IsRuntype_Backtest():
# event_source = FUTUEventForBacktest(self._env, self._env.config.base.accounts)
event_source = FUTUEventForBacktest(self._env)
self._env.set_event_source(event_source)
elif IsRuntype_RealtimeStrategy():
market_state_source = FUTUMarketStateSource(self._env, self._quote_context)
event_source = FUTUEventForRealtime(self._env, self._mod_config, market_state_source)
self._env.set_event_source(event_source)
else:
raise RuntimeError("_set_event_source err param")
def _set_data_source(self):
        data_source = FUTUDataSource(self._env, self._quote_context, self._data_cache)  # supports both backtest and real-time
if data_source is None:
raise RuntimeError("_set_data_source err param")
self._env.set_data_source(data_source)
def _init_quote_context(self):
self._quote_context = OpenQuoteContext(str(self._mod_config.api_svr.ip), int(self._mod_config.api_svr.port))
return self._quote_context
```
|
{
"source": "JesseXu117/vn.trader_Arbitrage",
"score": 2
}
|
#### File: ctaStrategy/strategy/strategyButterflyArbitrage_etf.py
```python
from ctaStrategy.ctaBase import *
from ctaStrategy.ctaArbitrageTemplate import CtaArbitrageTemplate
import talib as ta
import numpy as np
from datetime import *
EMPTY_STRING = ''
########################################################################
class ETFButterflyStrategy(CtaArbitrageTemplate):
    """Butterfly risk-free arbitrage strategy"""
    strategyName = u'蝶式无风险套利策略'  # strategy instance name (butterfly risk-free arbitrage)
className = u'ETFButterflyStrategy'
author = u'Jesse'
    # strategy parameters
    initDays = 0  # days of data used for initialization; 0 here because we only monitor arbitrage opportunities
fee = 0.0
option_type = 'Call'
underlying = '510050.SH'
K1 = 2.45
K2 = 2.50
K3 = 2.55
    # strategy variables
bar = None
barMinute = EMPTY_STRING
posDict = {}
ask_C1 = 0.0
ask_C2 = 0.0
ask_C3 = 0.0
bid_C1 = 0.0
bid_C2 = 0.0
bid_C3 = 0.0
ask_C1_volume = 0
ask_C2_volume = 0
ask_C3_volume = 0
bid_C1_volume = 0
bid_C2_volume = 0
bid_C3_volume = 0
ask_P1 = 0.0
ask_P2 = 0.0
ask_P3 = 0.0
bid_P1 = 0.0
bid_P2 = 0.0
bid_P3 = 0.0
ask_P1_volume = 0
ask_P2_volume = 0
ask_P3_volume = 0
bid_P1_volume = 0
bid_P2_volume = 0
bid_P3_volume = 0
# exercise_date = '2017-08-07'
# today = date.today()
# T = (datetime(int(exercise_date[:4]),int(exercise_date[5:7]),int(exercise_date[-2:])) -
# datetime(today.year,today.month,today.day)).days
# rate = 0.03
    # discounting is not considered
    # parameter list: stores the names of the parameters
paramList = ['strategyName',
'className',
'author',
'vtSymbol',
'Symbol1',
'Symbol2',
'Symbol3'
]
    # variable list: stores the names of the variables
    varList = ['inited',    # whether initialized
               'trading',   # trading state
               'pos',       # position state
'option_type',
'underlying',
'K1',
'K2',
'K3'
]
# ----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(ETFButterflyStrategy, self).__init__(ctaEngine, setting)
if setting:
self.symbol1 = setting['Symbol1']
self.symbol2 = setting['Symbol2']
self.symbol3 = setting['Symbol3']
if self.K1 >= self.K2 or self.K2 >= self.K3:
            raise ValueError('K1 < K2 < K3 must be satisfied!')
self.posDict[self.symbol1] = 0.0
self.posDict[self.symbol2] = 0.0
self.posDict[self.symbol3] = 0.0
# ----------------------------------------------------------------------
    def onInit(self):
        """Initialize the strategy (must be implemented by the subclass)"""
        if self.initDays == 0:
            return
        self.writeCtaLog(u'Strategy initialization')
for vtsymbol in self.vtSymbol:
initData = self.loadTick(self.initDays, vtsymbol)
for tick in initData:
self.onTick(tick)
self.putEvent()
# ----------------------------------------------------------------------
    def onStart(self):
        """Start the strategy (must be implemented by the subclass)"""
        self.writeCtaLog(u'Strategy started')
self.putEvent()
# ----------------------------------------------------------------------
    def onStop(self):
        """Stop the strategy (must be implemented by the subclass)"""
        self.writeCtaLog(u'Strategy stopped')
self.putEvent()
# ----------------------------------------------------------------------
    def onOrder(self, order):
        """Callback on order status update push (must be implemented by the subclass)"""
# self.lastOrder = order
pass
# ----------------------------------------------------------------------
    def onTick(self, tick):
        """Callback on market tick push (must be implemented by the subclass)"""
        # compute K-lines (bars)
print tick.vtSymbol
if self.option_type == 'Call':
if tick.vtSymbol == self.symbol1:
self.ask_C1 = tick.askPrice1
self.ask_C1_volume = tick.askVolume1
self.bid_C1 = tick.bidPrice1
self.bid_C1_volume = tick.bidVolume1
elif tick.vtSymbol == self.symbol2:
self.ask_C2 = tick.askPrice1
self.ask_C2_volume = tick.askVolume1
self.bid_C2 = tick.bidPrice1
self.bid_C2_volume = tick.bidVolume1
elif tick.vtSymbol == self.symbol3:
self.ask_C3 = tick.askPrice1
self.ask_C3_volume = tick.askVolume1
                self.bid_C3 = tick.bidPrice1
self.bid_C3_volume = tick.bidVolume1
#### fee ####
size = min(min(self.ask_C1_volume, self.bid_C2_volume), self.ask_C3_volume)
if (self.ask_C1 - self.bid_C2 + 2 * self.fee)/(self.K2 - self.K1) < (self.bid_C2 - self.ask_C3 - 2 * self.fee)/(self.K3 - self.K2):
print 'call option butterfly: open position'
self.buy(self.ask_C1, size, self.symbol1)
self.short(self.bid_C2, 2 * size, self.symbol2)
                self.buy(self.ask_C3, size, self.symbol3)
self.posDict[self.symbol1] += size
self.posDict[self.symbol2] -= 2 * size
self.posDict[self.symbol3] += size
### close a position
if self.posDict[self.symbol1] * self.posDict[self.symbol2] < 0 and \
self.posDict[self.symbol2] * self.posDict[self.symbol3] < 0:
if (self.bid_C1 - self.ask_C2 - 2 * self.fee) / (self.K2 - self.K1) > (
self.ask_C2 - self.bid_C3 + 2 * self.fee) / (self.K3 - self.K2):
self.sell(self.bid_C1, self.posDict[self.symbol1], self.symbol1)
self.cover(self.ask_C2, self.posDict[self.symbol2], self.symbol2)
self.sell(self.bid_C3, self.posDict[self.symbol3], self.symbol3)
print 'call option butterfly: close position'
### handle the failure of order
elif self.option_type == 'Put':
if tick.vtSymbol == self.symbol1:
self.ask_P1 = tick.askPrice1
self.ask_P1_volume = tick.askVolume1
self.bid_P1 = tick.bidPrice1
self.bid_P1_volume = tick.bidVolume1
elif tick.vtSymbol == self.symbol2:
self.ask_P2 = tick.askPrice1
self.ask_P2_volume = tick.askVolume1
self.bid_P2 = tick.bidPrice1
self.bid_P2_volume = tick.bidVolume1
elif tick.vtSymbol == self.symbol3:
self.ask_P3 = tick.askPrice1
self.ask_P3_volume = tick.askVolume1
                self.bid_P3 = tick.bidPrice1
self.bid_P3_volume = tick.bidVolume1
            #### fee ####
size = min(min(self.ask_P1_volume, self.bid_P2_volume), self.ask_P3_volume)
if (self.bid_P2 - self.ask_P1 - 2 * self.fee)/(self.K2 - self.K1) > (self.ask_P3 - self.bid_P2 + 2 * self.fee)/(self.K3 - self.K2):
print 'put option butterfly: open position'
self.buy(self.ask_P1, size, self.symbol1)
self.short(self.bid_P2, 2 * size, self.symbol2)
self.buy(self.ask_P3, size, self.symbol3)
self.posDict[self.symbol1] += size
self.posDict[self.symbol2] -= 2 * size
self.posDict[self.symbol3] += size
if self.posDict[self.symbol1] * self.posDict[self.symbol2] < 0 and \
self.posDict[self.symbol2] * self.posDict[self.symbol3] < 0:
if (self.bid_P1 - self.ask_P2 - 2 * self.fee) / (self.K2 - self.K1) > (
self.ask_P2 - self.bid_P3 + 2 * self.fee) / (self.K3 - self.K2):
self.sell(self.bid_P1, self.posDict[self.symbol1], self.symbol1)
self.cover(self.ask_P2, self.posDict[self.symbol2], self.symbol2)
self.sell(self.bid_P3, self.posDict[self.symbol3], self.symbol3)
print 'put option butterfly: close position'
### handle the failure of order
# ----------------------------------------------------------------------
    def onBar(self, bar):
        """Callback on bar push (must be implemented by the subclass)"""
pass
# ----------------------------------------------------------------------
    def onTrade(self, trade):
        """Callback on trade push (must be implemented by the subclass)"""
pass
if __name__ == '__main__':
    # allows running a backtest directly by double-clicking this file
    # PyQt4 is imported to make sure matplotlib uses PyQt4 instead of PySide, preventing initialization errors
from ctaStrategy.ctaBacktesting_Arbitrage import *
from PyQt4 import QtCore, QtGui
    # create the backtesting engine
    engine = BacktestingEngine()
    # set the engine's backtesting mode to bar (K-line) mode
    engine.setBacktestingMode(engine.BAR_MODE)
    # set the start date of the backtest data
    engine.setStartDate('20170101')
    # set product-related parameters
    engine.setSlippage(0.2)  # one tick for index futures
    engine.setRate(0.3 / 10000)  # commission of 0.3 per 10,000
    engine.setSize(300)  # contract size for index futures
    engine.setPriceTick(0.2)  # minimum price change for index futures
    # set the historical database to use
    engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
    # create the strategy object in the engine
    engine.initStrategy(ETFButterflyStrategy, {})
    # run the backtest
    engine.runBacktesting()
    # show the backtest results
    engine.showBacktestingResult()
    ## run optimization
    # setting = OptimizationSetting()  # create a new optimization settings object
    # setting.setOptimizeTarget('capital')  # rank optimization results by net strategy profit
    # setting.addParameter('atrLength', 12, 20, 2)  # first optimization parameter atrLength: start 12, end 20, step 2
    # setting.addParameter('atrMa', 20, 30, 5)  # second optimization parameter atrMa: start 20, end 30, step 5
    # setting.addParameter('rsiLength', 5)  # a parameter with a fixed value
    ## performance test environment: i7-3770 at 3.4 GHz, 8 cores, 16 GB RAM, Windows 7 Professional
    ## a pile of other programs were running during the test, so the numbers are indicative only
    # import time
    # start = time.time()
    ## single-process optimization with automatic result output; took 359 seconds
    # engine.runOptimization(AtrRsiStrategy, setting)
    ## multi-process optimization; took 89 seconds
    ##engine.runParallelOptimization(AtrRsiStrategy, setting)
    # print u'elapsed: %s' % (time.time() - start)
```
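The open condition in `onTick` is the call-price convexity (butterfly) check quoted with bid/ask prices and fees: buying C1 and C3 at the ask and selling two C2 at the bid is an arbitrage when the left slope of the call-price curve is smaller than the right slope. A minimal standalone sketch of that check follows; all prices, strikes and the fee below are hypothetical and not taken from the strategy:

```python
def call_butterfly_signal(ask_c1, bid_c2, ask_c3, k1, k2, k3, fee):
    # mirrors the call-side open condition in ETFButterflyStrategy.onTick:
    # open if (ask_C1 - bid_C2 + 2*fee)/(K2-K1) < (bid_C2 - ask_C3 - 2*fee)/(K3-K2)
    left_slope = (ask_c1 - bid_c2 + 2 * fee) / (k2 - k1)
    right_slope = (bid_c2 - ask_c3 - 2 * fee) / (k3 - k2)
    return left_slope < right_slope

# hypothetical quotes where C2 is priced too high relative to C1 and C3:
# cost of the butterfly = 0.0850 - 2 * 0.0600 + 0.0320 = -0.0030, a riskless credit
print(call_butterfly_signal(ask_c1=0.0850, bid_c2=0.0600, ask_c3=0.0320,
                            k1=2.45, k2=2.50, k3=2.55, fee=0.0))  # True
```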
|
{
"source": "Jesse-Yan/UniMatch",
"score": 4
}
|
#### File: python-backend/helper/dataHelper.py
```python
import sqlite3
## input is the value, search is either "OPEID" or "name"
def getData(input, search):
connection = sqlite3.connect("school-data.db")
cursor = connection.cursor()
query = "SELECT * FROM schools where {}=?".format(search)
result = cursor.execute(query, (input,))
row = result.fetchone()
connection.close()
return row
## Get all data from the database
def getAll():
connection = sqlite3.connect("school-data.db")
cursor = connection.cursor()
query = "SELECT * FROM schools"
cursor.execute(query)
result = cursor.fetchall()
connection.close()
return result
```
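A quick usage sketch for the two helpers above. It assumes the script runs from the `python-backend` directory with `school-data.db` and its `schools` table already in place; the school name queried below is made up:

```python
from helper.dataHelper import getData, getAll

# look up one school by name (hypothetical value)
row = getData("Example State University", "name")
if row:
    print(row)

# count every school in the table
print(len(getAll()))
```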
#### File: python-backend/model/user.py
```python
import sqlite3
class User(object):
def __init__(self, id, username, password, highschool, dreamschool):
self.id = id
self.username = username
self.password = password
self.highschool = highschool
self.dreamschool = dreamschool
@classmethod
def store_to_db(self, username, password, highschool, dreamschool):
if User.find_by_username(username):
return {"message": "user already exists", "status code": 400}
try:
connection = sqlite3.connect("user-data.db")
cursor = connection.cursor()
except:
            return {"message": "failed to connect to the database", "status code": 500}
query = "INSERT INTO users VALUES (NULL,?,?,?,?)"
try:
cursor.execute(query, (username, password, highschool, dreamschool,))
except:
return {"message": "fail to store to the database", "status code": 500}
connection.commit()
connection.close()
return {"message": "new user has been successfully stored", "status code": 201}
@classmethod
def update(cls, username, dreamschool):
try:
connection = sqlite3.connect("user-data.db")
cursor = connection.cursor()
except:
            return {"message": "failed to connect to the database", "status code": 500}
query = "UPDATE users SET dreamschool=? where username=?"
try:
cursor.execute(query, (dreamschool, username))
except:
return {"message": "fail to store to the database", "status code": 500}
connection.commit()
connection.close()
return {"message": "successfully updated", "status code": 201}
@classmethod
def find_by_username(cls, username):
try:
connection = sqlite3.connect("user-data.db")
cursor = connection.cursor()
except:
            return {"message": "failed to connect to the database", "status code": 500}
query = "SELECT * FROM users where username=?"
try:
result = cursor.execute(query, (username, ))
except:
return {"message": "fail to search in the database", "status code": 500}
row = result.fetchone()
if row:
user = cls(*row)
else:
user = None
connection.close()
return user
@classmethod
def find_by_id(cls, id):
try:
connection = sqlite3.connect("user-data.db")
cursor = connection.cursor()
except:
            return {"message": "failed to connect to the database", "status code": 500}
query = "SELECT * FROM users where id=?"
try:
result = cursor.execute(query, (id, ))
except:
return {"message": "fail to search in the database", "status code": 500}
row = result.fetchone()
if row:
user = cls(*row)
else:
user = None
connection.close()
return user
def json(self):
return {
"username": self.username,
"highschool": self.highschool,
"dreamschool": self.dreamschool
}
```
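A short usage sketch for the `User` model above. It assumes `user-data.db` exists with a `users` table matching the columns (id, username, password, highschool, dreamschool); all values below are made up:

```python
from model.user import User

# store a new user and inspect the status message
print(User.store_to_db("jess", "secret", "Central High", "MIT"))

# look the user up again and update the dream school
user = User.find_by_username("jess")
if isinstance(user, User):   # find_by_username may also return None or an error dict
    print(user.json())
    print(User.update("jess", "Stanford"))
```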
|
{
"source": "jessey-git/blender_vscode",
"score": 2
}
|
#### File: include/blender_vscode/load_addons.py
```python
import os
import bpy
import sys
import traceback
from pathlib import Path
from . communication import send_dict_as_json
from . environment import user_addon_directory, addon_directories
def setup_addon_links(addons_to_load):
if not os.path.exists(user_addon_directory):
os.makedirs(user_addon_directory)
if not str(user_addon_directory) in sys.path:
sys.path.append(str(user_addon_directory))
path_mappings = []
for source_path, module_name in addons_to_load:
if is_in_any_addon_directory(source_path):
load_path = source_path
else:
load_path = os.path.join(user_addon_directory, module_name)
create_link_in_user_addon_directory(source_path, load_path)
path_mappings.append({
"src": str(source_path),
"load": str(load_path)
})
return path_mappings
def load(addons_to_load):
for source_path, module_name in addons_to_load:
try:
bpy.ops.preferences.addon_enable(module=module_name)
except:
traceback.print_exc()
send_dict_as_json({"type" : "enableFailure", "addonPath" : str(source_path)})
def create_link_in_user_addon_directory(directory, link_path):
if os.path.exists(link_path):
os.remove(link_path)
if sys.platform == "win32":
import _winapi
_winapi.CreateJunction(str(directory), str(link_path))
else:
os.symlink(str(directory), str(link_path), target_is_directory=True)
def is_in_any_addon_directory(module_path):
for path in addon_directories:
if path == module_path.parent:
return True
return False
```
#### File: include/blender_vscode/ui.py
```python
import bpy
from . communication import (
get_blender_port,
get_ptvsd_port,
get_editor_address
)
class DevelopmentPanel(bpy.types.Panel):
bl_idname = "DEV_PT_panel"
bl_label = "Development"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "Dev"
def draw(self, context):
layout = self.layout
layout.label(text=f"Blender at Port {get_blender_port()}")
layout.label(text=f"ptvsd at Port {get_ptvsd_port()}")
layout.label(text=f"Editor at Address {get_editor_address()}")
classes = (
DevelopmentPanel,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
```
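The panel module above only registers its classes. Blender add-on modules usually pair this with an `unregister` hook; a minimal counterpart is sketched below, assuming the same `classes` tuple (whether the real extension wires this up elsewhere is not shown in this file):

```python
def unregister():
    # unregister in reverse order, mirroring register()
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)
```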
|
{
"source": "jessey-git/dconfig",
"score": 2
}
|
#### File: jessey-git/dconfig/DCONFIG_Booleans.py
```python
from collections import namedtuple
import bpy
from . import DCONFIG_Utils as dc
BoolData = namedtuple('BoolData', ["object", "collection"])
class Details:
BOOLEAN_OBJECT_NAME = "dc_bool_obj"
class DCONFIG_MT_boolean_pie(bpy.types.Menu):
bl_label = "Booleans"
@classmethod
def poll(cls, context):
return dc.active_object_available(context, {'MESH'})
def draw(self, context):
layout = self.layout
pie = layout.menu_pie()
# Left
split = pie.split()
col = split.column(align=True)
col.scale_y = 1.25
dc.setup_op(col, "dconfig.boolean_immediate", 'DOT', "Add", bool_operation='UNION')
dc.setup_op(col, "dconfig.boolean_immediate", 'DOT', "Intersect", bool_operation='INTERSECT')
dc.setup_op(col, "dconfig.boolean_immediate", 'DOT', "Subtract", bool_operation='DIFFERENCE')
# Right
split = pie.split()
col = split.column(align=True)
col.scale_y = 1.25
dc.setup_op(col, "dconfig.boolean_live", 'MOD_BOOLEAN', "Live Add", bool_operation='UNION', cutline=False, insetted=False)
dc.setup_op(col, "dconfig.boolean_live", 'MOD_BOOLEAN', "Live Intersect", bool_operation='INTERSECT', cutline=False, insetted=False)
dc.setup_op(col, "dconfig.boolean_live", 'MOD_BOOLEAN', "Live Subtract", bool_operation='DIFFERENCE', cutline=False, insetted=False)
dc.setup_op(col, "dconfig.boolean_live", 'MOD_BOOLEAN', "Live Subtract Inset", bool_operation='DIFFERENCE', cutline=False, insetted=True)
dc.setup_op(col, "dconfig.boolean_live", 'MOD_BOOLEAN', "Live Cutline", bool_operation='DIFFERENCE', cutline=True, insetted=False)
# Bottom
dc.setup_op(pie, "dconfig.boolean_toggle", 'HIDE_OFF', "Toggle Live Booleans")
# Top
dc.setup_op(pie, "dconfig.boolean_apply", text="Apply")
class DCONFIG_OT_boolean_live(bpy.types.Operator):
bl_idname = "dconfig.boolean_live"
bl_label = "DC Live Booleans"
bl_description = "Add selected geometry as a boolean to the active objects"
bl_options = {'REGISTER', 'UNDO'}
cutline: bpy.props.BoolProperty(name='Cutline', default=False)
insetted: bpy.props.BoolProperty(name='Insetted', default=False)
bool_operation: bpy.props.StringProperty(name="Boolean Operation")
@classmethod
def poll(cls, context):
return dc.active_mesh_selected(context)
def create_bool_obj(self, context, source, inset_move_list):
def rename_boolean_obj(source):
old_name = dc.full_name(source.object)
dc.rename(source.object, Details.BOOLEAN_OBJECT_NAME)
dc.trace(2, "Renamed {} to {}", old_name, dc.full_name(source.object))
if not source.object.name.startswith(Details.BOOLEAN_OBJECT_NAME):
rename_boolean_obj(source)
if self.cutline:
mod = source.object.modifiers.new('Cutline', "SOLIDIFY")
mod.thickness = 0.001
if self.insetted:
dc.make_active_object(context, source.object)
# Duplicate boolean source which then becomes the inset object
bpy.ops.object.duplicate()
inset = context.active_object
dc.rename(inset, "dc_bool_inset")
# Parent boolean source to the inset object
source.object.parent = inset
source.object.matrix_parent_inverse = inset.matrix_world.inverted()
inset_move_list.append(inset)
source.object.display_type = 'WIRE'
def create_bool_mod(self, target, source):
dc.trace(2, "Adding boolean modifier to {}", dc.full_name(target.object))
mod = target.object.modifiers.new(source.object.name, 'BOOLEAN')
mod.object = source.object
mod.operation = self.bool_operation
mod.show_expanded = False
# Booleans go as close to the top of the stack as possible...
mod_index = len(target.object.modifiers) - 1
while mod_index > 0 and target.object.modifiers[mod_index - 1].type not in ('BOOLEAN', 'SOLIDIFY'):
bpy.ops.object.modifier_move_up(modifier=mod.name)
mod_index -= 1
def prepare_objects(self, context):
source_separated = False
if context.mode == 'EDIT_MESH':
if context.active_object.data.total_vert_sel > 0:
bpy.ops.mesh.select_linked()
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.mesh.separate(type='SELECTED')
source_separated = True
else:
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
bpy.ops.mesh.select_all()
bpy.ops.mesh.normals_make_consistent(inside=False)
return source_separated
def prepare_data(self, context):
bool_targets = []
# Cleanup and separate if necessary...
source_separated = self.prepare_objects(context)
# We should have at least 2 mesh objects (1 target, 1 source) at this point now...
selected_meshes = dc.get_sorted_meshes(context.selected_objects, context.active_object)
if len(selected_meshes) < 2:
return None, None
# Track the target data
for obj in selected_meshes[:-1]:
own_collection = dc.find_collection(context, obj)
bool_targets.append(BoolData(obj, own_collection))
# Last object is the boolean source
source = selected_meshes[-1]
if source_separated:
source.modifiers.clear()
source_collection = dc.find_collection(context, source)
bool_source = BoolData(source, source_collection)
return bool_targets, bool_source
def execute(self, context):
dc.trace_enter(self)
# Process and prepare all necessary data for the later operations
# This supports multi-object editing by preparing data for every selected
# object as best as possible. There is always just 1 boolean source object
# to apply to 1 or more targets...
bool_targets, bool_source = self.prepare_data(context)
if bool_targets is None or bool_source is None:
return dc.warn_canceled(self, "At least 2 mesh objects must be selected")
dc.trace(1, "Data:")
for target in bool_targets:
dc.trace(2, "Target {}|{}", dc.full_name(target.object), target.collection.name)
dc.trace(2, "Source {}|{}", dc.full_name(bool_source.object), bool_source.collection.name)
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
bpy.ops.object.select_all(action='DESELECT')
# Perform actual boolean operations (keeping track of the final set of geometry to move)...
dc.trace(1, "Processing:")
inset_move_list = []
self.create_bool_obj(context, bool_source, inset_move_list)
for target in bool_targets:
self.create_bool_mod(target, bool_source)
# Place everything in the right collection...
bool_collection = dc.get_boolean_collection(context, True)
# Link the source into the boolean collection...
if bool_source.object.name not in bool_collection.objects:
bool_collection.objects.link(bool_source.object)
bool_source.collection.objects.unlink(bool_source.object)
# Pick the first target as the place to move the new inset geometry
first_target = bool_targets[0]
for obj in inset_move_list:
if obj.name not in first_target.collection.objects:
first_target.collection.objects.link(obj)
bpy.ops.object.select_all(action='DESELECT')
first_target.object.select_set(state=True)
return dc.trace_exit(self)
class DCONFIG_OT_boolean_immediate(bpy.types.Operator):
bl_idname = "dconfig.boolean_immediate"
bl_label = "DC Booleans"
bl_description = "Add selected geometry as a boolean to the active objects"
bl_options = {'REGISTER', 'UNDO'}
bool_operation: bpy.props.StringProperty(name="Boolean Operation")
@classmethod
def poll(cls, context):
ok_edit = context.mode == 'EDIT_MESH' and context.active_object.data.total_face_sel > 0
ok_object = context.mode == 'OBJECT' and len(context.selected_objects) > 1
return ok_edit or ok_object
def execute(self, context):
dc.trace_enter(self)
if context.mode == 'EDIT_MESH':
dc.trace(1, "Performing direct mesh boolean from selected geometry")
bpy.ops.mesh.select_linked()
context.active_object.update_from_editmode()
if context.active_object.data.total_vert_sel == len(context.active_object.data.vertices):
return dc.warn_canceled(self, "All vertices of object became selected")
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.mesh.intersect_boolean(operation=self.bool_operation)
else:
# Process and prepare all necessary data for the later operations
# This supports multi-object editing by preparing data for every selected
# object as best as possible. There is always just 1 boolean source object
# to apply to 1 or more targets...
bool_targets, bool_source = self.prepare_data(context)
if bool_targets is None or bool_source is None:
return dc.warn_canceled(self, "At least 2 mesh objects must be selected")
dc.trace(1, "Data:")
for target in bool_targets:
dc.trace(2, "Target {}", dc.full_name(target.object))
dc.trace(2, "Source {}", dc.full_name(bool_source.object))
# Perform actual boolean operations...
dc.trace(1, "Processing:")
for target in bool_targets:
dc.make_active_object(context, target.object)
self.apply_bool_mod(target, bool_source)
dc.trace(1, "Cleanup:")
bpy.ops.object.select_all(action='DESELECT')
source_name = dc.full_name(bool_source.object)
bool_source.object.select_set(True)
bpy.ops.object.delete(use_global=False, confirm=False)
dc.trace(2, "Deleted {}", source_name)
return dc.trace_exit(self)
def prepare_source(self, context, source):
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
bpy.ops.object.select_all(action='DESELECT')
dc.make_active_object(context, source)
bpy.ops.object.convert(target='MESH')
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
bpy.ops.mesh.select_all()
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
bpy.ops.object.select_all(action='DESELECT')
def prepare_data(self, context):
bool_targets = []
# We should have at least 2 mesh objects (1 target, 1 source) at this point now...
selected_meshes = dc.get_objects(context.selected_objects, {'MESH'})
if len(selected_meshes) < 2:
return None, None
# Track each target
for obj in selected_meshes[:-1]:
bool_targets.append(BoolData(obj, None))
# Last object is the boolean source; make sure all modifiers are applied and cleanup...
source = selected_meshes[-1]
self.prepare_source(context, source)
bool_source = BoolData(source, None)
return bool_targets, bool_source
def apply_bool_mod(self, target, source):
dc.trace(2, "Applying boolean modifier to {}", dc.full_name(target.object))
mod = target.object.modifiers.new(source.object.name, 'BOOLEAN')
mod.object = source.object
mod.operation = self.bool_operation
# Non-Live Booleans go to top-most location in the stack...
mod_index = len(target.object.modifiers) - 1
while mod_index > 0:
bpy.ops.object.modifier_move_up(modifier=mod.name)
mod_index -= 1
try:
if bpy.app.version >= (2, 90, 0):
bpy.ops.object.modifier_apply(modifier=mod.name)
else:
bpy.ops.object.modifier_apply(apply_as='DATA', modifier=mod.name)
except RuntimeError as e:
dc.trace(2, "Failed! Applying failed with {}", e)
class DCONFIG_OT_boolean_toggle(bpy.types.Operator):
bl_idname = "dconfig.boolean_toggle"
bl_label = "DC Toggle Cutters"
    bl_description = "Toggle boolean viewport visibility for the active object"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
dc.trace_enter(self)
bool_collection = dc.get_boolean_collection(context, False)
if bool_collection is not None:
hide_viewport = not bool_collection.hide_viewport
dc.trace(1, "Setting visibility to {}", hide_viewport)
bool_collection.hide_viewport = hide_viewport
return dc.trace_exit(self)
class DCONFIG_OT_boolean_apply(bpy.types.Operator):
bl_idname = "dconfig.boolean_apply"
bl_label = "DC Apply Booleans"
bl_description = "Apply all boolean modifiers for the selected objects"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.mode == 'OBJECT' and dc.active_mesh_selected(context)
def execute(self, context):
dc.trace_enter(self)
# Process all selected objects...
for current_object in dc.get_objects(context.selected_objects, {'MESH'}):
dc.trace(1, "Processing: {}", dc.full_name(current_object))
bpy.ops.object.select_all(action='DESELECT')
context.view_layer.objects.active = current_object
# We need to apply everything up until the last boolean modifier
mod_count = len(current_object.modifiers)
mod_apply_count = 0
for i in range(mod_count - 1, -1, -1):
if current_object.modifiers[i].type == 'BOOLEAN':
mod_apply_count = i + 1
break
dc.trace(2, "Applying {} of {} modifiers", mod_apply_count, mod_count)
orphaned_objects = []
for i in range(mod_apply_count):
modifier = current_object.modifiers[0]
dc.trace(3, "Applying {}", modifier.type)
if modifier.type == 'BOOLEAN' and modifier.object is not None:
orphaned_objects.append(modifier.object)
try:
if bpy.app.version >= (2, 90, 0):
bpy.ops.object.modifier_apply(modifier=modifier.name)
else:
bpy.ops.object.modifier_apply(apply_as='DATA', modifier=modifier.name)
except RuntimeError:
bpy.ops.object.modifier_remove(modifier=modifier.name)
# Only delete boolean objects that are not linked anywhere else...
dc.trace(2, "Processing orphaned objects: {}", dc.full_names(orphaned_objects))
orphans_to_delete = []
for orphan in orphaned_objects:
ok_to_delete = True
for obj in bpy.data.objects:
if obj not in orphaned_objects:
for modifier in obj.modifiers:
if modifier.type == 'BOOLEAN' and modifier.object is not None and modifier.object.name == orphan.name:
ok_to_delete = False
break
if not ok_to_delete:
break
if ok_to_delete:
orphans_to_delete.append(orphan)
# The collection must be visible for delete to work...
bool_collection = dc.get_boolean_collection(context, False)
if bool_collection is not None:
prev_hide_viewport = bool_collection.hide_viewport
bool_collection.hide_viewport = False
dc.trace(2, "Removing {} orphaned objects", len(orphans_to_delete))
if orphans_to_delete:
for obj in orphans_to_delete:
obj.select_set(True)
bpy.ops.object.delete(use_global=False, confirm=False)
# Now remove the collection...
if bool_collection is not None:
# The user may have inserted their own objects
if not bool_collection.all_objects:
dc.trace(2, "Removing collection: {}", bool_collection.name)
# Find correct parent collection to delete from...
parent_collection = None
for collection in bpy.data.collections:
if bool_collection.name in collection.children:
parent_collection = collection
break
if parent_collection is None:
parent_collection = context.scene.collection
parent_collection.children.unlink(bool_collection)
bpy.data.collections.remove(bool_collection)
else:
dc.trace(2, "Collection still contains objects; not removing: {}", bool_collection.name)
bool_collection.hide_viewport = prev_hide_viewport
return dc.trace_exit(self)
```
#### File: jessey-git/dconfig/DCONFIG_Mesh.py
```python
import math
import bpy
from . import DCONFIG_Utils as dc
class DCONFIG_MT_quick(bpy.types.Menu):
bl_label = "Quick"
def draw(self, context):
layout = self.layout
layout.prop(context.space_data, "lock_camera", text="Camera to View")
layout.prop(context.space_data.overlay, "show_face_orientation")
if context.mode != 'EDIT_MESH':
dc.setup_op(layout, "dconfig.wire_toggle", text="Toggle wire display")
layout.separator()
layout.menu_contents("DCONFIG_MT_modifiers")
else:
layout.prop(context.space_data.overlay, "show_statvis")
layout.separator()
layout.menu("DCONFIG_MT_modifiers", icon='MODIFIER')
layout.separator()
dc.setup_op(layout, "mesh.edges_select_sharp", icon='RESTRICT_SELECT_OFF', text="Select Sharp", sharpness=math.radians(45.1))
dc.setup_op(layout, "mesh.select_face_by_sides", text="Select N-Gons", type='NOTEQUAL', number=4, extend=False)
dc.setup_op(layout, "mesh.region_to_loop", text="Select Boundary Loop")
layout.separator()
dc.setup_op(layout, "mesh.remove_doubles", icon='AUTOMERGE_OFF', text="Weld vertices")
layout.separator()
layout.operator_context = 'INVOKE_REGION_WIN'
dc.setup_op(layout, "mesh.fill_grid", text="Fill Grid")
dc.setup_op(layout, "dconfig.make_quads", text="Make Quads")
dc.setup_op(layout, "dconfig.quick_panel", text="Quick Panel")
dc.setup_op(layout, "dconfig.subd_upres", text="SubD Up-Res")
dc.setup_op(layout, "dconfig.subd_bevel", text="SubD Bevel")
class DCONFIG_OT_make_quads(bpy.types.Operator):
bl_idname = "dconfig.make_quads"
bl_label = "DC Make Quads"
bl_description = "Triangulate and then convert to Quads"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.mode == 'EDIT_MESH' and dc.active_object_available(context, {'MESH'})
def execute(self, context):
dc.trace_enter(self)
angle = math.radians(60)
bpy.ops.mesh.quads_convert_to_tris(quad_method='BEAUTY', ngon_method='BEAUTY')
bpy.ops.mesh.tris_convert_to_quads(face_threshold=angle, shape_threshold=angle)
return dc.trace_exit(self)
class DCONFIG_OT_edge_crease(bpy.types.Operator):
bl_idname = "dconfig.edge_crease"
bl_label = "Crease Edge"
bl_description = "Change the crease of edges"
bl_options = {'REGISTER', 'UNDO'}
value: bpy.props.FloatProperty(name="Value", default=0, min=-1, max=1)
@classmethod
def poll(cls, context):
return context.mode == 'EDIT_MESH' and dc.active_object_available(context, {'MESH'})
def execute(self, context):
dc.trace_enter(self)
bpy.ops.transform.edge_crease(value=self.value)
return dc.trace_exit(self)
class DCONFIG_OT_subd_upres(bpy.types.Operator):
bl_idname = "dconfig.subd_upres"
bl_label = "DC SubD Up-Res"
bl_description = "Apply a level of subdivision to the mesh"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return dc.active_mesh_selected(context)
def execute(self, context):
dc.trace_enter(self)
was_edit = False
if context.mode == 'EDIT_MESH':
was_edit = True
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
target = context.active_object
mod_subd = target.modifiers.new("Subdivision", 'SUBSURF')
mod_subd.levels = 1
bpy.ops.object.modifier_move_to_index(modifier=mod_subd.name, index=0)
bpy.ops.object.modifier_apply(modifier=mod_subd.name)
if was_edit:
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
return dc.trace_exit(self)
class DCONFIG_OT_subd_bevel(bpy.types.Operator):
bl_idname = "dconfig.subd_bevel"
bl_label = "DC SubD friendly Bevel"
bl_description = "Create a subdivision friendly bevel"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return context.mode == 'EDIT_MESH' and dc.active_object_available(context, {'MESH'})
def execute(self, context):
dc.trace_enter(self)
target = context.active_object
target.update_from_editmode()
if target.data.total_edge_sel > 0:
dc.trace(1, "Using existing set of {} selected edges", target.data.total_edge_sel)
else:
dc.trace(1, "Selecting set of sharp edges")
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='EDGE')
bpy.ops.mesh.edges_select_sharp()
bpy.ops.mesh.bevel('INVOKE_DEFAULT', offset_type='OFFSET', offset=0.01, segments=2, profile=1, clamp_overlap=True, miter_outer='ARC')
return dc.trace_exit(self)
class DCONFIG_OT_subd_toggle(bpy.types.Operator):
bl_idname = "dconfig.subd_toggle"
bl_label = "DC SubD Toggle"
bl_description = "Toggle subdivision surface modifier"
bl_options = {'UNDO'}
levels: bpy.props.IntProperty(name="Levels", default=1, min=1, max=5)
@classmethod
def poll(cls, context):
return dc.active_object_available(context, {'MESH', 'CURVE', 'FONT'})
def execute(self, context):
dc.trace_enter(self)
objects = dc.get_objects(context.selected_objects, {'MESH', 'CURVE', 'FONT'})
if not objects:
objects = [context.active_object]
subd_visible = False
subd_invisible = False
# Track visibility states for all required objects...
for obj in objects:
mod_subd = next((mod for mod in reversed(obj.modifiers) if mod.type == 'SUBSURF'), None)
if mod_subd is None:
subd_invisible = True
else:
if mod_subd.show_viewport:
subd_visible = True
else:
subd_invisible = True
# If there's a mix, then push them towards visible, otherwise just toggle...
show_viewport_toggle = False
if subd_invisible and subd_visible:
show_viewport_toggle = True
for obj in objects:
mod_subd = next((mod for mod in reversed(obj.modifiers) if mod.type == 'SUBSURF'), None)
if mod_subd is None:
mod_subd = obj.modifiers.new("Subdivision", 'SUBSURF')
mod_subd.levels = self.levels
mod_subd.show_only_control_edges = True
mod_subd.show_on_cage = True
else:
if self.levels != mod_subd.levels:
mod_subd.levels = self.levels
mod_subd.show_viewport = True
else:
mod_subd.show_viewport = show_viewport_toggle if show_viewport_toggle else not mod_subd.show_viewport
return dc.trace_exit(self)
class DCONFIG_OT_wire_toggle(bpy.types.Operator):
bl_idname = "dconfig.wire_toggle"
bl_label = "DC Wire Toggle"
bl_description = "Toggle object wireframe display"
bl_options = {'UNDO'}
@classmethod
def poll(cls, context):
return dc.active_object_available(context, {'MESH', 'CURVE', 'FONT'})
def execute(self, context):
dc.trace_enter(self)
objects = dc.get_objects(context.selected_objects, {'MESH', 'CURVE', 'FONT'})
if not objects:
objects = [context.active_object]
wire_on = False
wire_off = False
# Track visibility states for all required objects...
for obj in objects:
if obj.display_type == 'WIRE':
wire_on = True
else:
wire_off = True
# If there's a mix, then push them towards wireframe display, otherwise just toggle...
force_wire = False
if wire_on and wire_off:
force_wire = True
for obj in objects:
obj.display_type = 'WIRE' if force_wire else 'TEXTURED' if obj.display_type == 'WIRE' else 'WIRE'
return dc.trace_exit(self)
class DCONFIG_OT_quick_panel(bpy.types.Operator):
bl_idname = "dconfig.quick_panel"
bl_label = "DC Quick Panel"
bl_description = "Panel macro"
bl_options = {'REGISTER', 'UNDO'}
scale: bpy.props.FloatProperty(name="Scale", default=1, step=1, min=0, max=2)
offset: bpy.props.FloatProperty(name="Offset", default=1, step=1, min=0, max=2)
inset: bpy.props.FloatProperty(name="Inset", default=0.5, step=1, min=0, max=1)
depth: bpy.props.FloatProperty(name="Depth", default=0.5, step=1, min=0, max=1)
invert: bpy.props.BoolProperty(name="Invert", default=False)
@classmethod
def poll(cls, context):
return context.mode == 'EDIT_MESH' and dc.active_object_available(context, {'MESH'})
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.prop(self, "scale", slider=True)
layout.separator()
layout.prop(self, "offset", slider=True)
layout.prop(self, "inset", slider=True)
layout.prop(self, "depth", slider=True)
layout.prop(self, "invert")
def execute(self, context):
dc.trace_enter(self)
bevel_offset1 = (0.01 / 4) * self.offset * self.scale
inset_thickness = bevel_offset1 * self.inset
inset_depth = 0.02 * self.depth * self.scale * (-1 if self.invert else 1)
bevel_offset2 = math.fabs(inset_depth) / 3
bpy.ops.object.vertex_group_assign_new()
vgroup = context.active_object.vertex_groups.active
bpy.ops.mesh.bevel(offset_type='OFFSET', offset=bevel_offset1, offset_pct=0, segments=2)
bpy.ops.mesh.inset(thickness=inset_thickness, depth=-inset_depth, use_boundary=False)
bpy.ops.mesh.select_more()
bpy.ops.mesh.region_to_loop()
bpy.ops.mesh.bevel(offset_type='OFFSET', offset=bevel_offset2, segments=2, profile=1, clamp_overlap=True, miter_outer='ARC')
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.vertex_group_set_active(group=vgroup.name)
bpy.ops.object.vertex_group_select()
bpy.ops.object.vertex_group_remove(all=False, all_unlocked=False)
for _ in range(4):
bpy.ops.mesh.select_less()
return dc.trace_exit(self)
focus_settings = {
"EDIT_CURVE": True,
"EDIT_MESH": True,
"EDIT_ARMATURE": True,
"POSE": True,
}
class DCONFIG_OT_mesh_focus(bpy.types.Operator):
bl_idname = "dconfig.mesh_focus"
bl_label = "DC Mesh Focus"
bl_description = "Focus on selected mesh elements and hide everything else"
bl_options = {'REGISTER'}
def execute(self, context):
dc.trace_enter(self)
if context.mode == 'OBJECT':
dc.trace(1, "View selected")
bpy.ops.view3d.view_selected()
bpy.ops.view3d.zoom(delta=-1, use_cursor_init=True)
elif context.mode == 'SCULPT':
dc.trace(1, "View selected")
bpy.ops.view3d.view_selected()
else:
current_focus = focus_settings[context.mode]
if current_focus:
dc.trace(1, "Focus")
if context.mode == 'EDIT_MESH':
bpy.ops.mesh.hide(unselected=True)
elif context.mode == 'EDIT_CURVE':
bpy.ops.curve.hide(unselected=True)
bpy.ops.view3d.view_selected()
bpy.ops.view3d.zoom(delta=-1, use_cursor_init=True)
else:
dc.trace(1, "Unfocus")
if context.mode == 'EDIT_MESH':
bpy.ops.mesh.reveal(select=False)
elif context.mode == 'EDIT_CURVE':
bpy.ops.curve.reveal(select=False)
focus_settings[context.mode] = not current_focus
return dc.trace_exit(self)
```
|
{
"source": "jesse-y/Multiplayer-Asteroids",
"score": 4
}
|
#### File: Multiplayer-Asteroids/server/id_manager.py
```python
from collections import deque
class IdManager:
def __init__(self, max_id=0):
self.last_id = 0
self.max_id = max_id
self.ids = deque()
def assign_id(self):
result = None
if not self.ids and self.last_id == self.max_id and self.max_id > 0:
print('IdManager: max ids requested. last_id={}, max_id={}, ids={}'.format(self.last_id, self.max_id, self.ids))
result = -1
raise Exception
elif self.ids:
result = self.ids.popleft()
else:
self.last_id += 1
result = self.last_id
#print('assigning id: {}. last_id={}, max_id={}, ids={}'.format(result, self.last_id, self.max_id, self.ids))
return result
def release_id(self, new_id):
if new_id == -1:
print('attempted to release invalid id! ({})'.format(new_id))
raise Exception
#print('releasing id: {}. last_id={}, max_id={}, ids={}'.format(new_id, self.last_id, self.max_id, self.ids))
self.ids.append(new_id)
```
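A small usage sketch of `IdManager`: ids are handed out sequentially, released ids are recycled before new ones are minted, and exhausting `max_id` raises. The limit below is arbitrary, and the import assumes the script runs from the `server` directory:

```python
from id_manager import IdManager

ids = IdManager(max_id=3)
a = ids.assign_id()   # 1
b = ids.assign_id()   # 2
ids.release_id(a)     # 1 goes back into the pool
c = ids.assign_id()   # 1 again, recycled first
d = ids.assign_id()   # 3
print(a, b, c, d)     # 1 2 1 3
```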
|
{
"source": "Jesse-Yung/jsonclasses",
"score": 3
}
|
#### File: jsonclasses/modifiers/after_modifier.py
```python
from __future__ import annotations
from typing import Callable, TYPE_CHECKING
from datetime import date, datetime
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
from ..types import Types
class AfterModifier(Modifier):
    """After modifier validates that a date value comes after the given point."""
def __init__(self, point: date | datetime | Callable | Types) -> None:
self.point = point
def validate(self, ctx: Ctx) -> None:
if ctx.val is None:
return
value = ctx.val
point = self.resolve_param(self.point, ctx)
if type(value) is date and type(point) is date:
if value <= point:
ctx.raise_vexc('value is too early')
if type(value) is datetime and type(point) is datetime:
if value <= point:
ctx.raise_vexc('value is too early')
else:
value = value
point = point
if type(value) is date:
value = datetime.combine(value, datetime.min.time())
if type(point) is date:
point = datetime.combine(point, datetime.min.time())
if value <= point:
ctx.raise_vexc('value is too early')
```
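The validator above compares like types directly and promotes a bare `date` to midnight only when the two operands differ in type. A standalone sketch of that promotion rule, without the jsonclasses `Ctx` machinery:

```python
from datetime import date, datetime

def is_after(value, point):
    # mirrors AfterModifier.validate: equal types compare directly,
    # mixed date/datetime operands are promoted to datetime at midnight first
    if type(value) is date and type(point) is date:
        return value > point
    if type(value) is datetime and type(point) is datetime:
        return value > point
    if type(value) is date:
        value = datetime.combine(value, datetime.min.time())
    if type(point) is date:
        point = datetime.combine(point, datetime.min.time())
    return value > point

print(is_after(datetime(2024, 1, 2, 8, 30), date(2024, 1, 2)))  # True: 08:30 is after midnight
```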
#### File: jsonclasses/modifiers/authidentity_modifier.py
```python
from __future__ import annotations
from typing import Callable, TYPE_CHECKING
from inspect import signature
from ..fdef import Fdef
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
class AuthIdentityModifier(Modifier):
"""Fields marked with authidentity are used for authorization.
"""
def define(self, fdef: Fdef) -> None:
fdef._auth_identity = True
```
#### File: jsonclasses/modifiers/canc_modifier.py
```python
from __future__ import annotations
from typing import Callable, TYPE_CHECKING
from .cancu_modifier import CanCUModifier
from ..fdef import Fdef
if TYPE_CHECKING:
from ..ctx import Ctx
from ..types import Types
class CanCModifier(CanCUModifier):
"""Whether this operator can create on this field.
"""
def validate(self, ctx: Ctx) -> None:
if ctx.owner.is_new:
super().validate(ctx)
```
#### File: jsonclasses/modifiers/ceil_modifier.py
```python
from __future__ import annotations
from typing import Any, TYPE_CHECKING
from math import ceil
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
class CeilModifier(Modifier):
    """Ceil modifier rounds a number value up to the nearest integer."""
def transform(self, ctx: Ctx) -> Any:
is_number = type(ctx.val) is int or type(ctx.val) is float
return ceil(ctx.val) if is_number else ctx.val
```
#### File: jsonclasses/modifiers/date_modifier.py
```python
from __future__ import annotations
from typing import Any, TYPE_CHECKING
from datetime import date, datetime
from ..fdef import FType
from .type_modifier import TypeModifier
if TYPE_CHECKING:
from ..ctx import Ctx
class DateModifier(TypeModifier):
    """Date modifier validates value against the date type."""
def __init__(self):
super().__init__()
self.cls = date
self.ftype = FType.DATE
def transform(self, ctx: Ctx) -> Any:
if ctx.val is None:
return None
elif isinstance(ctx.val, str):
try:
return date.fromisoformat(ctx.val[:10])
except ValueError:
ctx.raise_vexc('wrong date format')
elif type(ctx.val) == datetime:
return date(ctx.val.year,
ctx.val.month,
ctx.val.day)
else:
return ctx.val
def tojson(self, ctx: Ctx) -> Any:
return None if ctx.val is None else self._jsondate(ctx.val)
def _jsondate(self, d: date) -> str:
return d.isoformat() + 'T00:00:00.000Z'
```
#### File: jsonclasses/modifiers/hasprefix_modifier.py
```python
from __future__ import annotations
from typing import Any, TYPE_CHECKING, Callable
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
from ..types import Types
class HasPrefixModifier(Modifier):
    """Has prefix modifier checks whether a str or list value starts with the given prefix."""
def __init__(self, prefix: str | list[Any] | Callable | Types) -> None:
self.prefix = prefix
    def validate(self, ctx: Ctx) -> None:
        prefix = self.resolve_param(self.prefix, ctx)
        if isinstance(ctx.val, list):
            if ctx.val[:len(prefix)] != prefix:
                ctx.raise_vexc('prefix is not found')
        elif type(ctx.val) is str:
            if not ctx.val.startswith(prefix):
                ctx.raise_vexc('prefix is not found')
```
#### File: jsonclasses/modifiers/instanceof_modifier.py
```python
from __future__ import annotations
from jsonclasses.vmsgcollector import VMsgCollector
from jsonclasses.jfield import JField
from typing import Any, Sequence, Union, cast, TYPE_CHECKING
from ..fdef import (
Fdef, FStore, FType, Nullability, WriteRule, ReadRule, Strictness
)
from ..excs import ValidationException
from .modifier import Modifier
from ..keypath import concat_keypath, initial_keypaths
if TYPE_CHECKING:
from ..jobject import JObject
from ..ctx import Ctx
class InstanceOfModifier(Modifier):
"""InstanceOf modifier validates and transforms JSON Class instance."""
def __init__(self, raw_type: Union[str, type[JObject]]) -> None:
self.raw_type = raw_type
def define(self, fdef: Fdef) -> None:
fdef._ftype = FType.INSTANCE
fdef._raw_inst_types = self.raw_type
def validate(self, ctx: Ctx) -> None:
from ..jobject import JObject
# only validate if there is a value
if ctx.val is None:
return
# only validate an instance once in the circular referenced map
if ctx.mgraph.has(ctx.val):
return
ctx.mgraph.put(ctx.val)
cls = cast(type, ctx.fdef.inst_cls)
all_fields = ctx.ctxcfg.all_fields
if all_fields is None:
all_fields = cls.cdef.jconf.validate_all_fields
if not isinstance(ctx.val, cls):
ctx.raise_vexc(f'value is not instance of {cls.__name__}')
only_validate_modified = not ctx.val.is_new
modified_fields = []
if only_validate_modified:
modified_fields = list(initial_keypaths((ctx.val.modified_fields)))
ctor = VMsgCollector()
val = cast(JObject, ctx.val)
for field in val.__class__.cdef.fields:
fname = field.name
ffdef = field.fdef
fval = getattr(ctx.val, fname)
if field.fdef.fstore == FStore.EMBEDDED:
if only_validate_modified and fname not in modified_fields:
continue
try:
if field.fdef.ftype == FType.INSTANCE:
fval_ctx = ctx.nexto(fval, fname, ffdef)
else:
fval_ctx = ctx.nextvo(fval, fname, ffdef, ctx.original or val)
field.types.modifier.validate(fval_ctx)
except ValidationException as exception:
if all_fields:
ctor.receive(exception.keypath_messages)
else:
raise exception
if ctor.has_msgs:
ctx.raise_mvexc(ctor.messages)
def _strictness_check(self, ctx: Ctx, dest: JObject) -> None:
available_names = dest.__class__.cdef.available_names
for k in ctx.val.keys():
if k not in available_names:
kp = concat_keypath(ctx.skeypathr, k)
ctx.raise_mvexc({kp: 'key is not allowed'})
def _fill_default_value(self, field: JField, dest: JObject, ctx: Ctx):
if field.default is not None:
setattr(dest, field.name, field.default)
else:
dctx = ctx.default(ctx.original, field.name, field.fdef)
tsfmd = field.types.modifier.transform(dctx)
setattr(dest, field.name, tsfmd)
def _has_field_value(self, field: JField, keys: Sequence[str]) -> bool:
return field.json_name in keys or field.name in keys
def _get_field_value(self, field: JField, ctx: Ctx) -> Any:
field_value = ctx.val.get(field.json_name)
if field_value is None:
field_value = ctx.val.get(field.name)
return field_value
# pylint: disable=arguments-differ, too-many-locals, too-many-branches
def transform(self, ctx: Ctx) -> Any:
from ..types import Types
from ..jobject import JObject
# handle non normal value
if ctx.val is None:
return ctx.original
if not isinstance(ctx.val, dict):
return ctx.original if ctx.original is not None else ctx.val
# figure out types, cls and dest
cls = cast(type[JObject], ctx.fdef.inst_cls)
pfield = cls.cdef.primary_field
if pfield:
pkey = pfield.name
pvalue = cast(Union[str, int, None], ctx.val.get(pkey))
else:
pvalue = None
soft_apply_mode = False
if ctx.original is not None:
dest = ctx.original
if pvalue is not None:
ctx.mgraph.putp(pvalue, dest)
elif pvalue is not None:
exist_item = ctx.mgraph.getp(cls, pvalue)
if exist_item is not None:
dest = exist_item
soft_apply_mode = True
else:
dest = cls()
ctx.mgraph.putp(pvalue, dest)
else:
dest = cls()
ctx.mgraph.put(dest)
# strictness check
strictness = cast(bool, cls.cdef.jconf.strict_input)
if ctx.fdef is not None:
if ctx.fdef.strictness == Strictness.STRICT:
strictness = True
elif ctx.fdef.strictness == Strictness.UNSTRICT:
strictness = False
if strictness:
self._strictness_check(ctx, dest)
# fill values
dict_keys = list(ctx.val.keys())
nonnull_ref_lists: list[str] = []
for field in dest.__class__.cdef.fields:
if not self._has_field_value(field, dict_keys):
if field.fdef.is_ref:
fdef = field.fdef
if fdef.ftype == FType.LIST:
if fdef.collection_nullability == Nullability.NONNULL:
nonnull_ref_lists.append(field.name)
elif fdef.fstore == FStore.LOCAL_KEY:
tsfm = dest.__class__.cdef.jconf.ref_key_encoding_strategy
refname = tsfm(field)
if ctx.val.get(refname) is not None:
setattr(dest, refname, ctx.val.get(refname))
crefname = dest.__class__.cdef.jconf.key_encoding_strategy(refname)
if ctx.val.get(crefname) is not None:
setattr(dest, refname, ctx.val.get(crefname))
pass
elif ctx.ctxcfg.fill_dest_blanks and not soft_apply_mode:
if field.fdef.fstore != FStore.CALCULATED:
self._fill_default_value(field, dest, ctx)
continue
field_value = self._get_field_value(field, ctx)
allow_write_field = True
if field.fdef.write_rule == WriteRule.NO_WRITE:
allow_write_field = False
if field.fdef.write_rule == WriteRule.WRITE_ONCE:
cfv = getattr(dest, field.name)
if (cfv is not None) and (not isinstance(cfv, Types)):
allow_write_field = False
if field.fdef.write_rule == WriteRule.WRITE_NONNULL:
if field_value is None:
allow_write_field = False
if not allow_write_field:
if ctx.ctxcfg.fill_dest_blanks:
if field.fdef.fstore != FStore.CALCULATED:
self._fill_default_value(field, dest, ctx)
continue
fctx = ctx.nextvo(field_value, field.name, field.fdef, dest)
tsfmd = field.types.modifier.transform(fctx)
if field.fdef.fstore != FStore.CALCULATED:
setattr(dest, field.name, tsfmd)
for cname in nonnull_ref_lists:
if getattr(dest, cname) is None:
setattr(dest, cname, [])
return dest
def tojson(self, ctx: Ctx) -> Any:
from ..jobject import JObject
if ctx.val is None:
return None
val = cast(JObject, ctx.val)
retval = {}
clschain = ctx.idchain
cls_name = val.__class__.cdef.name
rr = ctx.ctxcfg.reverse_relationship
no_key_refs = cls_name in clschain
for field in val.__class__.cdef.fields:
fval = getattr(val, field.name)
fd = field.types.fdef
jf_name = field.json_name
ignore_writeonly = ctx.ctxcfg.ignore_writeonly
isrr = False
if not rr:
if field.foreign_field:
isrr = field.foreign_field.fdef == ctx.fdef
if fd.fstore == FStore.LOCAL_KEY:
rk = val.__class__.cdef.jconf.ref_key_encoding_strategy(field)
jrk = val.__class__.cdef.jconf.key_encoding_strategy(rk)
retval[jrk] = getattr(val, rk)
if fd.fstore == FStore.LOCAL_KEY and (isrr or no_key_refs):
continue
if fd.fstore == FStore.FOREIGN_KEY and (isrr or no_key_refs):
continue
if fd.read_rule == ReadRule.NO_READ and not ignore_writeonly:
continue
if fd.fstore == FStore.TEMP:
continue
if field.fdef.ftype == FType.INSTANCE:
ictx = ctx.nextoc(fval, field.name, field.fdef, cls_name)
else:
ictx = ctx.nextvc(fval, field.name, field.fdef, cls_name)
retval[jf_name] = field.types.modifier.tojson(ictx)
return retval
def serialize(self, ctx: Ctx) -> Any:
from ..jobject import JObject
value = cast(JObject, ctx.val)
if value is None:
return None
exist_item = ctx.mgraph.get(value)
if exist_item is not None: # Don't do twice for an object
return value
ctx.mgraph.put(value)
should_update = False
if value.is_modified or value.is_new:
should_update = True
for field in value.__class__.cdef.fields:
if field.fdef.is_ref or field.fdef.is_inst or should_update or field.fdef.force_set_on_save:
if field.fdef.fstore == FStore.LOCAL_KEY:
if getattr(value, field.name) is None:
tsf = value.__class__.cdef.jconf.ref_key_encoding_strategy
if getattr(value, tsf(field)) is not None:
continue
field_value = getattr(value, field.name)
fctx = ctx.nextv(field_value, field.name, field.fdef)
tsfmd = field.types.modifier.serialize(fctx)
setattr(value, field.name, tsfmd)
if value.is_modified or value.is_new:
should_update = True
return value
```
#### File: jsonclasses/modifiers/int_modifier.py
```python
from ..fdef import FType
from .type_modifier import TypeModifier
class IntModifier(TypeModifier):
"""Int modifier validates value against int type."""
def __init__(self):
super().__init__()
self.cls = int
self.ftype = FType.INT
self.exact_type = True
```
#### File: jsonclasses/modifiers/neq_modifier.py
```python
from __future__ import annotations
from typing import Any, TYPE_CHECKING, Callable
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
from ..types import Types
class NeqModifier(Modifier):
    """Neq modifier validates that the value is not equal to the given value."""
def __init__(self, val: Any | Types | Callable):
self.val = val
def validate(self, ctx: Ctx) -> None:
if ctx.val == self.resolve_param(self.val, ctx):
ctx.raise_vexc('value is not unequal')
```
#### File: jsonclasses/modifiers/padstart_modifier.py
```python
from __future__ import annotations
from typing import Any, TYPE_CHECKING, Callable
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
from ..types import Types
class PadStartModifier(Modifier):
    """PadStart modifier pads the start of a str value with the given char up to the target length."""
def __init__(self, char: str | Callable | Types, target_length: int | Callable | Types) -> None:
self.char = char
self.target_length = target_length
def transform(self, ctx: Ctx) -> Any:
char = self.resolve_param(self.char, ctx)
target_length = self.resolve_param(self.target_length, ctx)
if type(ctx.val) is str:
pad_str = (target_length - len(ctx.val)) * char
return pad_str + ctx.val
else:
return ctx.val
```
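For string values padded with a single character, the transform above behaves like Python's built-in `str.rjust`; a tiny equivalence check with arbitrary values (multi-character `char` arguments would diverge from `rjust`):

```python
def pad_start(value, char, target_length):
    # mirrors PadStartModifier.transform for str values
    return (target_length - len(value)) * char + value

assert pad_start("42", "0", 5) == "42".rjust(5, "0") == "00042"
```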
#### File: jsonclasses/modifiers/primary_modifier.py
```python
from ..fdef import Fdef
from .modifier import Modifier
class PrimaryModifier(Modifier):
"""Primary modifier marks a field as the primary key."""
def define(self, fdef: Fdef) -> None:
fdef._primary = True
```
#### File: jsonclasses/modifiers/readonly_modifier.py
```python
from ..fdef import Fdef, WriteRule
from .modifier import Modifier
class ReadonlyModifier(Modifier):
    """Readonly modifier marks a field as readonly."""
def define(self, fdef: Fdef) -> None:
fdef._write_rule = WriteRule.NO_WRITE
```
#### File: jsonclasses/modifiers/required_modifier.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
from ..fdef import Fdef
from .modifier import Modifier
from ..fdef import FStore
from ..jconf import JConf
from ..isjsonclass import isjsonobject
if TYPE_CHECKING:
from ..ctx import Ctx
class RequiredModifier(Modifier):
"""Mark a field as required."""
def define(self, fdef: Fdef) -> None:
fdef._required = True
def validate(self, ctx: Ctx) -> None:
storage = FStore.EMBEDDED
if ctx.fdef is not None:
storage = ctx.fdef.fstore
if storage == FStore.FOREIGN_KEY: # we don't check foreign key
return
if storage == FStore.LOCAL_KEY:
if ctx.val is None: # check key presence
jconf: JConf = ctx.holder.__class__.cdef.jconf
ko = str(ctx.keypathh[0])
field = ctx.holder.__class__.cdef.field_named(ko)
local_key = jconf.ref_key_encoding_strategy(field)
if isinstance(ctx.holder, dict):
if ctx.holder.get(local_key) is None:
ctx.raise_vexc('value required')
elif isjsonobject(ctx.holder):
try:
local_key_value = getattr(ctx.holder, local_key)
except AttributeError:
ctx.raise_vexc('value required')
if local_key_value is None:
ctx.raise_vexc('value required')
return
if ctx.val is None:
ctx.raise_vexc('value required')
```
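The LOCAL_KEY branch above can be summarized with a small illustrative helper (a sketch of the dict-holder case only; `local_key_satisfies_required` is not a library function):
```python
# for a local-key reference such as `author`, the required check passes when
# either the linked object itself or its encoded key (e.g. `author_id`) is set
def local_key_satisfies_required(holder: dict, field_name: str, local_key: str) -> bool:
    if holder.get(field_name) is not None:
        return True
    return holder.get(local_key) is not None

assert local_key_satisfies_required({'author': None, 'author_id': 'oid123'},
                                    'author', 'author_id')
assert not local_key_satisfies_required({'author': None, 'author_id': None},
                                        'author', 'author_id')
```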
#### File: jsonclasses/modifiers/this_modifier.py
```python
from __future__ import annotations
from typing import Any, TYPE_CHECKING
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
class ThisModifier(Modifier):
"""Get the owner object of this field.
"""
def transform(self, ctx: Ctx) -> Any:
return ctx.owner
```
#### File: jsonclasses/modifiers/tocap_modifier.py
```python
from __future__ import annotations
from typing import Any, TYPE_CHECKING
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
class ToCapModifier(Modifier):
"""capitalize string"""
def transform(self, ctx: Ctx) -> Any:
return ctx.val.capitalize() if isinstance(ctx.val, str) else ctx.val
```
#### File: jsonclasses/modifiers/tonextmin_modifier.py
```python
from __future__ import annotations
from datetime import datetime, timedelta
from typing import Any, TYPE_CHECKING
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
class ToNextMinModifier(Modifier):
"""Change the minute to the next minute"""
    def transform(self, ctx: Ctx) -> Any:
        if type(ctx.val) is datetime:
            return (ctx.val + timedelta(minutes=1)).replace(microsecond=0, second=0)
        return ctx.val
```
#### File: jsonclasses/modifiers/transform_modifier.py
```python
from __future__ import annotations
from typing import Callable, Any, TYPE_CHECKING
from inspect import signature
from .modifier import Modifier
if TYPE_CHECKING:
from ..ctx import Ctx
from ..types import Types
class TransformModifier(Modifier):
"""Transform modifier transforms value."""
def __init__(self, transformer: Callable | Types) -> None:
from ..types import Types
if isinstance(transformer, Types):
self.transformer = transformer
else:
if not callable(transformer):
raise ValueError('transformer is not callable')
params_len = len(signature(transformer).parameters)
if params_len > 2 or params_len < 1:
raise ValueError('not a valid transformer')
self.transformer = transformer
def transform(self, ctx: Ctx) -> Any:
from ..types import Types
if isinstance(self.transformer, Types):
return self.transformer.modifier.transform(ctx)
if ctx.val is None:
return None
params_len = len(signature(self.transformer).parameters)
if params_len == 1:
return self.transformer(ctx.val)
elif params_len == 2:
return self.transformer(ctx.val, ctx)
```
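The constructor above only accepts callables that take one or two parameters; here is a minimal sketch of that check in isolation (assumed helper, not library API):
```python
from inspect import signature

def check_transformer(transformer):
    # same rules as TransformModifier.__init__ applies to plain callables
    if not callable(transformer):
        raise ValueError('transformer is not callable')
    params_len = len(signature(transformer).parameters)
    if params_len > 2 or params_len < 1:
        raise ValueError('not a valid transformer')
    return transformer

check_transformer(lambda val: val.strip())   # ok: value-only transformer
check_transformer(lambda val, ctx: val)      # ok: value plus context
# check_transformer(lambda: None)            # would raise: takes no parameters
```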
#### File: jsonclasses/modifiers/unqueryable_modifier.py
```python
from ..fdef import Fdef, Queryability
from .modifier import Modifier
class UnqueryableModifier(Modifier):
"""Unqueryable modifier marks a column should be unqueryable."""
def define(self, fdef: Fdef) -> None:
fdef._queryability = Queryability.UNQUERYABLE
```
|
{
"source": "Jesse-Yung/jsonclasses-pymongo",
"score": 2
}
|
#### File: jsonclasses-pymongo/jsonclasses_pymongo/coder.py
```python
from __future__ import annotations
from typing import cast, TYPE_CHECKING
from jsonclasses.jfield import JField
from jsonclasses.fdef import FType, FStore
from inflection import camelize
from .connection import Connection
if TYPE_CHECKING:
from .pymongo_object import PymongoObject
class Coder():
def is_id_field(self, field: JField) -> bool:
return field.fdef.primary
def is_instance_field(self, field: JField) -> bool:
return field.fdef.ftype == FType.INSTANCE
def is_list_field(self, field: JField) -> bool:
return field.fdef.ftype == FType.LIST
def is_list_instance_field(self, field: JField,
cls: type[PymongoObject]) -> bool:
if not self.is_list_field(field):
return False
t = field.fdef.item_types
if t.fdef.raw_inst_types is not None:
return True
return False
def is_foreign_key_storage(self, field: JField) -> bool:
fstore = field.fdef.fstore
return fstore == FStore.FOREIGN_KEY
def is_local_key_storage(self, field: JField) -> bool:
fstore = field.fdef.fstore
return fstore == FStore.LOCAL_KEY
def is_foreign_key_reference_field(self, field: JField) -> bool:
return (self.is_instance_field(field) and
self.is_foreign_key_storage(field))
def is_foreign_keys_reference_field(self, field: JField) -> bool:
return (self.is_list_field(field) and
self.is_foreign_key_storage(field))
def is_local_key_reference_field(self, field: JField) -> bool:
return (self.is_instance_field(field) and
self.is_local_key_storage(field))
def is_local_keys_reference_field(self, field: JField) -> bool:
return self.is_list_field(field) and self.is_local_key_storage(field)
def is_join_table_field(self, field: JField) -> bool:
return field.types.fdef.use_join_table is True
def list_instance_type(self, field: JField) -> type[PymongoObject]:
from .pymongo_object import PymongoObject
fd = field.types.fdef
item_types = fd.item_types
item_fd = item_types.fdef
return cast(type[PymongoObject], item_fd.raw_inst_types)
def join_table_name(self,
cls_a: type[PymongoObject],
field_a: str,
cls_b: type[PymongoObject],
field_b: str) -> str:
connection = Connection.from_class(cls_a)
cabase = connection.collection_from(cls_a).name
cbbase = connection.collection_from(cls_b).name
ca = cabase + camelize(field_a).lower()
cb = cbbase + camelize(field_b).lower()
return ca + cb if ca < cb else cb + ca
```
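The ordering trick in `join_table_name` guarantees that both sides of a many-to-many relationship derive the same collection name; a standalone sketch with made-up collection and field names:
```python
from inflection import camelize

def join_table_name_for(coll_a: str, field_a: str, coll_b: str, field_b: str) -> str:
    # concatenate collection name + lowercased camelized field name, then put
    # the two halves in alphabetical order so the result is side-independent
    ca = coll_a + camelize(field_a).lower()
    cb = coll_b + camelize(field_b).lower()
    return ca + cb if ca < cb else cb + ca

# both directions of the link produce the same join collection name
assert join_table_name_for('posts', 'authors', 'users', 'posts') == \
       join_table_name_for('users', 'posts', 'posts', 'authors')
```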
#### File: jsonclasses-pymongo/jsonclasses_pymongo/exceptions.py
```python
class DatabaseNotConnectedException(Exception):
"""This exception is raised when collection is accessed however database is
not connected.
"""
def __init__(self, message: str):
self.message = message
super().__init__(self.message)
```
#### File: jsonclasses-pymongo/jsonclasses_pymongo/query.py
```python
from __future__ import annotations
from jsonclasses_pymongo.query_to_object import query_to_object
from jsonclasses_pymongo.query_reader import QueryReader
from typing import (Iterator, Union, TypeVar, Generator, Optional, Any,
Generic, NamedTuple, cast)
from datetime import date, datetime
from bson import ObjectId
from inflection import camelize
from pymongo.cursor import Cursor
from jsonclasses.fdef import FStore, FType
from jsonclasses.mgraph import MGraph
from jsonclasses.excs import ObjectNotFoundException
from .coder import Coder
from .decoder import Decoder
from .connection import Connection
from .pymongo_object import PymongoObject
from .utils import ref_db_field_key
T = TypeVar('T', bound=PymongoObject)
U = TypeVar('U', bound='BaseQuery')
V = TypeVar('V', bound='BaseListQuery')
class Subquery(NamedTuple):
name: str
query: Optional[BaseQuery]
class BaseQuery(Generic[T]):
"""Base query is the base class of queries.
"""
def __init__(self: U, cls: type[T]) -> None:
self._cls = cls
self.subqueries: list[Subquery] = []
def include(self: U, name: str, query: Optional[BaseQuery] = None) -> U:
self.subqueries.append(Subquery(name, query))
return self
def _build_aggregate_pipeline(self: U) -> list[dict[str, Any]]:
cls = cast(type[PymongoObject], self._cls)
result: list[dict[str, Any]] = []
for subquery in self.subqueries:
fname = subquery.name
field = cls.cdef.field_named(fname)
if field.fdef.ftype == FType.LIST:
it = field.fdef.item_types.fdef.inst_cls
else:
it = field.fdef.inst_cls
if field.fdef.fstore == FStore.LOCAL_KEY:
key = ref_db_field_key(fname, cls)
if subquery.query is None:
result.append({
'$lookup': {
'from': it.pconf.collection_name,
'localField': key,
'foreignField': '_id',
'as': fname
}
})
else:
subpipeline = subquery.query._build_aggregate_pipeline()
subpipeline.insert(0, {
'$match': {
'$expr': {
'$and': [{'$eq': ['$_id', '$$'+key]}]
}
}
})
result.append({
'$lookup': {
'from': it.pconf.collection_name,
'as': fname,
'let': {key: '$' + key},
'pipeline': subpipeline
}
})
result.append({
'$unwind': '$' + fname
})
elif field.fdef.fstore == FStore.FOREIGN_KEY:
if field.fdef.ftype == FType.INSTANCE:
fk = cast(str, field.fdef.foreign_key)
if subquery.query is None:
result.append({
'$lookup': {
'from': it.pconf.collection_name,
'localField': '_id',
'foreignField': ref_db_field_key(fk, it),
'as': fname
}
})
else:
fk = cast(str, field.fdef.foreign_key)
key = ref_db_field_key(fk, it)
subp = subquery.query._build_aggregate_pipeline()
subp.insert(0, {
'$match': {
'$expr': {
'$and': [{'$eq': ['$'+key, '$$'+key]}]
}
}
})
result.append({
'$lookup': {
'from': it.pconf.collection_name,
'as': fname,
'let': {key: '$_id'},
'pipeline': subp
}
})
result.append({
'$unwind': '$' + fname
})
elif field.fdef.ftype == FType.LIST:
if subquery.query is not None:
subpipeline = subquery.query \
._build_aggregate_pipeline()
else:
subpipeline = []
has_match = False
matcher = None
for item in subpipeline:
if item.get('$match'):
has_match = True
matcher = item.get('$match')
break
if field.fdef.use_join_table:
coder = Coder()
jt_name = coder.join_table_name(cls, field.name,
it, field.foreign_field.name)
this_key = ref_db_field_key(cls.__name__, cls)
that_key = ref_db_field_key(it.__name__, it)
pipeline: list[Any] = []
if matcher is None:
matcher = {}
if not matcher.get('$expr'):
matcher['$expr'] = {}
if not matcher['$expr'].get('$and'):
matcher['$expr']['$and'] = []
matcher['$expr']['$and'].append({
'$eq': ['$_id', '$$' + that_key]
})
if not has_match:
subpipeline.insert(0, {'$match': matcher})
outer_lookup = {'$lookup': {
'from': jt_name,
'as': field.name,
'let': {this_key: '$_id'},
'pipeline': pipeline
}}
match = {'$match': {
'$expr': {
'$and': [
{'$eq': ['$'+this_key, '$$'+this_key]}
]
}
}}
lookup = {'$lookup': {
'from': it.pconf.collection_name,
'as': field.name,
'let': {that_key: '$'+that_key},
'pipeline': subpipeline
}}
unwind = {
'$unwind': '$' + field.name
}
replace = {
'$replaceRoot': {'newRoot': '$'+field.name}
}
pipeline.append(match)
pipeline.append(lookup)
pipeline.append(unwind)
pipeline.append(replace)
result.append(outer_lookup)
else:
fk = cast(str, field.fdef.foreign_key)
key = ref_db_field_key(fk, it)
item = {
'$lookup': {
'from': it.pconf.collection_name,
'as': fname,
'let': {key: '$_id'},
'pipeline': subpipeline
}
}
if matcher is None:
matcher = {}
if not matcher.get('$expr'):
matcher['$expr'] = {}
if not matcher['$expr'].get('$and'):
matcher['$expr']['$and'] = []
matcher['$expr']['$and'].append({
'$eq': ['$' + key, '$$' + key]
})
if not has_match:
subpipeline.insert(0, {'$match': matcher})
result.append(item)
return result
class BaseListQuery(BaseQuery[T]):
"""Base list query is the base class of list queries.
"""
def __init__(self: V,
cls: type[T],
filter: Union[dict[str, Any], str, None] = None) -> None:
super().__init__(cls)
self._match: Optional[dict[str, Any]] = None
self._sort: Optional[list[tuple[str, int]]] = None
self._page_number: Optional[int] = None
self._page_size: Optional[int] = None
self._skip: Optional[int] = None
self._limit: Optional[int] = None
self._use_pick: bool = False
self._pick: Optional[dict[str, Any]] = None
self._use_omit: bool = False
self._omit: Optional[dict[str, Any]] = None
if filter is not None:
if type(filter) is str:
filter = query_to_object(filter)
self._set_matcher(cast(dict, filter))
def _set_matcher(self: V, matcher: dict[str, Any]) -> None:
result = QueryReader(query=matcher, cls=self._cls).result()
if result.get('_match') is not None:
self._match = result['_match']
if result.get('_sort') is not None:
self._sort = result['_sort']
if result.get('_page_number') is not None:
self._page_number = result['_page_number']
if result.get('_page_size') is not None:
self._page_size = result['_page_size']
if result.get('_skip') is not None:
self._skip = result['_skip']
if result.get('_limit') is not None:
self._limit = result['_limit']
def order(self: V, field: str, sort: Optional[int] = None) -> V:
if self._sort is None:
self._sort = []
self._sort.append((field, sort or 1))
return self
def skip(self: V, n: int) -> V:
self._skip = n
return self
def limit(self: V, n: int) -> V:
self._limit = n
return self
def page_number(self: V, n: int) -> V:
self._page_number = n
if self._page_size is None:
self._page_size = 30
return self
def page_no(self: V, n: int) -> V:
return self.page_number(n)
def page_size(self: V, n: int) -> V:
self._page_size = n
return self
def _build_aggregate_pipeline(self: V) -> list[dict[str, Any]]:
lookups = super()._build_aggregate_pipeline()
result: list[dict[str, Any]] = []
if self._match is not None:
result.append({'$match': self._match})
if self._sort is not None:
result.append({'$sort': dict(self._sort)})
if self._page_number is not None and self._page_size is not None:
result.append({'$skip': (self._page_number - 1) * self._page_size})
else:
if self._skip is not None:
result.append({'$skip': self._skip})
if self._limit is not None:
result.append({'$limit': self._limit})
result.extend(lookups)
return result
def _exec(self: V) -> list[T]:
pipeline = self._build_aggregate_pipeline()
collection = Connection.get_collection(self._cls)
cursor = collection.aggregate(pipeline)
results = [result for result in cursor]
return Decoder().decode_root_list(results, self._cls)
class ListQuery(BaseListQuery[T]):
"""Query a list of objects.
"""
def exec(self) -> list[T]:
return self._exec()
def __await__(self) -> Generator[None, None, list[T]]:
yield
return self.exec()
class SingleQuery(BaseListQuery[T]):
"""Queries only one object from the query.
"""
def exec(self) -> T:
self._limit = 1
results = self._exec()
if len(results) == 0:
raise ObjectNotFoundException('object is not found')
return results[0]
def __await__(self) -> Generator[None, None, T]:
yield
return self.exec()
@property
def optional(self) -> OptionalSingleQuery:
query = OptionalSingleQuery(cls=self._cls)
query.subqueries = self.subqueries
query._match = self._match
query._sort = self._sort
query._skip = self._skip
query._limit = self._limit
query._use_pick = self._use_pick
query._pick = self._pick
query._use_omit = self._use_omit
query._omit = self._omit
return query
class OptionalSingleQuery(BaseListQuery[T]):
"""Queries only one object from the query. Returns None if not found.
"""
def exec(self) -> Optional[T]:
self._limit = 1
results = self._exec()
if len(results) == 0:
return None
return results[0]
def __await__(self) -> Generator[None, None, Optional[T]]:
yield
return self.exec()
class BaseIDQuery(BaseQuery[T]):
"""Query collection from object id.
"""
def __init__(self: U, cls: type[T], id: Union[str, ObjectId]) -> None:
super().__init__(cls)
self._id = id
def _build_aggregate_pipeline(self: BaseIDQuery) -> list[dict[str, Any]]:
lookups = super()._build_aggregate_pipeline()
result: list[dict[str, Any]] = []
result.append({'$match': {'_id': ObjectId(self._id)}})
result.extend(lookups)
return result
def _exec(self) -> Optional[T]:
pipeline = self._build_aggregate_pipeline()
collection = Connection.get_collection(self._cls)
cursor = collection.aggregate(pipeline)
results = [result for result in cursor]
if len(results) == 0:
return None
return Decoder().decode_root(results[0], self._cls)
class IDQuery(BaseIDQuery[T]):
"""Query collection from object id. Raises ObjectNotFoundException if not
found.
"""
def exec(self) -> T:
result = self._exec()
if result is not None:
return result
raise ObjectNotFoundException(
f'{self._cls.__name__}(_id={self._id}) not found.')
def __await__(self) -> Generator[None, None, T]:
yield
return self.exec()
@property
def optional(self) -> OptionalIDQuery:
new_query = OptionalIDQuery(cls=self._cls, id=self._id)
new_query.subqueries = self.subqueries
return new_query
class OptionalIDQuery(BaseIDQuery[T]):
"""Query collection from object id. Returns None if not found.
"""
def exec(self) -> Optional[T]:
return self._exec()
def __await__(self) -> Generator[None, None, Optional[T]]:
yield
return self.exec()
class ExistQuery(BaseListQuery[T]):
def exec(self) -> bool:
collection = Connection.get_collection(self._cls)
result = collection.count_documents(self._match or {}, limit=1)
return False if result == 0 else True
def __await__(self) -> Generator[None, None, bool]:
yield
return self.exec()
class QueryIterator(Generic[T]):
def __init__(self, cls: type[T], cursor: Cursor):
self.cls = cls
self.cursor = cursor
self.graph = MGraph()
def __iter__(self):
return self
def __next__(self) -> T:
value = self.cursor.__next__()
return Decoder().decode_root(value, self.cls, self.graph)
class IterateQuery(BaseListQuery[T]):
def exec(self) -> Iterator[T]:
pipeline = self._build_aggregate_pipeline()
collection = Connection.get_collection(self._cls)
cursor = collection.aggregate(pipeline)
return QueryIterator(cls=self._cls, cursor=cursor)
def __await__(self) -> Generator[None, None, Iterator[T]]:
yield
return self.exec()
```
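For orientation, a typical call chain against these query classes looks roughly like the sketch below; it assumes a connected database plus the SimpleSong model used in the tests that follow, and the ObjectId string is a placeholder:
```python
# filter with a query string, sort descending by year, then page the results;
# exec() builds the aggregation pipeline and decodes the returned documents
songs = SimpleSong.find('year[_gt]=2010') \
                  .order('year', -1) \
                  .page_no(1) \
                  .page_size(10) \
                  .exec()

# id() yields an IDQuery; .optional turns "not found" into None instead of raising
maybe_song = SimpleSong.id('60a7b1c2d3e4f5a6b7c8d9e0').optional.exec()
```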
#### File: jsonclasses-pymongo/tests/test_query.py
```python
from __future__ import annotations
from datetime import date
from unittest import TestCase
from bson.objectid import ObjectId
from jsonclasses_pymongo import Connection
from tests.classes.simple_song import SimpleSong
from tests.classes.simple_artist import SimpleArtist
from tests.classes.linked_author import LinkedAuthor
from tests.classes.linked_post import LinkedPost
from tests.classes.simple_date import SimpleDate
from tests.classes.simple_persona import SimplePersona
from tests.classes.simple_sex import SimpleSex, Gender
class TestQuery(TestCase):
@classmethod
def setUpClass(cls) -> None:
connection = Connection('simple')
connection.set_url('mongodb://localhost:27017/simple')
connection.connect()
connection = Connection('linked')
connection.set_url('mongodb://localhost:27017/linked')
connection.connect()
@classmethod
def tearDownClass(cls) -> None:
connection = Connection('simple')
connection.disconnect()
connection = Connection('linked')
connection.disconnect()
def setUp(self) -> None:
collection = Connection.get_collection(SimpleSong)
collection.delete_many({})
collection = Connection.get_collection(SimpleArtist)
collection.delete_many({})
collection = Connection.get_collection(SimpleDate)
collection.delete_many({})
collection = Connection.get_collection(SimplePersona)
collection.delete_many({})
collection = Connection.get_collection(SimpleSex)
collection.delete_many({})
collection = Connection.get_collection(LinkedAuthor)
collection.delete_many({})
collection = Connection.get_collection(LinkedPost)
collection.delete_many({})
def test_query_objects_from_database(self):
song0 = SimpleSong(name='Long', year=2020, artist='Thao')
song0.save()
song1 = SimpleSong(name='Long', year=2020, artist='Thao')
song1.save()
songs = SimpleSong.find().exec()
self.assertEqual(song0.name, songs[0].name)
self.assertEqual(song0.year, songs[0].year)
self.assertEqual(song0.artist, songs[0].artist)
self.assertEqual(song0.id, songs[0].id)
self.assertGreaterEqual(song0.created_at, songs[0].created_at)
self.assertGreaterEqual(song0.updated_at, songs[0].updated_at)
self.assertEqual(song1.name, songs[1].name)
self.assertEqual(song1.year, songs[1].year)
self.assertEqual(song1.artist, songs[1].artist)
self.assertEqual(song1.id, songs[1].id)
self.assertGreaterEqual(song1.created_at, songs[1].created_at)
self.assertGreaterEqual(song1.updated_at, songs[1].updated_at)
def test_query_object_from_database(self):
song0 = SimpleSong(name='Long', year=2020, artist='Thao')
song0.save()
song1 = SimpleSong(name='Long', year=2020, artist='Thao')
song1.save()
song = SimpleSong.one().exec()
self.assertEqual(song0.name, song.name)
self.assertEqual(song0.year, song.year)
self.assertEqual(song0.artist, song.artist)
self.assertEqual(song0.id, song.id)
self.assertGreaterEqual(song0.created_at, song.created_at)
self.assertGreaterEqual(song0.updated_at, song.updated_at)
def test_query_object_with_id(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
result = SimpleSong.id(song.id).exec()
self.assertEqual(song.name, result.name)
self.assertEqual(song.year, result.year)
self.assertEqual(song.artist, result.artist)
self.assertEqual(song.id, result.id)
self.assertGreaterEqual(song.created_at, result.created_at)
self.assertGreaterEqual(song.updated_at, result.updated_at)
def test_query_object_with_int(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
result = SimpleSong.find(year=2020).exec()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].year, 2020)
def test_query_object_with_int_string(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
result = SimpleSong.find('year=2020').exec()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].year, 2020)
def test_query_object_with_int_object(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
result = SimpleSong.find(year={'_gt': 2010}).exec()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].year, 2020)
result = SimpleSong.find(year={'_gt': 2030}).exec()
self.assertEqual(len(result), 0)
def test_query_object_with_int_object_string(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
result = SimpleSong.find('year[_gt]=2010').exec()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].year, 2020)
result = SimpleSong.find('year[_gt]=2030').exec()
self.assertEqual(len(result), 0)
def test_query_object_with_str(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
result = SimpleSong.find(name='Long').exec()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, 'Long')
def test_query_object_with_str_str(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
result = SimpleSong.find('name=Long').exec()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, 'Long')
def test_query_object_with_str_object(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
song2 = SimpleSong(name='Lieng', year=2020, artist='Lieng')
song2.save()
result = SimpleSong.find(name={'_prefix': 'Lo'}).exec()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, 'Long')
def test_query_object_with_str_object_str(self):
song = SimpleSong(name='Long', year=2020, artist='Thao')
song.save()
song2 = SimpleSong(name='Lieng', year=2020, artist='Lieng')
song2.save()
result = SimpleSong.find('name[_prefix]=Lo').exec()
self.assertEqual(len(result), 1)
self.assertEqual(result[0].name, 'Long')
def test_query_object_with_bool(self):
pass
def test_query_object_with_bool_str(self):
pass
def test_query_object_with_bool_object(self):
pass
def test_query_object_with_bool_object_str(self):
pass
def test_query_object_with_date(self):
d = SimpleDate(represents=date(2010, 7, 7))
d.save()
results = SimpleDate.find(represents=date(2010, 7, 7)).exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].represents, d.represents)
def test_query_object_with_date_string(self):
d = SimpleDate(represents=date(2010, 7, 7))
d.save()
results = SimpleDate.find('represents=2010-07-07').exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].represents, d.represents)
def test_query_object_with_date_object(self):
pass
def test_query_object_with_date_object_string(self):
pass
def test_query_dict_in_list(self):
p = SimplePersona(items=[{'a': 1, 'b': 2}, {'c': 3}])
p.save()
results = SimplePersona.find().exec()
def test_query_enum_with_enum(self):
d = SimpleSex(gender='MALE')
d.save()
results = SimpleSex.find(gender=Gender.MALE).exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].gender, d.gender)
def test_query_enum_with_enum_name(self):
d = SimpleSex(gender='MALE')
d.save()
results = SimpleSex.find(gender='MALE').exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].gender, d.gender)
def test_query_enum_with_lowercase_enum_name(self):
d = SimpleSex(gender='MALE')
d.save()
results = SimpleSex.find(gender='male').exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].gender, d.gender)
def test_query_enum_with_enum_value(self):
d = SimpleSex(gender='MALE')
d.save()
results = SimpleSex.find(gender=1).exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].gender, d.gender)
def test_query_enum_with_enum_name_str(self):
d = SimpleSex(gender='MALE')
d.save()
results = SimpleSex.find('gender=MALE').exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].gender, d.gender)
def test_query_enum_with_lowercase_enum_name_str(self):
d = SimpleSex(gender='MALE')
d.save()
results = SimpleSex.find('gender=male').exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].gender, d.gender)
def test_query_enum_with_enum_value_str(self):
d = SimpleSex(gender='MALE')
d.save()
results = SimpleSex.find('gender=1').exec()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].gender, d.gender)
```
|
{
"source": "Jesse-Yung/jsonclasses",
"score": 2
}
|
#### File: tests/classes/cbm_product.py
```python
from __future__ import annotations
from jsonclasses import jsonclass
def set_deleted(p: CBMProduct) -> None:
p.deleted_count = p.deleted_count - 1
@jsonclass(on_delete=[set_deleted, set_deleted])
class CBMProduct:
name: str
deleted_count: int = 100
```
#### File: tests/classes/gm_article.py
```python
from __future__ import annotations
from jsonclasses import jsonclass, types
def check_owner(article: GMArticle, operator: GMAuthor) -> bool:
return article.author.id == operator.id
def check_tier(article: GMArticle, operator: GMAuthor) -> bool:
return operator.paid_user
@jsonclass
class GMAuthor:
id: str
name: str
paid_user: bool
articles: list[GMArticle] = types.listof('GMArticle').linkedby('author') \
.required
@jsonclass(can_create=[check_owner, check_tier])
class GMArticle:
name: str
content: str
author: GMAuthor
```
#### File: jsonclasses/tests/test_after.py
```python
from __future__ import annotations
from datetime import date, datetime
from tests.classes.super_date import SuperDate
from unittest import TestCase
from jsonclasses.excs import ValidationException
class TestAfter(TestCase):
    def test_after_doesnt_raise_if_date_value_is_after_date_point(self):
dad = SuperDate(dad=date(2020, 4, 4))
dad.validate()
    def test_after_doesnt_raise_if_date_value_is_after_datetime_point(self):
dadt = SuperDate(dadt=date(2020, 4, 4))
dadt.validate()
    def test_after_doesnt_raise_if_datetime_value_is_after_date_point(self):
dtad = SuperDate(dtad=datetime(2020, 4, 4, 12, 30))
dtad.validate()
def test_after_raise_if_date_value_and_date_point_are_the_same(self):
dad = SuperDate(dad=date(2020, 4, 3))
with self.assertRaises(ValidationException) as context:
dad.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['dad'],
"value is too early")
def test_after_raise_if_date_value_and_datetime_point_are_the_same(self):
dadt = SuperDate(dadt=date(2020, 4, 3))
with self.assertRaises(ValidationException) as context:
dadt.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['dadt'],
"value is too early")
def test_after_raise_if_datetime_value_and_datetime_point_are_the_same(self):
dtad = SuperDate(dtad=datetime(2020, 4, 3, 0, 0))
with self.assertRaises(ValidationException) as context:
dtad.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['dtad'],
"value is too early")
def test_after_raise_if_date_value_earlier_than_date_point(self):
dad = SuperDate(dad=date(2020, 4, 2))
with self.assertRaises(ValidationException) as context:
dad.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['dad'],
"value is too early")
def test_after_raise_if_date_value_earlier_than_datetime_point(self):
dadt = SuperDate(dadt=date(2020, 4, 2))
with self.assertRaises(ValidationException) as context:
dadt.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['dadt'],
"value is too early")
def test_after_raise_if_datetime_value_earlier_than_date_point(self):
dtad = SuperDate(dtad=datetime(2020, 4, 2, 0, 0))
with self.assertRaises(ValidationException) as context:
dtad.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['dtad'],
"value is too early")
def test_after_does_not_raise_if_date_value_is_after_callable_date_point(self):
dacd = SuperDate(dacd=date(2020, 4, 4))
dacd.validate()
    def test_after_does_not_raise_if_date_value_is_after_types_date_point(self):
datd = SuperDate(datd=date(2020, 4, 4))
datd.validate()
```
#### File: jsonclasses/tests/test_alnum.py
```python
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import ValidationException
from tests.classes.product_with_alnum import AlnumProductCode
class TestAlnum(TestCase):
def test_alnum_doesnt_raise_if_value_is_all_int(self):
analysis = AlnumProductCode(product_name='water', product_code='12345')
analysis.validate()
def test_alnum_doesnt_raise_if_value_is_all_string(self):
analysis = AlnumProductCode(product_name='water', product_code='aaaa')
analysis.validate()
def test_alnum_doesnt_raise_if_value_is_string_and_int(self):
analysis = AlnumProductCode(product_name='water', product_code='aaaa123')
analysis.validate()
def test_alnum_raises_if_value_contains_dot(self):
analysis = AlnumProductCode(product_name='water', product_code='aa1.12')
with self.assertRaises(ValidationException) as context:
analysis.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['productCode'],
"value is not alnum str")
def test_alnum_raises_if_value_contains_special_character(self):
analysis = AlnumProductCode(product_name='water', product_code='12a!')
with self.assertRaises(ValidationException) as context:
analysis.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['productCode'],
"value is not alnum str")
```
#### File: jsonclasses/tests/test_append.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.super_append import SuperAppend
class TestAppend(TestCase):
def test_append_adds_str_into_the_end_of_original_str(self):
i_str = SuperAppend(s='99999999')
self.assertEqual(i_str.s, '999999993432')
def test_append_adds_int_into_the_end_of_original_list(self):
l_list = SuperAppend(l=['a', 'c', 's', 'fsa'])
self.assertEqual(l_list.l, ['a', 'c', 's', 'fsa', '7788'])
def test_append_appends_callable_str_into_original_str(self):
i_str = SuperAppend(cs='99999999')
self.assertEqual(i_str.cs, '999999993432')
def test_append_appends_callable_any_into_original_list(self):
l_list = SuperAppend(cl=['a', 'c', 's', 'fsa'])
self.assertEqual(l_list.cl, ['a', 'c', 's', 'fsa', '7788'])
def test_append_appends_types_str_into_original_str(self):
i_str = SuperAppend(ts='99999999')
self.assertEqual(i_str.ts, '999999993432')
def test_append_appends_types_any_into_original_list(self):
l_list = SuperAppend(tl=['a', 'c', 's', 'fsa'])
self.assertEqual(l_list.tl, ['a', 'c', 's', 'fsa', '7788'])
```
#### File: jsonclasses/tests/test_can_delete.py
```python
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import UnauthorizedActionException
from tests.classes.gs_product import GSProduct, GSProductUser, GSTProduct
from tests.classes.gm_product import GMProduct, GMProductUser
class TestCanDelete(TestCase):
def test_guards_raises_if_no_operator_is_assigned(self):
product = GSProduct(name='P')
paid_user = GSProductUser(id='P', name='A', paid_user=True)
product.user = paid_user
with self.assertRaises(UnauthorizedActionException):
product.delete()
def test_guard_is_called_for_existing_objects_on_delete(self):
product = GSProduct(name='P')
paid_user = GSProductUser(id='P', name='A', paid_user=True)
product.user = paid_user
product.opby(paid_user)
product.delete()
free_user = GSProductUser(id='F', name='A', paid_user=False)
product.user = free_user
product.opby(free_user)
with self.assertRaises(UnauthorizedActionException):
product.delete()
def test_multiple_guards_are_checked_for_existing_objects_on_del(self):
product = GMProduct(name='P')
setattr(product, '_is_new', False)
paid_user = GMProductUser(id='P', name='A', paid_user=True)
product.user = paid_user
product.opby(paid_user)
product.delete()
free_user = GMProductUser(id='F', name='A', paid_user=False)
product.user = free_user
product.opby(free_user)
with self.assertRaises(UnauthorizedActionException):
product.delete()
def test_types_guard_is_called_for_existing_object_on_delete(self):
product = GSTProduct(name='P')
paid_user = GSProductUser(id='P', name='n', paid_user=True)
product.user = paid_user
product.opby(paid_user)
product.delete()
free_user = GSProductUser(id='F', name='A', paid_user=False)
product.user = free_user
product.opby(free_user)
with self.assertRaises(UnauthorizedActionException):
product.delete()
```
#### File: jsonclasses/tests/test_datetime.py
```python
from __future__ import annotations
from unittest import TestCase
from datetime import date, datetime
from jsonclasses.excs import ValidationException
from tests.classes.simple_balance import SimpleBalance
class TestDatetime(TestCase):
def test_datetime_is_datetime_after_assigned(self):
balance = SimpleBalance(date=datetime(2020, 11, 20, 0, 0, 1))
self.assertEqual(balance._data_dict,
{'date': datetime(2020, 11, 20, 0, 0, 1),
'balance': None})
def test_datetime_converts_date_into_datetime(self):
balance = SimpleBalance(date=date(2020, 6, 30))
self.assertEqual(balance.date, datetime(2020, 6, 30, 0, 0, 0))
def test_datetime_converts_date_str_into_datetime(self):
balance = SimpleBalance(date='2020-11-20')
self.assertEqual(balance.date, datetime(2020, 11, 20, 0, 0, 0))
self.assertEqual(type(balance.date), datetime)
def test_datetime_converts_datetime_str_into_datetime(self):
balance = SimpleBalance(date='2020-11-20T03:03:03.333Z')
self.assertEqual(balance.date, datetime(2020, 11, 20, 3, 3, 3, 333000))
def test_datetime_raises_if_value_is_not_datetime(self):
balance = SimpleBalance(date=True)
with self.assertRaises(ValidationException) as context:
balance.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['date'],
"value is not datetime")
def test_datetime_is_datetime_str_when_tojson(self):
balance = SimpleBalance(date='2020-11-20T03:03:03.333Z')
self.assertEqual(balance.tojson(),
{'date': '2020-11-20T03:03:03.333Z', 'balance': None})
```
#### File: jsonclasses/tests/test_div.py
```python
from __future__ import annotations
from tests.classes.simple_calculation import SimpleCalculation
from unittest import TestCase
class TestDiv(TestCase):
def test_div_with_int_value_divs_by_original_value_(self):
div_int = SimpleCalculation(i_div=10)
self.assertEqual(div_int.i_div, 2)
def test_div_with_float_value_divs_by_original_value_(self):
div_float = SimpleCalculation(f_div=7.5)
self.assertEqual(div_float.f_div, 3)
def test_div_divs_callable_value_to_original_value(self):
div_float = SimpleCalculation(c_div=8.4)
self.assertEqual(div_float.c_div, 2)
def test_div_divs_types_value_to_original_value(self):
div_float = SimpleCalculation(t_div=8.4)
self.assertEqual(div_float.t_div, 2)
```
#### File: jsonclasses/tests/test_email.py
```python
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import ValidationException
from tests.classes.email_user import EmailUser
class TestEmail(TestCase):
def test_simple_email_doesnt_raise(self):
analysis = EmailUser(username='hello', email='<EMAIL>')
analysis.validate()
def test_email_with_underbar_doesnt_raise(self):
analysis = EmailUser(username='hello', email='<EMAIL>')
analysis.validate()
def test_email_with_dot_doesnt_raise(self):
analysis = EmailUser(username='hello', email='<EMAIL>')
analysis.validate()
def test_email_with_single_letter_in_each_field_doesnt_raise(self):
analysis = EmailUser(username='hello', email='<EMAIL>')
analysis.validate()
def test_email_with_multiple_domains_doesnt_raise(self):
analysis = EmailUser(username='hello', email='a<EMAIL>.cn.a.a')
analysis.validate()
def test_email_raises_if_value_is_not_valid_email(self):
analysis = EmailUser(username='hello', email='@<EMAIL>')
with self.assertRaises(ValidationException) as context:
analysis.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['email'],
"value is not email string")
def test_email_raises_if_email_contains_special_charaters(self):
analysis = EmailUser(username='hello', email='!<EMAIL>')
with self.assertRaises(ValidationException) as context:
analysis.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['email'],
"value is not email string")
def test_email_raises_exception_if_email_contains_hashtag(self):
analysis = EmailUser(username='hello', email='<EMAIL>')
with self.assertRaises(ValidationException) as context:
analysis.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['email'],
"value is not email string")
```
#### File: jsonclasses/tests/test_getter.py
```python
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import ValidationException
from tests.classes.calc_user import CalcUser
class TestGetter(TestCase):
def test_getter_gets_correct_value(self):
user = CalcUser(name="<NAME>", base_score=25.54)
self.assertEqual(user.first_name, 'Peter')
self.assertEqual(user.last_name, 'Layber')
self.assertAlmostEqual(user.score, 51.08)
self.assertEqual(user._data_dict,
{'name': '<NAME>', 'base_score': 25.54})
def test_calc_fields_can_be_output_to_json(self):
user = CalcUser(name="<NAME>", base_score=25.54)
result = user.tojson()
self.assertEqual(result, {'name': '<NAME>',
'firstName': 'Peter',
'lastName': 'Layber',
'score': 51.08,
'baseScore': 25.54})
def test_calc_fields_can_be_validated(self):
user = CalcUser(name="<NAME>", base_score=25.54)
with self.assertRaises(ValidationException) as context:
user.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['score'],
"value is not negative")
```
#### File: jsonclasses/tests/test_inverse.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.super_inverse import SuperInverse
class TestInverse(TestCase):
def test_inverse_transforms_false_if_bool_is_true(self):
b = SuperInverse(iv=False)
self.assertEqual(b.iv, True)
def test_inverse_transforms_true_if_bool_is_false(self):
b = SuperInverse(iv=True)
self.assertEqual(b.iv, False)
```
#### File: jsonclasses/tests/test_is_new.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.simple_order import SimpleOrder
class TestIsNew(TestCase):
def test_jobject_is_new_on_create(self):
order = SimpleOrder(quantity=5)
self.assertEqual(order.is_new, True)
def test_jobject_is_new_is_readonly(self):
order = SimpleOrder(quantity=5)
with self.assertRaises(AttributeError) as context:
order.is_new = False
self.assertEqual(str(context.exception), "can't set attribute")
def test_jobject_new_object_wont_record_modified_fields(self):
order = SimpleOrder(quantity=5)
order.quantity = 2
order.quantity = 10
self.assertEqual(order.modified_fields, ())
```
#### File: jsonclasses/tests/test_length.py
```python
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import ValidationException
from tests.classes.simple_company import SimpleCompany
from tests.classes.simple_code import SimpleCode
class TestLength(TestCase):
def test_length_does_not_raise_if_length_is_in_between(self):
c1 = SimpleCompany(name='AZ', number_of_employees=5)
c1.validate()
c2 = SimpleCompany(name='QWERT', number_of_employees=5)
c2.validate()
c3 = SimpleCompany(name='QWERTQWERT', number_of_employees=5)
c3.validate()
def test_length_raises_if_length_is_lt_lowerbond(self):
c1 = SimpleCompany(name='Q', number_of_employees=5)
with self.assertRaises(ValidationException):
c1.validate()
def test_length_raises_if_length_is_gt_lowerbond(self):
c1 = SimpleCompany(name='QWERTYUIOP{', number_of_employees=5)
with self.assertRaises(ValidationException):
c1.validate()
def test_length_should_match_length_in_str(self):
code = SimpleCode(code='1234')
code.validate()
def test_length_should_match_length_in_list(self):
l_code = SimpleCode(l_code=[1, 2, 3, 4])
l_code.validate()
def test_length_raises_if_length_does_not_match_in_str(self):
code = SimpleCode(code='12345')
with self.assertRaises(ValidationException):
code.validate()
code = SimpleCode(code='1')
with self.assertRaises(ValidationException):
code.validate()
def test_length_raises_if_length_does_not_match_in_list(self):
l_code = SimpleCode(l_code=[1, 2, 3, 4, 5, 0])
with self.assertRaises(ValidationException):
l_code.validate()
l_code = SimpleCode(l_code=[1, 2])
with self.assertRaises(ValidationException):
l_code.validate()
def test_length_accepts_single_callable_param(self):
c_code = SimpleCode(c_code='1234')
c_code.validate()
c_code = SimpleCode(c_code='12345')
with self.assertRaises(ValidationException):
c_code.validate()
def test_length_accepts_double_callable_param(self):
cd_code = SimpleCode(cd_code='1234')
cd_code.validate()
cd_code = SimpleCode(cd_code='12345')
cd_code.validate()
cd_code = SimpleCode(cd_code='123456')
with self.assertRaises(ValidationException):
cd_code.validate()
cd_code = SimpleCode(cd_code='123')
with self.assertRaises(ValidationException):
cd_code.validate()
def test_length_accepts_single_types_param(self):
t_code = SimpleCode(t_code='1234')
t_code.validate()
t_code = SimpleCode(t_code='12345')
with self.assertRaises(ValidationException):
t_code.validate()
def test_length_accepts_double_types_param(self):
td_code = SimpleCode(td_code='1234')
td_code.validate()
td_code = SimpleCode(td_code='12345')
td_code.validate()
td_code = SimpleCode(td_code='123456')
with self.assertRaises(ValidationException):
td_code.validate()
td_code = SimpleCode(td_code='123')
with self.assertRaises(ValidationException):
td_code.validate()
```
#### File: jsonclasses/tests/test_listof.py
```python
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import ValidationException
from tests.classes.simple_list import SimpleList
from tests.classes.typed_list import TypedList
from tests.classes.list_quiz import ListQuiz
from tests.classes.nullable_list import NullableList
from tests.classes.list_dict import ListDict
class TestListOf(TestCase):
def test_listof_raises_if_value_is_not_list(self):
slst = SimpleList(list=5)
self.assertRaises(ValidationException, slst.validate)
def test_listof_raises_if_one_of_values_does_not_match_inner(self):
slst = SimpleList(list=[5, 4, 3, 2, 1.5])
self.assertRaises(ValidationException, slst.validate)
def test_listof_does_not_raise_if_all_values_match_inner(self):
slst = SimpleList(list=[5, 4, 3, 2, 1])
slst.validate()
def test_listof_accepts_raw_type(self):
slst = SimpleList(list=[5, 4, 3, 2, 1.5])
self.assertRaises(ValidationException, slst.validate)
slst1 = SimpleList(list=[5, 4, 3, 2, 1])
slst1.validate()
def test_listof_accepts_types_type(self):
slst = TypedList(list=[5, 4, 3, 2, 1.5])
self.assertRaises(ValidationException, slst.validate)
slst1 = TypedList(list=[5, 4, 3, 2, 1])
slst1.validate()
def test_listof_does_not_allow_none_for_raw_typed_list(self):
record1 = SimpleList(list=[5, 4, None])
self.assertRaises(ValidationException, record1.validate)
def test_listof_does_not_allow_none_for_types_typed_list(self):
record1 = TypedList(list=[5, 4, None])
self.assertRaises(ValidationException, record1.validate)
def test_listof_allow_none_for_nullable_marked_typed_list(self):
record1 = NullableList(list=[5, 4, None])
record1.validate()
def test_listof_validate_raises_for_one_item(self):
quiz = ListQuiz(numbers=[200, 2, 4, 200, 6, 200])
with self.assertRaises(ValidationException) as context:
quiz.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['numbers.1'],
"value is not greater than or equal 100")
def test_listof_accepts_dict_value(self):
val = ListDict(numbers=[{'ab': 1, 'cd': 2}, {'ef': 3}])
val.validate()
val = ListDict(numbers=[{'ab': 1, 'cd': 2}, {'ef': True}])
with self.assertRaises(ValidationException):
val.validate()
```
#### File: jsonclasses/tests/test_map.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.super_map import SuperMap
class TestMap(TestCase):
def test_map_does_not_raise_if_it_maps_list(self):
item = SuperMap(l_m=[0, 1, 2, 3, 4])
self.assertEqual(item.l_m, list(map(lambda a: a +1, [0, 1, 2, 3, 4])))
```
#### File: jsonclasses/tests/test_on_delete.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.cb_product import CBProduct
from tests.classes.cbm_product import CBMProduct
from tests.classes.cbo_product import CBOProduct
class TestDecoratedOnDelete(TestCase):
def test_callback_are_called_for_existing_objects_on_delete(self):
p = CBProduct(name='N')
setattr(p, '_is_new', False)
p.delete()
self.assertEqual(p.deleted_count, 99)
def test_multiple_callbacks_are_called_for_existing_objects_on_del(self):
p = CBMProduct(name='N')
setattr(p, '_is_new', False)
p.delete()
self.assertEqual(p.deleted_count, 98)
def test_operator_can_be_passed_into_callback(self):
p = CBOProduct(name='N')
setattr(p, '_is_new', False)
p.opby(10)
p.delete()
self.assertEqual(p.deleted_count, 90)
```
#### File: jsonclasses/tests/test_on_save.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.cb_book import CBBook
from tests.classes.cbm_book import CBMBook
from tests.classes.cbo_book import CBOBook
class TestDecoratedOnSave(TestCase):
def test_callback_are_not_called_for_new_objects_on_save(self):
book = CBBook(name='N', content='C')
book.save()
self.assertEqual(book.updated_count, 0)
def test_callback_are_called_for_existing_objects_on_save(self):
book = CBBook(name='N', content='C')
setattr(book, '_is_new', False)
book.save()
self.assertEqual(book.updated_count, 1)
def test_multiple_callbacks_are_not_called_for_new_objects_on_save(self):
book = CBMBook(name='N', content='C')
book.save()
self.assertEqual(book.updated_count, 0)
def test_multiple_callbacks_are_called_for_existing_objects_on_save(self):
book = CBMBook(name='N', content='C')
setattr(book, '_is_new', False)
book.save()
self.assertEqual(book.updated_count, 2)
def test_operator_can_be_passed_into_callback(self):
book = CBOBook(name='N', content='C')
setattr(book, '_is_new', False)
book.opby(10)
book.save()
self.assertEqual(book.updated_count, 10)
```
#### File: jsonclasses/tests/test_padend.py
```python
from __future__ import annotations
from tests.classes.super_str import SuperStr
from unittest import TestCase
class TestPadEnd(TestCase):
def test_padend_adds_str_to_the_end_of_str(self):
ps = SuperStr(pade="aaaaa")
self.assertEqual(ps.pade, "aaaaaeeeee")
def test_padend_keeps_value_when_type_of_value_is_not_str(self):
ps = SuperStr(padie=80)
self.assertEqual(ps.padie, 80)
def test_padend_keeps_value_when_target_length_less_value_length(self):
ps = SuperStr(pade="aaaaaaaaaaa")
self.assertEqual(ps.pade, "aaaaaaaaaaa")
def test_padend_adds_callable_str_to_the_end_of_str(self):
ps = SuperStr(padce="aaaaa")
self.assertEqual(ps.padce, "aaaaaeeeee")
def test_padend_adds_types_str_to_the_end_of_str(self):
ps = SuperStr(padte="aaaaa")
self.assertEqual(ps.padte, "aaaaaeeeee")
```
#### File: jsonclasses/tests/test_positive.py
```python
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import ValidationException
from tests.classes.zero_number import ZeroNumber
class TestPositive(TestCase):
def test_positive_doesnt_raise_if_float_value_is_greater_than_zero(self):
n = ZeroNumber(fpositive=5.5)
n.validate()
def test_positive_raises_if_float_value_is_equal_to_zero(self):
n = ZeroNumber(fpositive=0)
with self.assertRaises(ValidationException) as context:
n.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['fpositive'],
"value is not positive")
def test_positive_raises_if_float_value_is_less_than_zero(self):
n = ZeroNumber(fpositive=-5.5)
with self.assertRaises(ValidationException) as context:
n.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['fpositive'],
"value is not positive")
def test_positive_doesnt_raise_if_int_value_is_greater_than_zero(self):
n = ZeroNumber(ipositive=5)
n.validate()
def test_positive_raises_if_int_value_is_equal_to_zero(self):
n = ZeroNumber(ipositive=0)
with self.assertRaises(ValidationException) as context:
n.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['ipositive'],
"value is not positive")
def test_positive_raises_if_int_value_is_less_than_zero(self):
n = ZeroNumber(ipositive=-5)
with self.assertRaises(ValidationException) as context:
n.validate()
self.assertEqual(len(context.exception.keypath_messages), 1)
self.assertEqual(context.exception.keypath_messages['ipositive'],
"value is not positive")
```
#### File: jsonclasses/tests/test_salt.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.super_str import SuperStr
from bcrypt import checkpw
class TestSalt(TestCase):
def test_salt_add_salt_to_a_string(self):
ss = SuperStr(password='<PASSWORD>')
self.assertNotEqual(ss.password, '<PASSWORD>')
self.assertTrue(checkpw('123456'.encode(), ss.password.encode()))
```
#### File: jsonclasses/tests/test_splite.py
```python
from __future__ import annotations
from tests.classes.super_iterable import SuperIterable
from unittest import TestCase
class TestSplit(TestCase):
def test_split_keeps_list_value_if_it_is_list_of_strs(self):
s = SuperIterable(itssp=["a", "d", "s", "r"])
self.assertEqual(s.itssp, ["a", "d", "s", "r"])
def test_split_splits_str_into_a_list_of_substrs(self):
s = SuperIterable(itssp="abc.ous.fga.ssr")
self.assertEqual(s.itssp, ["abc", "ous", "fga", "ssr"])
def test_split_keeps_callable_value_if_it_is_list_of_strs(self):
s = SuperIterable(c_itssp=["a", "d", "s", "r"])
self.assertEqual(s.c_itssp, ["a", "d", "s", "r"])
    def test_split_splits_types_str_into_a_list_of_substrs(self):
s = SuperIterable(t_itssp="abc.ous.fga.ssr")
self.assertEqual(s.t_itssp, ["abc", "ous", "fga", "ssr"])
```
#### File: jsonclasses/tests/test_tolist.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.super_type import SuperType
class TestToList(TestCase):
def test_tolist_transforms_set_value_into_a_list(self):
l = SuperType(tl={1, 2, 4, 7, 9})
self.assertEqual(l.tl, [1, 2, 4, 7, 9])
def test_tolist_transforms_str_value_into_a_list(self):
l = SuperType(tl="abc&*(_")
self.assertEqual(l.tl, ["a", "b", "c", "&", "*", "(", "_"])
def test_tolist_transforms_tuple_value_into_a_list(self):
l = SuperType(tl=(1, 2, True, "abc"))
self.assertEqual(l.tl, [1, 2, True, "abc"])
def test_tolist_keeps_value_if_type_of_value_is_not_set_str_or_tuple(self):
l = SuperType(tl=12)
self.assertEqual(l.tl, 12)
```
#### File: jsonclasses/tests/test_tonextday.py
```python
from __future__ import annotations
from datetime import date, datetime
from unittest import TestCase
from tests.classes.super_datetime import SuperDateTime
class TestTonextday(TestCase):
def test_tonextday_transforms_datetime_into_the_time_of_next_day(self):
d = SuperDateTime(dtnd=datetime(2021, 10, 11, 17, 37, 27, 446259))
self.assertEqual(d.dtnd, datetime(2021,10, 12, 0, 0))
def test_tonextday_transforms_date_into_the_time_of_next_day(self):
d = SuperDateTime(dnd=date(2021, 10, 11))
self.assertEqual(d.dnd, date(2021,10, 12))
def test_tonextday_does_not_transform_if_is_not_datetime_or_date(self):
s = SuperDateTime(snd='12345')
self.assertEqual(s.snd, '12345')
```
#### File: jsonclasses/tests/test_tonextmin.py
```python
from __future__ import annotations
from datetime import datetime
from unittest import TestCase
from tests.classes.super_datetime import SuperDateTime
class TestTonextmin(TestCase):
def test_tonextmin_transforms_datetime_into_the_time_of_next_minute(self):
d = SuperDateTime(dtnm=datetime(2021, 10, 11, 17, 37, 27, 446259))
self.assertEqual(d.dtnm, datetime(2021,10, 11, 17, 38))
def test_tonextmin_does_not_transform_if_is_not_datetime(self):
s = SuperDateTime(stnm="12345")
self.assertEqual(s.stnm, "12345")
```
#### File: jsonclasses/tests/test_tonextsec.py
```python
from __future__ import annotations
from datetime import datetime
from unittest import TestCase
from tests.classes.super_datetime import SuperDateTime
class TestTonextsec(TestCase):
def test_tonextsec_transforms_datetime_into_the_time_of_next_second(self):
d = SuperDateTime(dtns=datetime(2021, 10, 11, 17, 37, 27, 446259))
self.assertEqual(d.dtns, datetime(2021,10, 11, 17, 37, 28))
def test_tonextsec_does_not_transform_if_is_not_datetime(self):
s = SuperDateTime(stns="12345")
self.assertEqual(s.stns, "12345")
```
#### File: jsonclasses/tests/test_totitle.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.cellphone_title import CellphoneTitle
class TestToTitle(TestCase):
def test_totitle_titlizes_srt(self):
product = CellphoneTitle(cellphone_name='hello', cellphone_title='sale for today')
self.assertEqual(product.cellphone_title, 'Sale For Today')
def test_totitle_titlizes_srt_with_special_characters(self):
product = CellphoneTitle(cellphone_name='hello', cellphone_title='#sale !!$for t*^oday')
self.assertEqual(product.cellphone_title, '#Sale !!$For T*^Oday')
def test_totitle_titlizes_srt_with_int(self):
product = CellphoneTitle(cellphone_name='hello', cellphone_title='123sale 2323for t77oday')
self.assertEqual(product.cellphone_title, '123Sale 2323For T77Oday')
```
#### File: jsonclasses/tests/test_validate_modifier.py
```python
from __future__ import annotations
from unittest import TestCase
from jsonclasses.excs import ValidationException
from tests.classes.valid_password import (
ValidPassword, ValidPasswordMessage, CValidPassword, OptionalPassword,
TValidPassword
)
class TestValidateModifier(TestCase):
def test_validate_wont_validate_none(self):
opw = OptionalPassword(name='Bo Lang', password=None)
opw.validate()
def test_validate_is_fine_when_modifier_returns_true(self):
pw = ValidPassword(name='Li Si', password='<PASSWORD>')
try:
pw.validate()
except ValidationException:
self.fail('validate should be fine if value is valid')
def test_validate_raises_default_msg_when_modifier_returns_false(self):
pw = ValidPassword(name='Li Si', password='<PASSWORD>')
self.assertRaisesRegex(ValidationException,
'invalid value',
pw.validate)
def test_validate_raises_if_modifier_returns_str(self):
pw = ValidPasswordMessage(name='Li Si', password='<PASSWORD>')
self.assertRaisesRegex(ValidationException, 'wrong', pw.validate)
def test_validate_is_fine_when_modifier_returns_none(self):
pw = ValidPasswordMessage(name='Li Si', password='<PASSWORD>')
pw.validate()
def test_validate_can_also_accept_context(self):
pw = CValidPassword(name='Li Si', password='<PASSWORD>')
pw.validate()
def test_validate_doesnt_raise_if_types_validator_is_valid(self):
pw = TValidPassword(name='Q', password=45)
pw.validate()
def test_validate_raises_if_types_validator_isnt_valid(self):
pw = TValidPassword(name='Q', password=50)
self.assertRaisesRegex(ValidationException, 'invalid value', pw.validate)
```
#### File: jsonclasses/tests/test_wrapintolist.py
```python
from __future__ import annotations
from unittest import TestCase
from tests.classes.super_wrap import SuperWrap
class TestWrapIntoList(TestCase):
def test_wrap_into_list_wraps_into_a_list(self):
s = SuperWrap(s='hfkjd')
self.assertEqual(s.s, ['hfkjd'])
```
|
{
"source": "JesseyWright/NiMARE",
"score": 2
}
|
#### File: nimare/workflows/ibma_perm.py
```python
import os
import pathlib
import click
from nilearn.masking import apply_mask
from ..utils import get_template
from ..meta.ibma import rfx_glm
n_iters_default = 10000
output_prefix_default = ''
@click.command(name='conperm', short_help='permutation-based meta-analysis of contrast maps',
              help='Meta-analysis of contrast maps using random effects and '
                   'two-sided inference with an empirical (permutation-based) null distribution '
                   'and Family Wise Error multiple comparison correction.')
@click.argument('contrast_images', nargs=-1, required=True, type=click.Path(exists=True))
@click.option('--output_dir', help="Where to put the output maps.")
@click.option('--output_prefix', help="Common prefix for output maps.",
default=output_prefix_default, show_default=True)
@click.option('--n_iters', default=n_iters_default, show_default=True,
help="Number of iterations for permutation testing.")
def con_perm(contrast_images, output_dir=None, output_prefix=output_prefix_default,
n_iters=n_iters_default):
target = 'mni152_2mm'
mask_img = get_template(target, mask='brain')
click.echo("Loading contrast maps...")
z_data = apply_mask(contrast_images, mask_img)
click.echo("Estimating the null distribution...")
res = rfx_glm(z_data, mask_img, null='empirical', n_iters=n_iters)
if output_dir is None:
output_dir = os.getcwd()
else:
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
click.echo("Saving output maps...")
res.save_results(output_dir=output_dir, prefix=output_prefix)
```
#### File: nimare/workflows/peaks2maps.py
```python
import os
import pathlib
import click
from nilearn.image import resample_to_img
from nilearn.masking import apply_mask
from ..meta.ibma import rfx_glm
from ..meta.cbma import Peaks2MapsKernel
from ..dataset.extract import convert_sleuth_to_database
n_iters_default = 10000
@click.command(name='peaks2maps', short_help='permutation based metaanalysis of coordinates '
'that uses deep learning to reconstruct the original '
'maps',
               help='Method for performing coordinate based meta analysis that uses a pretrained '
                    'deep neural network to reconstruct unthresholded maps from peak coordinates. '
                    'The reconstructed maps are evaluated for statistical significance using a '
'permutation based approach with Family Wise Error multiple '
'comparison correction.')
@click.argument('sleuth_file', type=click.Path(exists=True))
@click.option('--output_dir', help="Where to put the output maps.")
@click.option('--output_prefix', help="Common prefix for output maps.")
@click.option('--n_iters', default=n_iters_default, show_default=True,
help="Number of iterations for permutation testing.")
def peaks2maps(sleuth_file, output_dir=None, output_prefix=None, n_iters=n_iters_default):
click.echo("Loading coordinates...")
dset = convert_sleuth_to_database(sleuth_file).get_dataset()
click.echo("Reconstructing unthresholded maps...")
k = Peaks2MapsKernel(dset.coordinates, mask=dset.mask)
imgs = k.transform(ids=dset.ids, masked=False, resample_to_mask=False)
mask_img = resample_to_img(dset.mask, imgs[0], interpolation='nearest')
z_data = apply_mask(imgs, mask_img)
click.echo("Estimating the null distribution...")
res = rfx_glm(z_data, mask_img, null='empirical', n_iters=n_iters)
if output_dir is None:
output_dir = os.path.dirname(sleuth_file)
else:
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
if output_prefix is None:
base = os.path.basename(sleuth_file)
output_prefix, _ = os.path.splitext(base)
output_prefix += '_'
click.echo("Saving output maps...")
res.save_results(output_dir=output_dir, prefix=output_prefix)
```
|
{
"source": "Jesseyx/compare-codecs",
"score": 3
}
|
#### File: compare-codecs/lib/hevc_jm.py
```python
import encoder
import file_codec
import re
import subprocess
class HevcCodec(file_codec.FileCodec):
def __init__(self,
name='hevc',
formatter=None):
self.name = name
self.codecname = 'hevc'
self.extension = 'hevc'
super(HevcCodec, self).__init__(
name,
formatter=formatter)
def StartEncoder(self, context):
return encoder.Encoder(context, encoder.OptionValueSet(self.option_set, ''))
def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
commandline = (
        '%s --SourceWidth=%d --SourceHeight=%d '
'-c %s '
'--FrameRate=%d --InputFile=%s '
'--FramesToBeEncoded=%d '
'--IntraPeriod=-1 '
'%s --TargetBitrate=%d --BitstreamFile=%s' % (
encoder.Tool('TAppEncoderStatic'),
videofile.width, videofile.height,
encoder.Tool('hevc_ra_main.cfg'), # Configuration file
videofile.framerate,
videofile.filename,
videofile.FrameCount(),
parameters.ToString(),
bitrate, encodedfile))
return commandline
def DecodeCommandLine(self, videofile, encodedfile, yuvfile):
commandline = "%s --BitstreamFile=%s --ReconFile=%s" % (
encoder.Tool('TAppDecoderStatic'),
encodedfile, yuvfile)
return commandline
def EncoderVersion(self):
try:
subprocess.check_output([encoder.Tool('TAppEncoderStatic')])
    except subprocess.CalledProcessError as err:
helptext = str(err.output)
for line in helptext.split('\n'):
if re.match('HM software:', line):
return line
raise encoder.Error('HM version string not found')
raise encoder.Error('HM did not return help text as expected')
```
#### File: compare-codecs/lib/optimizer_unittest.py
```python
import os
import re
import unittest
import encoder
import encoder_configuration
import optimizer
import test_tools
class DummyCodec(encoder.Codec):
def __init__(self):
super(DummyCodec, self).__init__('dummy')
self.extension = 'fake'
self.option_set = encoder.OptionSet(
encoder.IntegerOption('score', 0, 10),
encoder.Option('another_parameter', ['yes']),
)
def StartEncoder(self, context):
return encoder.Encoder(context,
encoder.OptionValueSet(self.option_set,
"--score=5"))
def Execute(self, parameters, rate, videofile, workdir):
# pylint: disable=W0613
match = re.search(r'--score=(\d+)', parameters.ToString())
if match:
return {'psnr': int(match.group(1)), 'bitrate': 100}
else:
return {'psnr': -100, 'bitrate': 100}
class DummyVideofile(encoder.Videofile):
def __init__(self, filename, clip_time):
super(DummyVideofile, self).__init__(filename)
self.clip_time = clip_time
def ClipTime(self):
return self.clip_time
def Returns1(target_bitrate, result):
"""Score function that returns a constant value."""
# pylint: disable=W0613
return 1.0
def ReturnsClipTime(target_bitrate, result):
# pylint: disable=W0613
return float(result['cliptime'])
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.codec = DummyCodec()
self.file_set = None
self.cache_class = encoder.EncodingMemoryCache
self.score_function = None
self.videofile = DummyVideofile('foofile_640_480_30.yuv', clip_time=1)
self.optimizer = None
def StdOptimizer(self):
# This function is not in setup because some tests
# do not need it.
if not self.optimizer:
self.optimizer = optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class)
return self.optimizer
def EncoderFromParameterString(self, parameter_string):
return encoder.Encoder(self.optimizer.context,
encoder.OptionValueSet(self.optimizer.context.codec.option_set,
parameter_string))
def testInit(self):
optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class)
def test_AlternateScorer(self):
my_optimizer = optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class,
score_function=Returns1)
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertAlmostEqual(1,
my_optimizer.Score(my_optimizer.BestEncoding(100, self.videofile)),
places=4)
def test_FirstBestEncodingNoScore(self):
my_optimizer = self.StdOptimizer()
encoding = my_optimizer.BestEncoding(100, self.videofile)
self.assertIsNone(encoding.Result())
def test_BestEncodingOneAlternative(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Store()
encoding = my_optimizer.BestEncoding(100, self.videofile)
self.assertEqual(encoding.videofile, self.videofile)
def test_BestEncodingExecuteGivesScore(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertAlmostEqual(5, my_optimizer.Score(
my_optimizer.BestEncoding(100, self.videofile)),
places=4)
def test_BestEncodingOtherSpeedNoScore(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertIsNone(my_optimizer.BestEncoding(200, self.videofile).Result())
def test_BestUntriedEncodingReturnsSomething(self):
my_optimizer = self.StdOptimizer()
first_encoding = my_optimizer.BestEncoding(100, self.videofile)
first_encoding.Execute().Store()
other_encoding = my_optimizer.BestUntriedEncoding(100, self.videofile)
self.assertTrue(other_encoding)
self.assertNotEqual(first_encoding.encoder.parameters.ToString(),
other_encoding.encoder.parameters.ToString())
def test_WorksBetterOnSomeOtherClip(self):
my_optimizer = self.StdOptimizer()
videofile2 = DummyVideofile('barfile_640_480_30.yuv', clip_time=1)
# Note - may have to do deterministic generation of these.
encoder1 = self.EncoderFromParameterString('--score=5') # Low score
encoder2 = self.EncoderFromParameterString('--score=10') # High score
# Store 2 scores for the second videofile.
encoding = encoder1.Encoding(100, videofile2)
encoding.Execute().Store()
encoding = encoder2.Encoding(100, videofile2)
encoding.Execute().Store()
# Store 1 score for the first videofile
first_encoding = encoder1.Encoding(100, self.videofile)
first_encoding.Execute().Store()
# pylint: disable=W0212
second_encoding = my_optimizer._WorksBetterOnSomeOtherClip(first_encoding,
100,
self.videofile)
self.assertTrue(second_encoding)
second_encoding.Execute()
self.assertEquals(first_encoding.videofile, second_encoding.videofile)
self.assertAlmostEqual(10, my_optimizer.Score(second_encoding),
places=4)
def test_ShorterParameterListsScoreHigher(self):
my_optimizer = self.StdOptimizer()
encoder1 = self.EncoderFromParameterString('--score=5')
encoder2 = self.EncoderFromParameterString(
'--score=5 --another_parameter=yes')
encoding1 = encoder1.Encoding(100, self.videofile)
encoding1.Execute()
encoding2 = encoder2.Encoding(100, self.videofile)
encoding2.Execute()
self.assertGreater(my_optimizer.Score(encoding1),
my_optimizer.Score(encoding2))
def test_EncodingWithOneLessParameter(self):
my_optimizer = self.StdOptimizer()
my_encoder = self.EncoderFromParameterString('--score=5')
first_encoding = my_encoder.Encoding(100, self.videofile)
# pylint: disable=W0212
next_encoding = my_optimizer._EncodingWithOneLessParameter(first_encoding,
100,
self.videofile,
None)
self.assertTrue(next_encoding)
self.assertEqual(next_encoding.encoder.parameters.ToString(), '')
def test_EncodingGoodOnOtherRate(self):
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
my_optimizer = self.StdOptimizer()
my_encoder = self.EncoderFromParameterString('--score=7')
my_encoder.Encoding(100, self.videofile).Execute().Store()
first_encoder = self.EncoderFromParameterString('--score=8')
first_encoding = first_encoder.Encoding(200, self.videofile)
first_encoding.Execute().Store()
# pylint: disable=W0212
next_encoding = my_optimizer._EncodingGoodOnOtherRate(first_encoding,
200,
self.videofile,
None)
self.assertTrue(next_encoding)
self.assertEqual('--score=7', next_encoding.encoder.parameters.ToString())
def test_BestOverallConfiguration(self):
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
my_optimizer = self.StdOptimizer()
# When there is nothing in the database, None should be returned.
best_encoder = my_optimizer.BestOverallEncoder()
self.assertIsNone(best_encoder)
# Fill in the database with all the files and rates.
my_encoder = self.EncoderFromParameterString('--score=7')
for rate, filename in self.file_set.AllFilesAndRates():
my_encoder.Encoding(rate, encoder.Videofile(filename)).Execute().Store()
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
# Add an incomplete encode. This should be ignored.
(self.EncoderFromParameterString('--score=9')
.Encoding(100, self.videofile).Execute().Store())
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
# Complete the set for 'score=9'. This should cause a change.
(self.EncoderFromParameterString('--score=9')
.Encoding(200, self.videofile).Execute().Store())
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals('--score=9',
best_encoder.parameters.ToString())
class TestOptimizerWithRealFiles(test_tools.FileUsingCodecTest):
def setUp(self):
self.codec = DummyCodec()
self.file_set = None
self.score_function = None
self.videofile = DummyVideofile('foofile_640_480_30.yuv', clip_time=1)
self.optimizer = None
def EncoderFromParameterString(self, parameter_string):
return encoder.Encoder(self.optimizer.context,
encoder.OptionValueSet(self.optimizer.context.codec.option_set,
parameter_string))
def test_BestOverallConfigurationNotInWorkDirectory(self):
other_dir = os.path.join(encoder_configuration.conf.sysdir(),
'multirepo_test')
os.mkdir(other_dir)
encoder_configuration.conf.override_scorepath_for_test([other_dir])
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
self.optimizer = optimizer.Optimizer(self.codec, self.file_set)
# When there is nothing in the database, None should be returned.
best_encoder = self.optimizer.BestOverallEncoder()
self.assertIsNone(best_encoder)
# Fill in the database with all the files and rates.
other_context = encoder.Context(self.codec, encoder.EncodingDiskCache,
scoredir='multirepo_test')
my_encoder = self.EncoderFromParameterString('--score=7')
other_context.cache.StoreEncoder(my_encoder)
my_encoder.context.cache.StoreEncoder(my_encoder)
for rate, filename in self.file_set.AllFilesAndRates():
my_encoding = my_encoder.Encoding(rate, encoder.Videofile(filename))
my_encoding.Execute()
other_context.cache.StoreEncoding(my_encoding)
# The best encoder should now be from the workdir, but the results are
# all fetched from the searchpath.
best_encoder = self.optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
one_encoding = best_encoder.Encoding(100, self.videofile)
one_encoding.Recover()
self.assertTrue(one_encoding.Result())
def test_MultipleOptimizers(self):
# Make sure other score directories don't interfere with this test.
encoder_configuration.conf.override_scorepath_for_test([])
os.mkdir(os.path.join(encoder_configuration.conf.sysdir(), 'first_dir'))
os.mkdir(os.path.join(encoder_configuration.conf.sysdir(), 'second_dir'))
one_optimizer = optimizer.Optimizer(self.codec, scoredir='first_dir')
another_optimizer = optimizer.Optimizer(self.codec, scoredir='second_dir')
self.assertNotEqual(one_optimizer.context.cache.workdir,
another_optimizer.context.cache.workdir)
# Storing one encoding's score should not affect the other's.
one_encoding = one_optimizer.BestEncoding(100,
self.videofile)
one_encoding.Execute().Store()
another_encoding = another_optimizer.BestEncoding(100, self.videofile)
self.assertFalse(another_encoding.Result())
another_encoding.Recover()
self.assertFalse(another_encoding.Result())
class TestFileAndRateSet(unittest.TestCase):
def test_OneFileAddedAndReturned(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100], 'dirname')
self.assertEqual([(100, 'dirname/filename')], the_set.AllFilesAndRates())
def test_NoDirName(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100])
self.assertEqual([(100, 'filename')], the_set.AllFilesAndRates())
def test_OneFileMultipleRates(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200], 'dirname')
self.assertEqual(set([(100, 'dirname/filename'),
(200, 'dirname/filename')]),
set(the_set.AllFilesAndRates()))
def test_TwoAddCalls(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200], 'dirname')
the_set.AddFilesAndRates(['otherfilename'], [200, 300], 'dirname')
self.assertEqual(set([(100, 'dirname/filename'),
(200, 'dirname/filename'),
(200, 'dirname/otherfilename'),
(300, 'dirname/otherfilename')]),
set(the_set.AllFilesAndRates()))
def test_RatesForFile(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200])
the_set.AddFilesAndRates(['otherfilename'], [200, 300])
self.assertEqual(set([100, 200]),
set(the_set.AllRatesForFile('filename')))
class TestFileAndRateSetWithRealFiles(test_tools.FileUsingCodecTest):
def test_AddMissingFile(self):
the_set = optimizer.FileAndRateSet()
the_set.AddFilesAndRates(['nosuchfile'], [100])
self.assertFalse(the_set.AllFilesAndRates())
self.assertFalse(the_set.set_is_complete)
def test_AddPresentFile(self):
the_set = optimizer.FileAndRateSet()
file_name = 'file_1024_768_30.yuv'
test_tools.MakeYuvFileWithOneBlankFrame(file_name)
the_set.AddFilesAndRates([file_name], [100],
basedir=encoder_configuration.conf.workdir())
self.assertTrue(the_set.AllFilesAndRates())
self.assertTrue(the_set.set_is_complete)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jessezbj/adaptdl",
"score": 2
}
|
#### File: adaptdl/adaptdl/checkpoint.py
```python
import os
from adaptdl.env import checkpoint_path, replica_rank
# FIXME: Keeping global state like this will result in memory leaks for
# applications which do not restart too often.
_STATES_TO_NAMES = {}
_NAMES_TO_STATES = {}
class State(object):
"""
    This class implements an arbitrary piece of state which can be saved and
loaded as part of a checkpoint, and synchronized across all replicas.
Should be sub-classed to define custom save, load, and sync logic.
"""
def __init__(self, name):
"""
Initialize the state object with a unique identifier `name`, which is
used to refer to the saved object in persistent storage. No two `State`
objects may share the same `name`.
Arguments:
name (str): Unique name of this `State` object.
Raises:
ValueError: If a `State` object with the given name already exists.
"""
if name in _NAMES_TO_STATES:
raise ValueError("State '{}' already exists".format(name))
_NAMES_TO_STATES[name] = self
_STATES_TO_NAMES[self] = name
def save(self, fileobj):
"""
This method should be overridden by subclasses to define how the state
is saved. Is invoked by `save_all_states` and `save_state` to save the
state into persistent storage.
Arguments:
fileobj (BinaryIO): A binary writable file object.
"""
pass
def load(self, fileobj):
"""
This method should be overridden by subclasses to define how the state
is loaded. Is invoked by `load_state` to load the state from persistent
storage.
Arguments:
fileobj (BinaryIO): A binary readable file object.
"""
pass
def sync(self):
"""
This method should be overridden by subclasses to define how the state
is synchronized across replicas. This might be necessary to make sure
the state is consistent before saving it to persistent storage. Is
invoked by `save_state` before saving the state.
"""
pass
def save_all_states():
"""
    Invokes `save_state` on all registered `State` objects.
This function can be used to trigger a global checkpoint and save every
`State` in the current job.
"""
for state in _STATES_TO_NAMES:
save_state(state)
def save_state(state, sync=True):
"""
Saves a `State` object to persistent storage. First invokes `State.sync` on
all replicas if `sync` is `True` (default), and then invokes `State.save`
on the replica of rank 0 only.
Arguments:
state (State): The `State` object to save to persistent storage.
sync (bool): Whether `State.sync` should be invoked.
"""
if sync:
state.sync()
if replica_rank() == 0:
name = _STATES_TO_NAMES[state]
if checkpoint_path() is not None:
with open(os.path.join(checkpoint_path(), name), "wb") as f:
state.save(f)
def load_state(state):
"""
Load the given `State` object from persistent storage. If the object was
previously saved, then State.load will be invoked with a readable file
object to load from.
Arguments:
state (State): `State` object to load from persistent storage.
Returns:
`True` if state was previously saved and `State.load` was invoked,
`False` otherwise.
"""
if checkpoint_path() is None:
return False
try:
name = _STATES_TO_NAMES[state]
with open(os.path.join(checkpoint_path(), name), "rb") as f:
state.load(f)
return True
except FileNotFoundError:
return False
```
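The `State` docstrings above describe overriding `save`, `load`, and `sync` in subclasses, but no concrete subclass appears in this file. The sketch below is a hypothetical minimal example: the `CounterState` name and the pickle-based serialization are assumptions for illustration, not part of adaptdl.
```python
# Hypothetical sketch of the State API above; CounterState and the use of
# pickle are illustrative assumptions, not adaptdl code.
import pickle

from adaptdl.checkpoint import State, save_state, load_state


class CounterState(State):
    """Tracks a single integer counter across checkpoint/restart cycles."""

    def __init__(self, name):
        super().__init__(name)  # Registers this state under a unique name.
        self.value = 0

    def save(self, fileobj):
        # Invoked on the rank-0 replica with a writable binary file object.
        pickle.dump(self.value, fileobj)

    def load(self, fileobj):
        # Invoked after a restart when a checkpoint with this name exists.
        self.value = pickle.load(fileobj)


counter = CounterState("step-counter")
if not load_state(counter):   # False when no previous checkpoint is found.
    counter.value = 0
counter.value += 1
save_state(counter)           # sync() first (a no-op here), then save on rank 0.
```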
#### File: adaptdl/adaptdl/fit_test.py
```python
import adaptdl.goodput as goodput
import numpy as np
def test_fit_1():
# Tests goodput.fit's ability to fit to data generated
# by its own model class with arbitrary parameters, without
# gradient accumulation. Serves as a sanity check
# that the goodput.model fitting works in the most
# optimistic case.
size = (1000,)
nodes = np.random.randint(low=1, high=11, size=size)
replicas = np.random.randint(low=1, high=nodes+1, size=size)
local_bsz = np.random.randint(32, 1024, size=size)
params = goodput.PerfParams(0.1, 0.01, 0.5, 1.0, 1e-6, 1e-6, 1.2)
accum_step_time = (
goodput._predict_accum_time(params, local_bsz) +
np.maximum(np.random.normal(0, 0.001, size=size), 0.0))
network_time = (
goodput._predict_network_time(params, nodes, replicas) +
np.maximum(np.random.normal(0, 0.001, size=size), 0.0))
gamma = params.gamma
optim_step_time = (accum_step_time ** gamma
+ network_time ** gamma) ** (1 / gamma)
result = goodput.fit_perf_params(
nodes, replicas, local_bsz,
accum_step_time, optim_step_time)
loss_result = goodput._obj_fn(
result, nodes, replicas, local_bsz,
accum_step_time, optim_step_time)
loss_true = goodput._obj_fn(
params, nodes, replicas, local_bsz,
accum_step_time, optim_step_time)
assert(abs(loss_result - loss_true) < 0.1 * loss_true
or loss_result < loss_true), \
("goodput.fit failed to fit model from data generated by",
"goodput.PerfParams(0.1, 0.01, 0.5, 1.0, 1e-6, 1e-6, 1.2)",
"parameters: {}".format(result))
def test_fit_2():
# Tests goodput.fit's ability to fit to data generated
# by its own model class with arbitrary parameters, with
# gradient accumulation. Serves as a sanity check
# that the goodput.model fitting works in the most
# optimistic case.
size = (1000,)
nodes = np.random.randint(low=1, high=11, size=size)
replicas = np.random.randint(low=1, high=nodes+1, size=size)
local_bsz = np.random.randint(32, 1024, size=size)
params = goodput.PerfParams(0.1, 0.01, 0.5, 1.0, 1e-6, 1e-6, 1.2)
accum_step_time = goodput._predict_accum_time(params, local_bsz) + \
np.maximum(np.random.normal(0, 0.01, size=size), 0.0)
network_time = goodput._predict_network_time(params, nodes, replicas) + \
np.maximum(np.random.normal(0, 0.01, size=size), 0.0)
gamma = params.gamma
optim_step_time = (accum_step_time ** gamma
+ network_time ** gamma) ** (1 / gamma)
result = goodput.fit_perf_params(
nodes, replicas, local_bsz, accum_step_time, optim_step_time)
loss_result = goodput._obj_fn(
result, nodes, replicas, local_bsz,
accum_step_time, optim_step_time)
loss_true = goodput._obj_fn(
params, nodes, replicas, local_bsz,
accum_step_time, optim_step_time)
assert(abs(loss_result - loss_true) < 0.1 * loss_true
or loss_result < loss_true), \
("goodput.fit failed to fit model from data generated by",
"goodput.PerfParams(0.1, 0.01, 0.5, 1.0, 1e-6, 1e-6, 1.2)",
"parameters: {}".format(result))
```
#### File: adaptdl/adaptdl/goodput.py
```python
import autograd
import numpy as np
import collections
import scipy.optimize
import scipy.stats
# Parameters for a performance model which predicts the per-step time of
# distributed SGD using all-reduce. At a high level, models compute time and
# network time separately, and combines them with some degree of overlap.
# Compute time is modeled as a linear function of the local batch size.
# Network time is modeled using different parameters depending on if the job
# is inter-node (there exists a pair of replicas on different nodes), or
# intra-node (all replicas are on the same node). For both cases, network time
# is modeled as a constant term plus a retrogression term which increases
# linearly with the total number of replicas.
PerfParams = collections.namedtuple("PerfParams", [
# T_compute ~ alpha_c + beta_c * local_bsz +
# (alpha_a + beta_a * local_bsz) * accumulation_steps
"alpha_c", # Constant term of compute time
"beta_c", # Multiplicative factor of compute time
# If inter-node: T_network ~ alpha_n + beta_n * replicas
"alpha_n", # Constant term of inter-node network time
"beta_n", # Retrogression factor of inter-node network time
# If intra-node: T_network ~ alpha_r + beta_r * replicas
"alpha_r", # Constant term of intra-node network time
"beta_r", # Retrogression factor of intra-node network time
# T_step ~ (T_compute ^ gamma + T_network ^ gamma) ^ (1 / gamma)
    # This is essentially a p-norm with p = gamma. When p ~ 1 then
# T_step ~ T_compute + T_network, indicating no overlap between compute
# and network. When p -> infinity then T_step = max(T_compute, T_network),
# indicating perfect overlap. We limit gamma to [1, 10] since 10 is close
# enough to approximate the max function for our purposes.
"gamma", # Models the degree of overlap between compute and network
])
GradParams = collections.namedtuple("GradParams", ["sqr", "var"])
class GoodputFunction(object):
def __init__(self, perf_params, grad_params, init_batch_size):
self._perf_params = PerfParams(*perf_params)
self._grad_params = GradParams(*grad_params)
self._init_batch_size = init_batch_size
def __call__(self, num_nodes, num_replicas, atomic_bsz, accum_steps):
return self.evaluate(num_nodes, num_replicas, atomic_bsz, accum_steps)
def evaluate(self, num_nodes, num_replicas, atomic_bsz, accum_steps):
batch_size = num_replicas * atomic_bsz * (accum_steps + 1)
assert np.all(self._init_batch_size <= batch_size)
return self.throughput(num_nodes, num_replicas, atomic_bsz,
accum_steps) * self.efficiency(batch_size)
def throughput(self, num_nodes, num_replicas, atomic_bsz, accum_steps):
accum_time = _predict_accum_time(self._perf_params, atomic_bsz)
network_time = _predict_network_time(self._perf_params,
num_nodes, num_replicas)
optim_time = np.exp(_predict_log_optim_time(self._perf_params,
accum_time, network_time))
total_time = accum_steps * accum_time + optim_time
batch_size = num_replicas * atomic_bsz * (accum_steps + 1)
return batch_size / total_time
def efficiency(self, batch_size):
grad_sqr = self._grad_params.sqr
grad_var = self._grad_params.var
scale = batch_size / self._init_batch_size
denom = grad_var / scale + grad_sqr
gain = np.where(denom > 0, (grad_var + grad_sqr) / denom, 1.0)
return gain / scale
def optimize(self, num_nodes, num_replicas, max_batch_size=None,
atomic_bsz_range=None, accumulation=False):
assert np.all(np.less_equal(1, num_nodes))
assert np.all(np.less_equal(num_nodes, num_replicas))
if max_batch_size is None:
max_batch_size = self._init_batch_size
assert self._init_batch_size <= max_batch_size
atomic_bsz_range = atomic_bsz_range or (None, None)
min_atomic_bsz = atomic_bsz_range[0] or 1
max_atomic_bsz = atomic_bsz_range[1] or max_batch_size
# Remember what the output shape/format should be and flatten inputs.
output_shape = np.broadcast(num_nodes, num_replicas).shape
output_scalar = np.isscalar(num_nodes) or np.isscalar(num_replicas)
num_nodes = np.broadcast_to(num_nodes, output_shape).flatten()
num_replicas = np.broadcast_to(num_replicas, output_shape).flatten()
# Samples 50 different total batch sizes in geometric space.
min_batch_size = np.maximum(self._init_batch_size,
min_atomic_bsz * num_replicas)
batch_size = np.geomspace(min_batch_size, max_batch_size)
local_bsz = batch_size / num_replicas
eps = 1e-8 # Tolerance for floor/ceil operations.
if accumulation:
# If local_bsz size exceeds the max atomic batch size, split it
# into a number of batches to form (atomic_bsz, accum_steps) such
# that (atomic_bsz * (accum_steps + 1)) is close to local_bsz.
#
# If num_replicas == 1 and local_bsz > self._init_batch_size, then
# set accum_steps to at least 1. This is because the gradient
# statistics used for scaling up the learning rate are inaccurate
# when there is only one atomic minibatch to estimate them from.
accum_steps = np.ceil(local_bsz / max_atomic_bsz - eps) - 1
accum_steps = np.where(
np.logical_and(num_replicas == 1,
local_bsz > self._init_batch_size + eps),
np.maximum(accum_steps, 1), accum_steps).astype(int)
atomic_bsz = np.ceil(
local_bsz / (accum_steps + 1) - eps).astype(int)
else:
            accum_steps = np.zeros_like(local_bsz, dtype=int)
atomic_bsz = np.where(
num_replicas == 1,
self._init_batch_size, np.ceil(local_bsz - eps)).astype(int)
# Evaluate the goodput of all candidate configurations.
goodput = self.evaluate(num_nodes, num_replicas,
atomic_bsz, accum_steps)
# Set the goodput of invalid configurations to 0.0.
goodput = np.where((min_atomic_bsz <= atomic_bsz) &
(atomic_bsz <= max_atomic_bsz), goodput, 0.0)
# Find the indices of the best configurations.
indices = np.argmax(goodput, axis=0), np.arange(goodput.shape[1])
# Restore the correct output shape and return results.
goodput = goodput[indices].reshape(output_shape)
atomic_bsz = atomic_bsz[indices].reshape(output_shape)
accum_steps = accum_steps[indices].reshape(output_shape)
if output_scalar:
goodput = goodput.item()
atomic_bsz = atomic_bsz.item()
accum_steps = accum_steps.item()
return goodput, atomic_bsz, accum_steps
def fit_perf_params(num_nodes, num_replicas, atomic_bsz,
accum_step_time, optim_step_time):
# Fit the performance model given accum time and optim time measurements
# for different configurations of num_nodes, num_replicas, and atomic_bsz.
# HACK: We want to use the original numpy module for calls from the
# SpeedupFunction for performance reasons, but also need those functions to
# use autograd.numpy when we want to differentiate them. We patch the
    # global np reference only for the code invoked from this function.
global np # Replace numpy from autograd.
orig_np = np
np = autograd.numpy
num_nodes = np.array(num_nodes)
num_replicas = np.array(num_replicas)
accum_step_time = np.array(accum_step_time)
optim_step_time = np.array(optim_step_time)
# Set initial params to reasonable values.
params = [1e-1, 1e-2] * 3 + [1.0 + 1e-3]
# Set lower/upper bounds for each parameter. Add a small slack to lower
# bounds to avoid numerical instability issues.
lower = [1e-8, 1e-8] * 3 + [1.0]
upper = [np.inf, np.inf] * 3 + [10.0]
if len(np.unique(atomic_bsz)) == 1:
# Fix alpha_c if only observed a single atomic batch size.
# This makes the speedup model optimistic with respect to
        # scaling up the batch size. This will assign equal weight
        # to the constant and multiplicative factors for accum time
# if there is only a single datapoint (which is by far the
# most likely case for this scenario)
params[0] = upper[0] = lower[0] = np.mean(accum_step_time) / 2
if not np.any(num_nodes > 1):
# Fix alpha_n and beta_n if no multi-node observations.
params[2] = upper[2] = lower[2]
params[3] = upper[3] = lower[3]
if not np.any(np.logical_and(num_nodes == 1, num_replicas > 1)):
# Fix alpha_r and beta_r if no single-node/multi-replica observations.
params[4] = upper[4] = lower[4]
params[5] = upper[5] = lower[5]
if not np.any(num_replicas > 2):
# Fix beta_n and beta_r if no replicas > 2.
params[3] = upper[3] = lower[3]
params[5] = upper[5] = lower[5]
bounds = scipy.optimize.Bounds(lower, upper, keep_feasible=True)
args = (num_nodes, num_replicas, atomic_bsz,
accum_step_time, optim_step_time)
# FIXME: need to handle optimization failures and propagate to the Trainer.
grad_fn = autograd.grad(_obj_fn)
result = scipy.optimize.minimize(_obj_fn, params, args=args,
jac=grad_fn, bounds=bounds)
params = result.x
if not any(num_nodes > 1):
# Enforce prior: alpha_n and beta_n are at least alpha_r and beta_r.
params[2] = max(params[2], params[4] * 1.1)
params[3] = max(params[3], params[5] * 1.1)
np = orig_np # Restore original numpy.
return PerfParams(*params)
def _rmse(pred, true):
return np.sqrt(((pred - true) ** 2).mean())
def _obj_fn(params, num_nodes, num_replicas, atomic_bsz,
accum_step_time, optim_step_time):
params = PerfParams(*params)
pred_accum = _predict_accum_time(params, atomic_bsz)
pred_network = _predict_network_time(params, num_nodes, num_replicas)
pred_log_optim = _predict_log_optim_time(params, pred_accum, pred_network)
# RMSLError of accum step time predictions.
err1 = _rmse(np.log(pred_accum), np.log(accum_step_time))
# RMSLError of optim step time predictions.
err2 = _rmse(pred_log_optim, np.log(optim_step_time))
# L2 regularization towards a smaller gamma, because it's easier to
# optimize the alpha and beta parameters when gamma is smaller.
reg1 = 1e-3 * (params.gamma - 1) ** 2
# Penalize retrogression terms to prefer a more optimistic model.
reg2 = 1e-2 * ((params.beta_n / params.alpha_n) ** 2 +
(params.beta_r / params.alpha_r) ** 2)
return err1 + err2 + reg1 + reg2
def _predict_accum_time(params, atomic_bsz):
params = PerfParams(*params)
# Forward/backward passes should scale linearly with the batch size.
return params.alpha_c + params.beta_c * atomic_bsz
def _predict_log_optim_time(params, accum_time, network_time):
gamma = PerfParams(*params).gamma
return np.log(accum_time ** gamma + network_time ** gamma) / gamma
def _predict_network_time(params, num_nodes, num_replicas):
params = PerfParams(*params)
# Select the most significant link between replicas, currently either
# inter-node (nodes > 1) or intra-node (replicas > 1). Note that if
# replicas == 1 then neither of these two conditions are matched.
conds = [num_nodes > 1, num_replicas > 1]
# Bandwidth is bottlenecked by the most significant link, alpha models
# the overhead of transferring data across that link.
bottleneck = np.select(conds, [params.alpha_n, params.alpha_r], 1e-8)
# Assuming ring all-reduce, communication happens in a number of rounds
# equal to the number of replicas. beta models the performance
# retrogression from increasing the number of replicas beyond 2.
retrogress = np.select(conds, [params.beta_n, params.beta_r], 1e-8)
retrogress = retrogress * np.maximum(num_replicas - 2, 1e-8)
return (bottleneck + retrogress)
```
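The comment block at the top of this file explains how per-step time is modeled; a short sketch of the `GoodputFunction` API may make that concrete. The parameter values below are invented purely for illustration; only the `PerfParams`, `GradParams`, and `GoodputFunction` signatures come from the module above.
```python
# Illustrative sketch only: the parameter values are made up; the PerfParams,
# GradParams, and GoodputFunction signatures come from the module above.
import numpy as np

from adaptdl.goodput import GoodputFunction, GradParams, PerfParams

perf = PerfParams(alpha_c=0.05, beta_c=1e-4,   # compute: constant + per-sample
                  alpha_n=0.02, beta_n=1e-3,   # inter-node network terms
                  alpha_r=0.01, beta_r=1e-3,   # intra-node network terms
                  gamma=2.0)                   # compute/network overlap degree
grad = GradParams(sqr=1.0, var=4.0)            # gradient signal vs. noise
fn = GoodputFunction(perf, grad, init_batch_size=128)

# Goodput = throughput * statistical efficiency for one concrete configuration.
print(fn.evaluate(num_nodes=2, num_replicas=4, atomic_bsz=64, accum_steps=0))

# Search over batch sizes for the best configuration on 1..8 single-node replicas.
replicas = np.arange(1, 9)
goodput, atomic_bsz, accum_steps = fn.optimize(
    np.ones_like(replicas), replicas,
    max_batch_size=1024, atomic_bsz_range=(32, 256), accumulation=True)
```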
#### File: adaptdl/adaptdl/goodput_test.py
```python
from adaptdl.goodput import GoodputFunction, PerfParams, GradParams
import itertools
import numpy as np
import pytest
RNG = np.random.RandomState(0)
PERF_PARAMS = [PerfParams(*RNG.gamma(2.0, 2.0, [7])) for i in range(10)]
GRAD_PARAMS = [GradParams(*RNG.gamma(2.0, 2.0, [2])) for i in range(10)]
def groupby_indices(*args):
_, indices = np.unique(np.stack(args), axis=1, return_inverse=True)
groups = {}
for i, g in enumerate(indices):
groups.setdefault(g, []).append(i)
return list(groups.values())
@pytest.mark.parametrize("perf_params", PERF_PARAMS)
@pytest.mark.parametrize("grad_params", GRAD_PARAMS)
def test_evaluate(perf_params, grad_params):
init_batch_size = 16
goodput_fn = GoodputFunction(perf_params, grad_params, init_batch_size)
# Generate a range of different goodput function arguments.
num_nodes = np.array([1, 2, 3, 4])
num_replicas = np.array([1, 2, 4, 8])
atomic_bsz = np.array([8, 12, 16, 20, 24])
accum_steps = np.array([0, 1, 2, 3, 4])
# Cartesian product.
num_nodes, num_replicas, atomic_bsz, accum_steps = \
map(np.array, zip(*itertools.product(num_nodes, num_replicas,
atomic_bsz, accum_steps)))
# Only keep valid arguments.
valid = np.logical_and(num_nodes <= num_replicas, init_batch_size
<= num_replicas * atomic_bsz * accum_steps)
num_nodes = num_nodes[valid]
num_replicas = num_replicas[valid]
atomic_bsz = atomic_bsz[valid]
accum_steps = accum_steps[valid]
# Evaluate goodput.
goodput = goodput_fn(num_nodes, num_replicas, atomic_bsz, accum_steps)
throughput = goodput_fn.throughput(num_nodes, num_replicas,
atomic_bsz, accum_steps)
efficiency = goodput_fn.efficiency(num_replicas * atomic_bsz
* (accum_steps + 1))
# Check basic invariants.
assert np.all(0 <= throughput)
assert np.all(0 <= efficiency) and np.all(efficiency <= 1)
assert np.allclose(goodput, throughput * efficiency)
# Increasing batch size should decrease efficiency.
batch_size = num_replicas * atomic_bsz * (accum_steps + 1)
sort = np.argsort(batch_size)
assert np.all(np.diff(efficiency[sort]) <= 0)
# All else equal, increasing atomic_bsz should increase throughput.
for indices in groupby_indices(num_nodes, num_replicas, accum_steps):
sort = np.argsort(atomic_bsz[indices])
assert np.all(np.diff(throughput[indices][sort]) >= 0)
# Increasing throughput should experience diminishing returns.
if len(indices) > 1:
diffx = np.diff(atomic_bsz[indices][sort])
diffy = np.diff(throughput[indices][sort])
assert np.all(diffx[:-1] * diffy[1:] - diffx[1:] * diffy[:-1] <= 0)
# All else equal, scalability is sublinear with respect to num_replicas.
for indices in groupby_indices(num_nodes, atomic_bsz, accum_steps):
scalability = throughput / num_replicas
sort = np.argsort(num_replicas[indices])
assert np.all(np.diff(scalability[indices][sort]) <= 0)
@pytest.mark.parametrize("perf_params", PERF_PARAMS)
@pytest.mark.parametrize("grad_params", GRAD_PARAMS)
def test_optimize_no_bounds(perf_params, grad_params):
goodput_fn = GoodputFunction(perf_params, grad_params, 128)
goodput, bsz, steps = goodput_fn.optimize(1, 3)
assert(bsz == 128//3 + 1), "expected bsz = 43, got {}".format(bsz)
assert(isinstance(goodput, float))
replicas = np.asarray([1, 2, 3, 4, 5])
# single-node
goodput, bsz, steps = goodput_fn.optimize(np.ones_like(replicas), replicas)
assert(bsz.shape == (5,))
assert(np.all(bsz == np.ceil(128 / replicas).astype(int)))
assert(goodput.shape == (5,))
assert(bsz[0] == 128)
assert(np.all(steps == 0))
# multi-node
goodput, bsz, steps = goodput_fn.optimize(replicas, replicas)
assert(bsz.shape == (5,))
assert(np.all(bsz == np.ceil(128 / replicas).astype(int)))
assert(goodput.shape == (5,))
assert(bsz[0] == 128)
assert(np.all(steps == 0))
@pytest.mark.parametrize("perf_params", PERF_PARAMS)
@pytest.mark.parametrize("grad_params", GRAD_PARAMS)
def test_optimize_local_bounds(perf_params, grad_params):
fun = GoodputFunction(perf_params, grad_params, 128)
goodput, bsz, steps = fun.optimize(1, 1, atomic_bsz_range=(64, 256))
assert(bsz == 128), "expected bsz = 128, got {}".format(bsz)
assert(isinstance(goodput, float))
replicas = np.asarray(range(1, 100))
# single-node
goodput, bsz, steps = fun.optimize(np.ones_like(replicas), replicas,
atomic_bsz_range=(64, 256))
assert(np.all(bsz >= np.ceil(128 / replicas).astype(int)))
assert(np.all(np.logical_or(bsz >= (64), goodput == 0.0)))
assert(np.all(bsz <= (256)))
assert(np.all(bsz * replicas <= 100 * 128))
assert(bsz[0] == 128)
assert(np.all(steps == 0))
# multi-node
goodput, bsz, steps = fun.optimize(replicas, replicas,
atomic_bsz_range=(64, 256))
assert(np.all(bsz >= np.ceil(128 / replicas).astype(int)))
assert(np.all(np.logical_or(bsz >= (64), goodput == 0.0)))
assert(np.all(bsz <= (256)))
assert(np.all(bsz * replicas <= 100 * 128))
assert(bsz[0] == 128)
assert(np.all(steps == 0))
@pytest.mark.parametrize("perf_params", PERF_PARAMS)
@pytest.mark.parametrize("grad_params", GRAD_PARAMS)
def test_optimize_max_bounds(perf_params, grad_params):
fun = GoodputFunction(perf_params, grad_params, 128)
goodput, bsz, steps = fun.optimize(1, 1, max_batch_size=1280)
assert(bsz == 128), "expected bsz = 128, got {}".format(bsz)
assert(isinstance(goodput, float))
replicas = np.asarray(range(1, 100))
# single-node
goodput, bsz, steps = fun.optimize(np.ones_like(replicas), replicas,
max_batch_size=1280)
assert(np.all(bsz >= np.ceil(128 / replicas).astype(int)))
assert(np.all(bsz * replicas <= 1280 + replicas))
assert(bsz[0] == 128)
assert(np.all(steps == 0))
# multi-node
goodput, bsz, steps = fun.optimize(replicas, replicas, max_batch_size=1280)
assert(np.all(bsz >= np.ceil(128 / replicas).astype(int)))
assert(np.all(bsz * replicas <= 1280 + replicas))
assert(bsz[0] == 128)
assert(np.all(steps == 0))
@pytest.mark.parametrize("perf_params", PERF_PARAMS)
@pytest.mark.parametrize("grad_params", GRAD_PARAMS)
def test_optimize_all_bounds(perf_params, grad_params):
fun = GoodputFunction(perf_params, grad_params, 128)
goodput, bsz, steps = fun.optimize(1, 1, max_batch_size=1280,
atomic_bsz_range=(64, 256))
assert(bsz == 128), "expected bsz = 128, got {}".format(bsz)
assert(isinstance(goodput, float))
replicas = np.asarray(range(1, 20))
# single-node
goodput, bsz, steps = fun.optimize(np.ones_like(replicas), replicas,
max_batch_size=1280,
atomic_bsz_range=(64, 256))
assert(np.all(np.logical_or(bsz >= np.ceil(128 / replicas).astype(int),
goodput == 0.0)))
assert(np.all(np.logical_or(bsz >= (64),
goodput == 0.0)))
assert(np.all(bsz <= (256)))
assert(np.all(np.logical_or(bsz * replicas <= 1280 + replicas,
goodput == 0.0)))
assert(bsz[0] == 128)
assert(np.all(steps == 0))
# multi-node
goodput, bsz, steps = fun.optimize(replicas, replicas,
max_batch_size=1280,
atomic_bsz_range=(64, 256))
assert(np.all(np.logical_or(bsz >= np.ceil(128 / replicas).astype(int),
goodput == 0.0)))
assert(np.all(np.logical_or(bsz >= (64),
goodput == 0.0)))
assert(np.all(bsz <= (256)))
assert(np.all(np.logical_or(bsz * replicas <= 1280 + replicas,
goodput == 0.0)))
assert(bsz[0] == 128)
assert(np.all(steps == 0))
# multi-node edge case
replicas = 4
goodput, bsz, steps = fun.optimize(4, 4, max_batch_size=1024,
atomic_bsz_range=(128, 128))
assert goodput > 0.0
assert bsz == 128
assert steps == 0
@pytest.mark.parametrize("perf_params", PERF_PARAMS)
@pytest.mark.parametrize("grad_params", GRAD_PARAMS)
def test_optimize_accumulation(perf_params, grad_params):
fun = GoodputFunction(perf_params, grad_params, 128)
goodput, bsz, steps = fun.optimize(1, 1, max_batch_size=1280,
atomic_bsz_range=(64, 256),
accumulation=True)
assert(isinstance(goodput, float))
replicas = np.asarray(range(1, 20))
# single-node
goodput, bsz, steps = fun.optimize(np.ones_like(replicas), replicas,
max_batch_size=1280,
atomic_bsz_range=(64, 256),
accumulation=True)
assert(np.all(np.logical_or(bsz >= np.ceil(128 / replicas).astype(int),
goodput == 0.0)))
assert(np.all(np.logical_or(bsz >= (64),
goodput == 0.0)))
assert(np.all(bsz <= (256)))
assert(np.all(np.logical_or(bsz * replicas * (steps + 1) <
1280 + replicas * (steps + 1),
goodput == 0.0)))
assert(np.all(steps <= 15))
assert(np.all(steps >= 0))
assert(np.all(np.logical_or(replicas > 1,
np.logical_or(bsz == 128, steps > 0))))
# multi-node
goodput, bsz, steps = fun.optimize(replicas, replicas,
max_batch_size=1280,
atomic_bsz_range=(64, 256),
accumulation=True)
assert(np.all(np.logical_or(bsz >= np.ceil(128 / replicas).astype(int),
goodput == 0.0)))
assert(np.all(np.logical_or(bsz >= (64),
goodput == 0.0)))
assert(np.all(bsz <= (256)))
assert(np.all(np.logical_or(bsz * replicas * (steps + 1) <
1280 + replicas * (steps + 1),
goodput == 0.0)))
assert(np.all(steps <= 15))
assert(np.all(steps >= 0))
assert(np.all(np.logical_or(np.multiply(steps, bsz) >= 256,
steps == 0)))
@pytest.mark.parametrize("perf_params", PERF_PARAMS)
@pytest.mark.parametrize("grad_params", GRAD_PARAMS)
def test_one_replica_accumulation(perf_params, grad_params):
fun = GoodputFunction(perf_params, grad_params, 128)
replicas = np.asarray([1])
max_batch_sizes = np.asarray(range(128, 128 * 20, 128))
# single-node
for max_batch_size in max_batch_sizes:
goodput, bsz, steps = fun.optimize(np.ones_like(replicas), replicas,
                                           max_batch_size=max_batch_size,
atomic_bsz_range=(64, 256),
accumulation=True)
assert(np.all(np.logical_or(bsz >= (64),
goodput == 0.0)))
assert(np.all(bsz <= (256)))
assert(np.all(np.logical_or(bsz * (steps + 1) <=
max_batch_size,
goodput == 0.0)))
assert(np.all(np.logical_or(bsz >= np.ceil(128 / replicas).astype(int),
goodput == 0.0)))
assert(np.all(np.logical_or(bsz * (steps + 1) != 128,
steps == 0)))
```
#### File: adaptdl/adaptdl/reducer_test.py
```python
from multiprocessing import Process
import numpy as np
import collections
from adaptdl.reducer import Reducer
import portpicker
import signal
import faulthandler
root_host = "127.0.0.1"
DEFAULT_REDUCER_PORT = portpicker.pick_unused_port()
def main(rank, size):
faulthandler.enable(all_threads=True)
faulthandler.register(signal.SIGUSR1, all_threads=True, chain=False)
reducer = Reducer(rank, size, root_host, DEFAULT_REDUCER_PORT)
if rank == 0:
batch_size = 28
x = {"foo": 1}
else:
x = {"bar": 1}
batch_size = 0
# start a async reducer
ax = reducer.allreduce_async(np.asarray([1, 1, 1]))
# do a bunch of bcasts
batch_size = reducer.broadcast(batch_size)
batch_size = reducer.broadcast(batch_size)
batch_size = reducer.broadcast(batch_size)
assert batch_size == 28
# do allreduce on Counter
x = reducer.allreduce(collections.Counter(x))
assert x["foo"] == 1
assert x["bar"] == size - 1
# collect the allreduce_async result
ax = ax.result()
assert np.allclose(ax, size * np.asarray([1, 1, 1]))
# try to simulate a training loop
x = None
for _ in range(10):
if x:
x = x.result()
assert np.allclose(x, size * np.asarray([1, 1, 1]))
x = reducer.allreduce_async(np.asarray([1, 1, 1]))
def test_reducer():
size = 3 # number of replicas
processes = []
for rank in range(size):
p = Process(target=main, args=(rank, size), daemon=True)
p.start()
processes.append(p)
for p in processes[1:]:
p.join()
processes[0].join()
# check exceptions raised by the processes
for p in processes:
assert not p.exitcode
```
#### File: adaptdl/torch/epoch_test.py
```python
from adaptdl.conftest import elastic_multiprocessing
@elastic_multiprocessing
def test_epoch():
import adaptdl.checkpoint
from adaptdl.env import num_restarts
from adaptdl.torch.epoch import (remaining_epochs_until,
current_epoch, finished_epochs)
total_epochs = 10
restart_epoch = 5
assert current_epoch() is None
if num_restarts() == 0:
assert finished_epochs() == 0
expected_epochs = list(range(restart_epoch + 1))
elif num_restarts() == 1:
assert finished_epochs() == restart_epoch
expected_epochs = list(range(restart_epoch, total_epochs))
else:
assert False
for idx, epoch in enumerate(remaining_epochs_until(10)):
assert epoch == expected_epochs[idx]
assert current_epoch() == epoch
assert finished_epochs() == epoch
if num_restarts() == 0 and epoch == restart_epoch:
adaptdl.checkpoint.save_all_states()
return 5 # Restart with 5 replicas.
```
#### File: adaptdl/torch/parallel_test.py
```python
import numpy as np
import torch
from torch.utils.data import Dataset
import adaptdl.torch as adl
class LRIterableDataset(Dataset):
def __init__(self, size, true_values, noise):
input_values = np.random.uniform(-5.0, 5.0, size)
bias_input_values = np.stack([np.ones(size), input_values])
target_values = (
np.dot(true_values, bias_input_values)
+ np.random.normal(0.0, noise, size=(size,)))
self._values = list(zip(input_values, target_values))
self._len = size
def __getitem__(self, index):
return self._values[index]
def __len__(self):
return self._len
def test_single_replica_parallel():
adl.init_process_group("gloo")
true_values = np.asarray([3.0, 4.0])
dataset = LRIterableDataset(1000, true_values, 1.0)
dataloader = adl.AdaptiveDataLoader(
dataset, batch_size=32, shuffle=False, num_workers=1)
model = torch.nn.Linear(1, 1, bias=True)
params = [model.bias, model.weight]
sgd = torch.optim.SGD(
[{"params": [param]} for param in params],
lr=0.01)
schedule = torch.optim.lr_scheduler.MultiStepLR(sgd, [50])
model = adl.AdaptiveDataParallel(model, sgd, schedule)
loss = torch.nn.MSELoss()
for epoch in adl.remaining_epochs_until(100):
for inputs, targets in dataloader:
inputs = inputs.float()
targets = targets.float()
sgd.zero_grad()
output = model(torch.reshape(inputs, (-1, 1)))
targets = torch.reshape(targets, (-1, 1))
loss_value = loss(output, targets)
loss_value.backward()
sgd.step()
schedule.step()
params = np.asarray([param.item() for param in params])
assert(np.all(np.isclose(params, true_values, atol=0.1))), \
(params, true_values)
```
#### File: cli/adaptdl_cli/pvc.py
```python
import kubernetes
import re
SUPPORTED_PROVISIONERS = (r'microk8s.io/hostpath',
r'.*cephfs.csi.ceph.com',
r'\befs\b')
def get_storageclass(name=None):
api = kubernetes.client.StorageV1Api()
if name is not None:
return api.read_storage_class(name)
# Find default storageclass.
sc_list = api.list_storage_class()
for sc in sc_list.items:
for provisioner in SUPPORTED_PROVISIONERS:
if re.search(provisioner, sc.provisioner):
return sc
raise SystemExit("Unsupported storageclass from available storageclasses "
f"{[sc.metadata.name for sc in sc_list.items]}")
def create_pvc(name=None, storage_class=None, size="100Gi",
owner_metadata=None):
context = kubernetes.config.list_kube_config_contexts()[1]
namespace = context["context"].get("namespace", "default")
core_api = kubernetes.client.CoreV1Api()
if storage_class is None:
storage_class_name = get_storageclass().metadata.name
else:
storage_class_name = storage_class.metadata.name
if owner_metadata is None:
owner_references = []
else:
owner_references = [
kubernetes.client.V1OwnerReference(
api_version="adaptdl.petuum.com/v1",
kind="AdaptDLJob",
name=owner_metadata["name"],
uid=owner_metadata["uid"])]
if name is None:
metadata = kubernetes.client.V1ObjectMeta(
namespace=namespace,
generate_name="adaptdl-pvc-",
owner_references=owner_references
)
else:
metadata = kubernetes.client.V1ObjectMeta(
namespace=namespace,
name=name,
owner_references=owner_references
)
claim = kubernetes.client.V1PersistentVolumeClaim(
metadata=metadata,
spec=kubernetes.client.V1PersistentVolumeClaimSpec(
storage_class_name=storage_class_name,
access_modes=["ReadWriteMany"],
volume_mode="Filesystem",
resources=kubernetes.client.V1ResourceRequirements(
requests={"storage": size})
),
)
return core_api.create_namespaced_persistent_volume_claim(
namespace, claim)
def create_copy_pod(pvc_name, cp_job_uid):
context = kubernetes.config.list_kube_config_contexts()[1]
namespace = context["context"].get("namespace", "default")
core_api = kubernetes.client.CoreV1Api()
volume_name = "adaptdl-pvc"
labels = {"adaptdl/cli-copy": cp_job_uid}
pvc = core_api.read_namespaced_persistent_volume_claim(pvc_name, namespace)
pvc_uid = pvc.metadata.uid
metadata = {"namespace": namespace,
"generateName": "copy-{}".format(pvc_name),
"labels": labels,
"owner_references": [
kubernetes.client.V1OwnerReference(
api_version="v1",
kind="PersistentVolumeClaim",
name=pvc_name,
uid=pvc_uid)]
}
container = kubernetes.client.V1Container(
name="copy-container",
image="alpine",
command=["sleep"],
args=["1000000"],
volume_mounts=[
kubernetes.client.V1VolumeMount(
name=volume_name,
mount_path="adaptdl_pvc",
),
],
)
body = kubernetes.client.V1Pod(
metadata=metadata,
spec=kubernetes.client.V1PodSpec(
volumes=[{
"name": volume_name,
"persistentVolumeClaim": {
"claimName": pvc_name
}
}],
containers=[container]
)
)
return core_api.create_namespaced_pod(namespace, body)
```
#### File: examples/BERT/metrics.py
```python
import collections
import re
import string
def compute_qa_exact(ans_pred_tokens_samples):
'''
Input: ans_pred_tokens_samples: [([ans1_tokens_candidate1, ans1_tokens_candidate2], pred1_tokens),
([ans2_tokens_candidate1, ans2_tokens_candidate2], pred2_tokens),
...
([ansn_tokens_candidate1, ansn_tokens_candidate2], predn_tokens)]
ans1_tokens_candidate1 = ['this', 'is', 'an', 'sample', 'example']
Output: exact score of the samples
'''
def normalize_txt(text):
# lower case
text = text.lower()
# remove punc
exclude = set(string.punctuation)
text = "".join(ch for ch in text if ch not in exclude)
# remove articles
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
text = re.sub(regex, " ", text)
# white space fix
return " ".join(text.split())
exact_scores = []
for (ans_tokens, pred_tokens) in ans_pred_tokens_samples:
pred_str = " ".join(pred_tokens)
candidate_score = []
for item in ans_tokens:
ans_str = " ".join(item)
candidate_score.append(int(normalize_txt(ans_str) == normalize_txt(pred_str)))
exact_scores.append(max(candidate_score))
return 100.0 * sum(exact_scores) / len(exact_scores)
def compute_qa_f1(ans_pred_tokens_samples):
'''
Input: ans_pred_tokens_samples: [([ans1_tokens_candidate1, ans1_tokens_candidate2], pred1_tokens),
([ans2_tokens_candidate1, ans2_tokens_candidate2], pred2_tokens),
...
([ansn_tokens_candidate1, ansn_tokens_candidate2], predn_tokens)]
ans1_tokens_candidate1 = ['this', 'is', 'an', 'sample', 'example']
Output: f1 score of the samples
'''
def sample_f1(ans_tokens, pred_tokens):
common = collections.Counter(ans_tokens) & collections.Counter(pred_tokens)
num_same = sum(common.values())
if len(ans_tokens) == 0 or len(pred_tokens) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(ans_tokens == pred_tokens)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_tokens)
recall = 1.0 * num_same / len(ans_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
f1_scores = []
for (ans_tokens, pred_tokens) in ans_pred_tokens_samples:
candidate_score = []
for item in ans_tokens:
candidate_score.append(sample_f1(item, pred_tokens))
f1_scores.append(max(candidate_score))
return 100.0 * sum(f1_scores) / len(f1_scores)
```
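The docstrings above spell out the nested input structure these two functions expect; the tiny example below shows it end to end. The token lists are invented, and the import assumes the script is run from the `examples/BERT` directory.
```python
# Tiny example of the input format described in the docstrings above; the
# answer/prediction tokens are invented for illustration.
from metrics import compute_qa_exact, compute_qa_f1  # assumes examples/BERT is on the path

samples = [
    # (list of candidate answer token lists, predicted token list)
    ([["the", "eiffel", "tower"], ["eiffel", "tower"]], ["eiffel", "tower"]),
    ([["in", "1889"]], ["in", "1890"]),
]

print(compute_qa_exact(samples))  # 50.0: exact match on the first sample only.
print(compute_qa_f1(samples))     # 75.0: mean of the best per-sample token F1.
```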
#### File: sched/adaptdl_sched/supervisor.py
```python
import kubernetes_asyncio as kubernetes
from aiohttp import web
import logging
from adaptdl.sched_hints import SCHED_HINTS
from adaptdl_sched.config import get_supervisor_port
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
class Supervisor:
"""
Supervisor provides a simple REST interface for several functionalities.
Currently, it has two endpoints:
1. /hints for jobs to send scheduling hints.
2. /discover for finding the pod IPs of a job.
"""
def __init__(self, port, host='0.0.0.0'):
self._host = host
self._port = port
self._core_api = kubernetes.client.CoreV1Api()
self._objs_api = kubernetes.client.CustomObjectsApi()
async def _handle_healthz(self, request):
# Health check.
return web.Response()
async def _handle_discover(self, request):
# Long-polling endpoint used for discovering pod IPs for a given job.
namespace = request.match_info["namespace"]
name = request.match_info["name"]
group = request.match_info["group"]
timeout = int(request.query.get("timeout", "30"))
pod_ip_list = None
async with kubernetes.watch.Watch() as w:
stream = w.stream(self._core_api.list_namespaced_pod, namespace,
label_selector="adaptdl/job={}".format(name),
field_selector="status.podIP!=",
timeout_seconds=timeout)
async for event in stream:
pod = event["object"]
replicas = int(pod.metadata.annotations["adaptdl/replicas"])
rank = int(pod.metadata.annotations["adaptdl/rank"])
if pod.metadata.annotations["adaptdl/group"] == group:
if pod_ip_list is None:
pod_ip_list = [None] * replicas
pod_ip_list[rank] = pod.status.pod_ip
if all(pod_ip is not None for pod_ip in pod_ip_list):
return web.json_response(pod_ip_list)
return web.json_response(status=408) # Timeout.
async def _handle_report(self, request):
namespace = request.match_info['namespace']
name = request.match_info['name']
hints = await request.json()
# Drop all unrecognized fields. TODO: validate each client-sent field.
hints = {k: hints[k] for k in SCHED_HINTS if k in hints}
# Patch only the train field to avoid conflicts with controller.
patch = {"status": {"train": hints}}
LOG.info("Patch AdaptDLJob %s/%s: %s", namespace, name, patch)
await self._objs_api.patch_namespaced_custom_object_status(
"adaptdl.petuum.com", "v1", namespace, "adaptdljobs", name, patch)
return web.Response()
def run(self):
self.app = web.Application()
self.app.add_routes([
web.get('/healthz', self._handle_healthz),
web.get('/discover/{namespace}/{name}/{group}',
self._handle_discover),
web.put('/hints/{namespace}/{name}', self._handle_report),
])
LOG.info("%s %s", self._host, self._port)
web.run_app(self.app, host=self._host, port=self._port)
if __name__ == "__main__":
logging.basicConfig()
kubernetes.config.load_incluster_config()
supervisor = Supervisor(get_supervisor_port())
supervisor.run()
```
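The class docstring above lists the two REST endpoints; the snippet below sketches how a client might call them. The service address, namespace, job name, replica group, and hint key are all placeholders; the real hint keys come from `adaptdl.sched_hints.SCHED_HINTS`, which is not shown here.
```python
# Hypothetical client-side sketch of the Supervisor endpoints above; the
# address, namespace, job name, group, and hint key are placeholders.
import requests

base = "http://adaptdl-supervisor:8080"  # assumed service address and port

# Long-poll /discover for the pod IPs of a job's current replica group.
resp = requests.get(f"{base}/discover/default/my-job/group-0",
                    params={"timeout": 30})
if resp.status_code == 200:
    pod_ips = resp.json()  # one IP per replica, ordered by rank

# Report scheduling hints for a job; unrecognized keys are dropped server-side.
requests.put(f"{base}/hints/default/my-job",
             json={"exampleHint": 1})  # placeholder key, see SCHED_HINTS
```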
#### File: sched/adaptdl_sched/validator_test.py
```python
import json
import kubernetes_asyncio as kubernetes
import uuid
from http import HTTPStatus
from unittest.mock import AsyncMock
from adaptdl_sched.validator import Validator
def _assert_response(response_json, request_json):
assert response_json["apiVersion"] == "admission.k8s.io/v1"
assert response_json["kind"] == "AdmissionReview"
assert response_json["response"]["uid"] == request_json["request"]["uid"]
async def test_healthz(aiohttp_client, loop):
app = Validator().get_app()
client = await aiohttp_client(app)
response = await client.get("/healthz")
assert response.status == HTTPStatus.OK
async def test_create_invalid_template(aiohttp_client, loop):
validator = Validator()
# Set up mocks.
exc = kubernetes.client.rest.ApiException(
status=HTTPStatus.UNPROCESSABLE_ENTITY, reason="reason")
exc.body = json.dumps({"message": str(uuid.uuid4())})
mock = AsyncMock(side_effect=exc)
validator._core_api.create_namespaced_pod_template = mock
# Send request.
app = validator.get_app()
client = await aiohttp_client(app)
template = {"key": str(uuid.uuid4())}
request_json = {
"request": {
"uid": str(uuid.uuid4()),
"operation": "CREATE",
"namespace": str(uuid.uuid4()),
"object": {"spec": {"template": template}},
}
}
response = await client.post("/validate", json=request_json)
# Check template dry run.
assert mock.call_args.args[0] == request_json["request"]["namespace"]
assert mock.call_args.args[1]["template"] == template
assert mock.call_args.kwargs["dry_run"] == "All"
# Check HTTP response.
assert response.status == HTTPStatus.OK
response_json = await response.json()
_assert_response(response_json, request_json)
# Check operation was disallowed.
assert not response_json["response"]["allowed"]
status = response_json["response"]["status"]
assert status["code"] == HTTPStatus.UNPROCESSABLE_ENTITY
assert status["reason"] == "Invalid"
assert status["message"] == json.loads(exc.body)["message"]
async def test_create_invalid_replicas(aiohttp_client, loop):
validator = Validator()
validator._core_api.create_namespaced_pod_template = AsyncMock()
# Send request.
app = validator.get_app()
client = await aiohttp_client(app)
request_json = {
"request": {
"uid": str(uuid.uuid4()),
"operation": "CREATE",
"namespace": str(uuid.uuid4()),
"object": {"spec": {"minReplicas": 4, "maxReplicas": 2,
"template": {}}},
}
}
response = await client.post("/validate", json=request_json)
# Check HTTP response.
assert response.status == HTTPStatus.OK
response_json = await response.json()
_assert_response(response_json, request_json)
# Check operation was disallowed.
assert not response_json["response"]["allowed"]
status = response_json["response"]["status"]
assert status["code"] == HTTPStatus.UNPROCESSABLE_ENTITY
assert status["reason"] == "Invalid"
async def test_update_spec(aiohttp_client, loop):
validator = Validator()
# Send request.
app = validator.get_app()
client = await aiohttp_client(app)
request_json = {
"request": {
"uid": str(uuid.uuid4()),
"operation": "UPDATE",
"namespace": str(uuid.uuid4()),
"object": {"spec": {"key": "value"}},
"oldObject": {"spec": {"key": "oldValue"}},
}
}
response = await client.post("/validate", json=request_json)
# Check HTTP response.
assert response.status == HTTPStatus.OK
response_json = await response.json()
_assert_response(response_json, request_json)
# Check operation was disallowed.
assert not response_json["response"]["allowed"]
status = response_json["response"]["status"]
assert status["code"] == HTTPStatus.UNPROCESSABLE_ENTITY
assert status["reason"] == "Forbidden"
```
|