# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class PriceItem(scrapy.Item):
# define the fields for your item here like:
webId = scrapy.Field()
name = scrapy.Field()
brand = scrapy.Field()
model = scrapy.Field()
price = scrapy.Field()
url = scrapy.Field()
source = scrapy.Field()
class DetailItem(scrapy.Item):
webId = scrapy.Field()
webBrand = scrapy.Field()
webModel = scrapy.Field()
brand = scrapy.Field()
model = scrapy.Field()
commentCount = scrapy.Field()
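# A minimal usage sketch (not part of the original project): populating a
# PriceItem the way a spider callback might. All field values below are
# illustrative assumptions, not data from any real site.
if __name__ == '__main__':
    example = PriceItem(
        webId='12345',
        name='Example phone 128GB',
        brand='ExampleBrand',
        model='X-1',
        price='3999.00',
        url='https://example.com/item/12345',
        source='example',
    )
    print(dict(example))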
|
# -*- coding: utf-8 -*-
"""
Classes to handle reV h5 output files.
"""
import json
import logging
import numpy as np
import pandas as pd
import time
from reV.version import __version__
from reV.utilities.exceptions import (HandlerRuntimeError, HandlerValueError)
from rex.resource import BaseResource
from rex.utilities.parse_keys import parse_keys, parse_slice
from rex.utilities.utilities import to_records_array
logger = logging.getLogger(__name__)
class Outputs(BaseResource):
"""
Base class to handle reV output data in .h5 format
Examples
--------
The reV Outputs handler can be used to initialize h5 files in the standard
reV/rex resource data format.
>>> from reV import Outputs
>>> import pandas as pd
>>> import numpy as np
>>>
>>> meta = pd.DataFrame({'latitude': np.ones(100),
>>> 'longitude': np.ones(100)})
>>>
>>> time_index = pd.date_range('20210101', '20220101', freq='1h',
>>> closed='right')
>>>
>>> with Outputs('test.h5', 'w') as f:
>>> f.meta = meta
>>> f.time_index = time_index
You can also use the Outputs handler to read output h5 files from disk.
The Outputs handler will automatically parse the meta data and time index
into the expected pandas objects (DataFrame and DatetimeIndex,
respectively).
>>> with Outputs('test.h5') as f:
>>> print(f.meta.head())
>>>
latitude longitude
gid
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
3 1.0 1.0
4 1.0 1.0
>>> with Outputs('test.h5') as f:
>>> print(f.time_index)
DatetimeIndex(['2021-01-01 01:00:00+00:00', '2021-01-01 02:00:00+00:00',
'2021-01-01 03:00:00+00:00', '2021-01-01 04:00:00+00:00',
'2021-01-01 05:00:00+00:00', '2021-01-01 06:00:00+00:00',
'2021-01-01 07:00:00+00:00', '2021-01-01 08:00:00+00:00',
'2021-01-01 09:00:00+00:00', '2021-01-01 10:00:00+00:00',
...
'2021-12-31 15:00:00+00:00', '2021-12-31 16:00:00+00:00',
'2021-12-31 17:00:00+00:00', '2021-12-31 18:00:00+00:00',
'2021-12-31 19:00:00+00:00', '2021-12-31 20:00:00+00:00',
'2021-12-31 21:00:00+00:00', '2021-12-31 22:00:00+00:00',
'2021-12-31 23:00:00+00:00', '2022-01-01 00:00:00+00:00'],
dtype='datetime64[ns, UTC]', length=8760, freq=None)
There are a few ways to use the Outputs handler to write data to a file.
Here is one example using the pre-initialized file we created earlier.
Note that the Outputs handler will automatically scale float data using
the "scale_factor" attribute. The Outputs handler will unscale the data
while being read unless the unscale kwarg is explicitly set to False.
This behavior is intended to reduce disk storage requirements for big
data and can be disabled by setting dtype=np.float32 or dtype=np.float64
when writing data.
>>> Outputs.add_dataset(h5_file='test.h5', dset_name='dset1',
>>> dset_data=np.ones((8760, 100)) * 42.42,
>>> attrs={'scale_factor': 100}, dtype=np.int32)
>>> with Outputs('test.h5') as f:
>>> print(f['dset1'])
>>> print(f['dset1'].dtype)
[[42.42 42.42 42.42 ... 42.42 42.42 42.42]
[42.42 42.42 42.42 ... 42.42 42.42 42.42]
[42.42 42.42 42.42 ... 42.42 42.42 42.42]
...
[42.42 42.42 42.42 ... 42.42 42.42 42.42]
[42.42 42.42 42.42 ... 42.42 42.42 42.42]
[42.42 42.42 42.42 ... 42.42 42.42 42.42]]
float32
>>> with Outputs('test.h5', unscale=False) as f:
>>> print(f['dset1'])
>>> print(f['dset1'].dtype)
[[4242 4242 4242 ... 4242 4242 4242]
[4242 4242 4242 ... 4242 4242 4242]
[4242 4242 4242 ... 4242 4242 4242]
...
[4242 4242 4242 ... 4242 4242 4242]
[4242 4242 4242 ... 4242 4242 4242]
[4242 4242 4242 ... 4242 4242 4242]]
int32
Note that the reV Outputs handler is specifically designed to read and
write spatiotemporal data. It is therefore important to initialize the meta
data and time index objects even if your data is only spatial or only
temporal. Furthermore, the Outputs handler will always assume that 1D
datasets represent scalar data (non-timeseries) that corresponds to the
meta data shape, and that 2D datasets represent spatiotemporal data whose
shape corresponds to (len(time_index), len(meta)). You can see these
constraints here:
>>> Outputs.add_dataset(h5_file='test.h5', dset_name='bad_shape',
dset_data=np.ones((1, 100)) * 42.42,
attrs={'scale_factor': 100}, dtype=np.int32)
HandlerValueError: 2D data with shape (1, 100) is not of the proper
spatiotemporal shape: (8760, 100)
>>> Outputs.add_dataset(h5_file='test.h5', dset_name='bad_shape',
dset_data=np.ones((8760,)) * 42.42,
attrs={'scale_factor': 100}, dtype=np.int32)
HandlerValueError: 1D data with shape (8760,) is not of the proper
spatial shape: (100,)
"""
def __init__(self, h5_file, mode='r', unscale=True, str_decode=True,
group=None):
"""
Parameters
----------
h5_file : str
Path to .h5 resource file
mode : str, optional
Mode to instantiate h5py.File instance, by default 'r'
unscale : bool, optional
Boolean flag to automatically unscale variables on extraction,
by default True
str_decode : bool, optional
Boolean flag to decode the bytestring meta data into normal
strings. Setting this to False will speed up the meta data read,
by default True
group : str, optional
Group within .h5 resource file to open, by default None
"""
super().__init__(h5_file, unscale=unscale, hsds=False,
str_decode=str_decode, group=group, mode=mode)
self._mode = mode
self._group = self._check_group(group)
self._shape = None
if self.writable:
self.set_version_attr()
def __len__(self):
_len = 0
if 'meta' in self.datasets:
_len = self.h5['meta'].shape[0]
return _len
def __setitem__(self, keys, arr):
if self.writable:
ds, ds_slice = parse_keys(keys)
slice_test = False
if isinstance(ds_slice, tuple):
slice_test = ds_slice[0] == slice(None, None, None)
if ds.endswith('meta') and slice_test:
self._set_meta(ds, arr)
elif ds.endswith('time_index') and slice_test:
self._set_time_index(ds, arr)
else:
self._set_ds_array(ds, arr, ds_slice)
def set_version_attr(self):
"""Set the version attribute to the h5 file."""
self.h5.attrs['version'] = __version__
self.h5.attrs['package'] = 'reV'
@property
def version(self):
"""
Version of package used to create file
Returns
-------
str
"""
return self.h5.attrs['version']
@property
def package(self):
"""
Package used to create file
Returns
-------
str
"""
return self.h5.attrs['package']
@property
def source(self):
"""
Package and version used to create file
Returns
-------
str
"""
out = ("{}_{}"
.format(self.h5.attrs['package'], self.h5.attrs['version']))
return out
@property
def shape(self):
"""
Variable array shape from time_index and meta
Returns
-------
tuple
shape of variables arrays == (time, locations)
"""
if self._shape is None:
dsets = self.datasets
if 'meta' in dsets:
self._shape = self.h5['meta'].shape
if 'time_index' in dsets:
self._shape = self.h5['time_index'].shape + self._shape
return self._shape
@property
def writable(self):
"""
Check to see if h5py.File instance is writable
Returns
-------
is_writable : bool
Flag if mode is writable
"""
is_writable = True
mode = ['a', 'w', 'w-', 'x']
if self._mode not in mode:
is_writable = False
return is_writable
@BaseResource.meta.setter # pylint: disable-msg=E1101
def meta(self, meta):
"""
Write meta data to disk, convert type if necessary
Parameters
----------
meta : pandas.DataFrame | numpy.recarray
Locational meta data
"""
self._set_meta('meta', meta)
@BaseResource.time_index.setter # pylint: disable-msg=E1101
def time_index(self, time_index):
"""
Write time_index to disk, convert type if necessary
Parameters
----------
time_index : pandas.DatetimeIndex | ndarray
Temporal index of timesteps
"""
self._set_time_index('time_index', time_index)
@property
def SAM_configs(self):
"""
SAM configuration JSONs used to create CF profiles
Returns
-------
configs : dict
Dictionary of SAM configuration JSONs
"""
if 'meta' in self.datasets:
configs = {k: json.loads(v)
for k, v in self.h5['meta'].attrs.items()}
else:
configs = {}
return configs
@property
def run_attrs(self):
"""
Runtime attributes stored at the global (file) level
Returns
-------
global_attrs : dict
"""
return self.global_attrs
@run_attrs.setter
def run_attrs(self, run_attrs):
"""
Set runtime attributes as global (file) attributes
Parameters
----------
run_attrs : dict
Dictionary of runtime attributes (args, kwargs)
"""
if self.writable:
for k, v in run_attrs.items():
self.h5.attrs[k] = v
@staticmethod
def _check_data_dtype(data, dtype, attrs=None):
"""
Check data dtype and scale if needed
Parameters
----------
data : ndarray
Data to be written to disc
dtype : str
dtype of data on disc
attrs : dict, optional
Attributes to be set. May include 'scale_factor',
by default None
Returns
-------
data : ndarray
Data ready for writing to disc:
- Scaled and converted to dtype
"""
if attrs is None:
attrs = {}
scale_factor = attrs.get('scale_factor', None)
scale = (scale_factor is not None
and not np.issubdtype(data.dtype, np.integer))
if scale:
if scale_factor != 1 and not np.issubdtype(dtype, np.integer):
raise HandlerRuntimeError('Output dtype must be an integer in '
'order to apply scale factor {}.'
.format(scale_factor))
if not np.issubdtype(data.dtype, np.dtype(dtype)):
# apply scale factor and dtype
data = np.round(data * scale_factor).astype(dtype)
elif not np.issubdtype(data.dtype, np.dtype(dtype)):
raise HandlerRuntimeError('A scale_factor is needed to '
'scale "{}" data to "{}".'
.format(data.dtype, dtype))
return data
def _check_group(self, group):
"""
Ensure group is in .h5 file
Parameters
----------
group : str
Group of interest
"""
if group is not None:
if group not in self._h5:
try:
if self.writable:
self._h5.create_group(group)
except Exception as ex:
msg = ('Cannot create group {}: {}'
.format(group, ex))
raise HandlerRuntimeError(msg) from ex
return group
def _set_meta(self, ds, meta, attrs=None):
"""
Write meta data to disk
Parameters
----------
ds : str
meta dataset name
meta : pandas.DataFrame | numpy.recarray
Locational meta data
attrs : dict
Attributes to add to the meta data dataset
"""
# pylint: disable=attribute-defined-outside-init
self._meta = meta
if isinstance(meta, pd.DataFrame):
meta = to_records_array(meta)
if ds in self.datasets:
self.update_dset(ds, meta)
else:
self._create_dset(ds, meta.shape, meta.dtype, data=meta,
attrs=attrs)
def _set_time_index(self, ds, time_index, attrs=None):
"""
Write time index to disk
Parameters
----------
ds : str
time index dataset name
time_index : pandas.DatetimeIndex | ndarray
Temporal index of timesteps
attrs : dict
Attributes to add to the meta data dataset
"""
# pylint: disable=attribute-defined-outside-init
self._time_index = time_index
if isinstance(time_index, pd.DatetimeIndex):
time_index = time_index.astype(str)
dtype = "S{}".format(len(time_index[0]))
time_index = np.array(time_index, dtype=dtype)
if ds in self.datasets:
self.update_dset(ds, time_index)
else:
self._create_dset(ds, time_index.shape, time_index.dtype,
data=time_index, attrs=attrs)
def get_config(self, config_name):
"""
Get SAM config
Parameters
----------
config_name : str
Name of config
Returns
-------
config : dict
SAM config JSON as a dictionary
"""
if 'meta' in self.datasets:
config = json.loads(self.h5['meta'].attrs[config_name])
else:
config = None
return config
def set_configs(self, SAM_configs):
"""
Set SAM configuration JSONs as attributes of 'meta'
Parameters
----------
SAM_configs : dict
Dictionary of SAM configuration JSONs
"""
if self.writable:
for key, config in SAM_configs.items():
if isinstance(config, dict):
config = json.dumps(config)
if not isinstance(key, str):
key = str(key)
self.h5['meta'].attrs[key] = config
def _set_ds_array(self, ds_name, arr, ds_slice):
"""
Write ds to disk
Parameters
----------
ds_name : str
Dataset name
arr : ndarray
Dataset data array
ds_slice : tuple
Dataset slicing that corresponds to arr
"""
if ds_name not in self.datasets:
msg = '{} must be initialized!'.format(ds_name)
raise HandlerRuntimeError(msg)
dtype = self.h5[ds_name].dtype
attrs = self.get_attrs(ds_name)
ds_slice = parse_slice(ds_slice)
self.h5[ds_name][ds_slice] = self._check_data_dtype(
arr, dtype, attrs=attrs)
def _check_chunks(self, chunks, data=None):
"""
Convert dataset chunk size into valid tuple based on variable array
shape
Parameters
----------
chunks : tuple
Desired dataset chunk size
data : ndarray
Dataset array being chunked
Returns
-------
ds_chunks : tuple
dataset chunk size
"""
if chunks is not None:
if data is not None:
shape = data.shape
else:
shape = self.shape
if chunks[0] is None:
chunk_0 = shape[0]
else:
chunk_0 = np.min((shape[0], chunks[0]))
if chunks[1] is None:
chunk_1 = shape[1]
else:
chunk_1 = np.min((shape[1], chunks[1]))
ds_chunks = (chunk_0, chunk_1)
else:
ds_chunks = None
return ds_chunks
def _create_dset(self, ds_name, shape, dtype, chunks=None, attrs=None,
data=None, replace=True):
"""
Initialize dataset
Parameters
----------
ds_name : str
Dataset name
shape : tuple
Dataset shape
dtype : str
Dataset numpy dtype
chunks : tuple
Dataset chunk size
attrs : dict
Dataset attributes
data : ndarray
Dataset data array
replace : bool
If previous dataset exists with the same name, it will be replaced.
"""
if self.writable:
if ds_name in self.datasets and replace:
del self.h5[ds_name]
elif ds_name in self.datasets:
old_shape, old_dtype, _ = self.get_dset_properties(ds_name)
if old_shape != shape or old_dtype != dtype:
e = ('Trying to create dataset "{}", but already exists '
'with mismatched shape and dtype. New shape/dtype '
'is {}/{}, previous shape/dtype is {}/{}'
.format(ds_name, shape, dtype, old_shape, old_dtype))
logger.error(e)
raise HandlerRuntimeError(e)
if ds_name not in self.datasets:
chunks = self._check_chunks(chunks, data=data)
ds = self.h5.create_dataset(ds_name, shape=shape, dtype=dtype,
chunks=chunks)
if attrs is not None:
for key, value in attrs.items():
ds.attrs[key] = value
if data is not None:
ds[...] = data
def _check_dset_shape(self, dset_data):
"""
Check to ensure that dataset array is of the proper shape
Parameters
----------
dset_data : ndarray
Dataset data array
"""
dset_shape = dset_data.shape
if len(dset_shape) == 1:
shape = len(self)
if shape:
shape = (shape,)
if dset_shape != shape:
raise HandlerValueError("1D data with shape {} is not of "
"the proper spatial shape:"
" {}".format(dset_shape, shape))
else:
raise HandlerRuntimeError("'meta' has not been loaded")
else:
shape = self.shape
if shape:
if dset_shape != shape:
raise HandlerValueError("2D data with shape {} is not of "
"the proper spatiotemporal shape:"
" {}".format(dset_shape, shape))
else:
raise HandlerRuntimeError("'meta' and 'time_index' have not "
"been loaded")
def _add_dset(self, dset_name, data, dtype, chunks=None, attrs=None):
"""
Write dataset to disk. Dataset is created in the .h5 file and data is
scaled if needed.
Parameters
----------
dset_name : str
Name of dataset to be added to h5 file.
data : ndarray
Data to be added to h5 file.
dtype : str
Intended dataset datatype after scaling.
chunks : tuple
Chunk size for capacity factor means dataset.
attrs : dict
Attributes to be set. May include 'scale_factor'.
"""
self._check_dset_shape(data)
data = self._check_data_dtype(data, dtype, attrs=attrs)
self._create_dset(dset_name, data.shape, dtype,
chunks=chunks, attrs=attrs, data=data)
def update_dset(self, dset, dset_array, dset_slice=None):
"""
Check to see if dset needs to be updated on disk
If so, write dset_array to disk
Parameters
----------
dset : str
dataset to update
dset_array : ndarray
dataset array
dset_slice : tuple
slice of dataset to update; if None, update all
"""
if dset_slice is None:
dset_slice = (slice(None, None, None), )
keys = (dset, ) + dset_slice
arr = self.__getitem__(keys)
if not np.array_equal(arr, dset_array):
self._set_ds_array(dset, dset_array, dset_slice)
def write_dataset(self, dset_name, data, dtype, chunks=None, attrs=None):
"""
Write dataset to disk. Dataset is created in the .h5 file and data is
scaled if needed.
Parameters
----------
dset_name : str
Name of dataset to be added to h5 file.
data : ndarray
Data to be added to h5 file.
dtype : str
Intended dataset datatype after scaling.
chunks : tuple
Chunk size for capacity factor means dataset.
attrs : dict
Attributes to be set. May include 'scale_factor'.
"""
self._add_dset(dset_name, data, dtype, chunks=chunks, attrs=attrs)
@classmethod
def write_profiles(cls, h5_file, meta, time_index, dset_name, profiles,
dtype, attrs=None, SAM_configs=None, chunks=(None, 100),
unscale=True, mode='w-', str_decode=True, group=None):
"""
Write profiles to disk
Parameters
----------
h5_file : str
Path to .h5 resource file
meta : pandas.Dataframe
Locational meta data
time_index : pandas.DatetimeIndex
Temporal timesteps
dset_name : str
Name of the target dataset (should identify the profiles).
profiles : ndarray
reV output result timeseries profiles
dtype : str
Intended dataset datatype after scaling.
attrs : dict, optional
Attributes to be set. May include 'scale_factor', by default None
SAM_configs : dict, optional
Dictionary of SAM configuration JSONs used to compute cf means,
by default None
chunks : tuple, optional
Chunk size for capacity factor means dataset,
by default (None, 100)
unscale : bool, optional
Boolean flag to automatically unscale variables on extraction,
by default True
mode : str, optional
Mode to instantiate h5py.File instance, by default 'w-'
str_decode : bool, optional
Boolean flag to decode the bytestring meta data into normal
strings. Setting this to False will speed up the meta data read,
by default True
group : str, optional
Group within .h5 resource file to open, by default None
"""
logger.info("Saving profiles ({}) to {}".format(dset_name, h5_file))
if profiles.shape != (len(time_index), len(meta)):
raise HandlerValueError("Profile dimensions does not match"
"'time_index' and 'meta'")
ts = time.time()
kwargs = {"unscale": unscale, "mode": mode, "str_decode": str_decode,
"group": group}
with cls(h5_file, **kwargs) as f:
# Save time index
f['time_index'] = time_index
logger.debug("\t- 'time_index' saved to disc")
# Save meta
f['meta'] = meta
logger.debug("\t- 'meta' saved to disc")
# Add SAM configurations as attributes to meta
if SAM_configs is not None:
f.set_configs(SAM_configs)
logger.debug("\t- SAM configurations saved as attributes "
"on 'meta'")
# Write dset to disk
f._add_dset(dset_name, profiles, dtype,
chunks=chunks, attrs=attrs)
logger.debug("\t- '{}' saved to disc".format(dset_name))
tt = (time.time() - ts) / 60
logger.info('{} is complete'.format(h5_file))
logger.debug('\t- Saving to disc took {:.4f} minutes'
.format(tt))
@classmethod
def write_means(cls, h5_file, meta, dset_name, means, dtype, attrs=None,
SAM_configs=None, chunks=None, unscale=True, mode='w-',
str_decode=True, group=None):
"""
Write means array to disk
Parameters
----------
h5_file : str
Path to .h5 resource file
meta : pandas.Dataframe
Locational meta data
dset_name : str
Name of the target dataset (should identify the means).
means : ndarray
reV output means array.
dtype : str
Intended dataset datatype after scaling.
attrs : dict, optional
Attributes to be set. May include 'scale_factor', by default None
SAM_configs : dict, optional
Dictionary of SAM configuration JSONs used to compute cf means,
by default None
chunks : tuple, optional
Chunk size for capacity factor means dataset, by default None
unscale : bool, optional
Boolean flag to automatically unscale variables on extraction,
by default True
mode : str, optional
Mode to instantiate h5py.File instance, by default 'w-'
str_decode : bool, optional
Boolean flag to decode the bytestring meta data into normal
strings. Setting this to False will speed up the meta data read,
by default True
group : str, optional
Group within .h5 resource file to open, by default None
"""
logger.info("Saving means ({}) to {}".format(dset_name, h5_file))
if len(means) != len(meta):
msg = 'Number of means does not match meta'
raise HandlerValueError(msg)
ts = time.time()
kwargs = {"unscale": unscale, "mode": mode, "str_decode": str_decode,
"group": group}
with cls(h5_file, **kwargs) as f:
# Save meta
f['meta'] = meta
logger.debug("\t- 'meta' saved to disc")
# Add SAM configurations as attributes to meta
if SAM_configs is not None:
f.set_configs(SAM_configs)
logger.debug("\t- SAM configurations saved as attributes "
"on 'meta'")
# Write dset to disk
f._add_dset(dset_name, means, dtype,
chunks=chunks, attrs=attrs)
logger.debug("\t- '{}' saved to disc".format(dset_name))
tt = (time.time() - ts) / 60
logger.info('{} is complete'.format(h5_file))
logger.debug('\t- Saving to disc took {:.4f} minutes'
.format(tt))
@classmethod
def add_dataset(cls, h5_file, dset_name, dset_data, dtype, attrs=None,
chunks=None, unscale=True, mode='a', str_decode=True,
group=None):
"""
Add dataset to h5_file
Parameters
----------
h5_file : str
Path to .h5 resource file
dset_name : str
Name of dataset to be added to h5 file
dset_data : ndarray
Data to be added to h5 file
dtype : str
Intended dataset datatype after scaling.
attrs : dict, optional
Attributes to be set. May include 'scale_factor', by default None
unscale : bool, optional
Boolean flag to automatically unscale variables on extraction,
by default True
mode : str, optional
Mode to instantiate h5py.File instance, by default 'a'
str_decode : bool, optional
Boolean flag to decode the bytestring meta data into normal
strings. Setting this to False will speed up the meta data read,
by default True
group : str, optional
Group within .h5 resource file to open, by default None
"""
logger.info("Adding {} to {}".format(dset_name, h5_file))
ts = time.time()
kwargs = {"unscale": unscale, "mode": mode, "str_decode": str_decode,
"group": group}
with cls(h5_file, **kwargs) as f:
f._add_dset(dset_name, dset_data, dtype,
chunks=chunks, attrs=attrs)
tt = (time.time() - ts) / 60
logger.info('{} added'.format(dset_name))
logger.debug('\t- Saving to disc took {:.4f} minutes'
.format(tt))
@classmethod
def init_h5(cls, h5_file, dsets, shapes, attrs, chunks, dtypes,
meta, time_index=None, configs=None, unscale=True, mode='w',
str_decode=True, group=None, run_attrs=None):
"""Init a full output file with the final intended shape without data.
Parameters
----------
h5_file : str
Full h5 output filepath.
dsets : list
List of strings of dataset names to initialize (does not include
meta or time_index).
shapes : dict
Dictionary of dataset shapes (keys correspond to dsets).
attrs : dict
Dictionary of dataset attributes (keys correspond to dsets).
chunks : dict
Dictionary of chunk tuples (keys correspond to dsets).
dtypes : dict
dictionary of numpy datatypes (keys correspond to dsets).
meta : pd.DataFrame
Full meta data.
time_index : pd.datetimeindex | None
Full pandas datetime index. None implies that only 1D results
(no site profiles) are being written.
configs : dict | None
Optional input configs to set as attr on meta.
unscale : bool
Boolean flag to automatically unscale variables on extraction
mode : str
Mode to instantiate h5py.File instance
str_decode : bool
Boolean flag to decode the bytestring meta data into normal
strings. Setting this to False will speed up the meta data read.
group : str
Group within .h5 resource file to open
run_attrs : dict | NoneType
Runtime attributes (args, kwargs) to add as global (file)
attributes
"""
logger.debug("Initializing output file: {}".format(h5_file))
kwargs = {"unscale": unscale, "mode": mode, "str_decode": str_decode,
"group": group}
with cls(h5_file, **kwargs) as f:
if run_attrs is not None:
f.run_attrs = run_attrs
f['meta'] = meta
if time_index is not None:
f['time_index'] = time_index
for dset in dsets:
if dset not in ('meta', 'time_index'):
# initialize each dset to disk
f._create_dset(dset, shapes[dset], dtypes[dset],
chunks=chunks[dset], attrs=attrs[dset])
if configs is not None:
f.set_configs(configs)
logger.debug("\t- Configurations saved as attributes "
"on 'meta'")
logger.debug('Output file has been initialized.')
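# A minimal usage sketch (not part of the reV source): initializing an empty
# output file with Outputs.init_h5 using the arguments documented above. The
# dataset name 'cf_profile', the scale factor, shapes and chunking below are
# assumptions chosen purely for illustration.
if __name__ == '__main__':
    example_meta = pd.DataFrame({'latitude': np.zeros(10),
                                 'longitude': np.zeros(10)})
    example_time_index = pd.date_range('20210101', periods=8760, freq='1h')
    Outputs.init_h5('init_example.h5',
                    dsets=['cf_profile'],
                    shapes={'cf_profile': (8760, 10)},
                    attrs={'cf_profile': {'scale_factor': 1000}},
                    chunks={'cf_profile': (None, 10)},
                    dtypes={'cf_profile': 'int32'},
                    meta=example_meta,
                    time_index=example_time_index)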
|
import urllib.request
import zipfile
if __name__ == '__main__':
try:
urllib.request.urlretrieve("http://goo.gl/PnJHp", './fernflower.zip')
zf = zipfile.ZipFile('fernflower.zip')
zf.extract('fernflower.jar', '../runtime/bin')
except Exception:
print("Downloading Fernflower failed; download it manually from http://goo.gl/PnJHp")
|
#! /data/salomonis2/LabFiles/Frank-Li/citeseq/scanpy_new_env/bin/python3.6
import pandas as pd
import numpy as np
import os,sys
import scanpy as sc
import matplotlib.pyplot as plt
from sctriangulate import *
from sctriangulate.preprocessing import *
from sctriangulate.colors import bg_greyed_cmap
'''
All the input and intermediate files are deposited at https://www.synapse.org/#!Synapse:syn26320568
'''
# load the data
adata = sc.read_10x_h5('31WF_ND19-446__TNC-RNA-ADT.h5',gex_only=False)
adata_rna = adata[:,adata.var['feature_types']=='Gene Expression']
adata_adt = adata[:,adata.var['feature_types']=='Antibody Capture']
adata_rna.var_names_make_unique()
adata_adt.var_names_make_unique()
# qc, rna
adata_rna.var['mt'] = adata_rna.var_names.str.startswith('MT-')
sc.pp.calculate_qc_metrics(adata_rna, qc_vars=['mt'], percent_top=None, log1p=False, inplace=True)
for key in ['n_genes_by_counts','total_counts','pct_counts_mt']:
sc.pl.violin(adata_rna,key,jitter=0.4)
plt.savefig('qc_rna_violin_{}.pdf'.format(key),bbox_inches='tight')
plt.close()
sc.pl.scatter(adata_rna,x='n_genes_by_counts',y='total_counts',color='pct_counts_mt')
plt.savefig('qc_rna_scatter.pdf',bbox_inches='tight')
plt.close()
# qc, adt
sc.pp.calculate_qc_metrics(adata_adt, var_type='adts',percent_top=None, log1p=False, inplace=True)
for key in ['n_adts_by_counts','total_counts']:
sc.pl.violin(adata_adt,key,jitter=0.4)
plt.savefig('qc_adt_violin_{}.pdf'.format(key),bbox_inches='tight')
plt.close()
sc.pl.scatter(adata_adt,x='n_adts_by_counts',y='total_counts')
plt.savefig('qc_adt_scatter.pdf',bbox_inches='tight')
plt.close()
# decision, min_genes = 300, min_counts= 500, mito < 20
sc.pp.filter_cells(adata_rna, min_genes=300)
sc.pp.filter_cells(adata_rna, min_counts=500)
adata_rna = adata_rna[adata_rna.obs.pct_counts_mt < 20, :]
adata_adt = adata_adt[adata_rna.obs_names,:]
# get clusters and input
doublet_map = doublet_predict(adata_rna)
adding_azimuth(adata_rna,'azimuth_pred.tsv')
adata_rna = scanpy_recipe(adata_rna,False,resolutions=[0.5,1,2,3,4,5,6],modality='rna',pca_n_comps=50)
adata_adt = scanpy_recipe(adata_adt,False,resolutions=[0.5,1,2,3,4,5,6],modality='adt',pca_n_comps=15)
adata_rna = sc.read('adata_after_scanpy_recipe_rna_0.5_1_2_3_4_5_6_umap_True.h5ad')
adata_adt = sc.read('adata_after_scanpy_recipe_adt_0.5_1_2_3_4_5_6_umap_True.h5ad')
adata_combine = concat_rna_and_other(adata_rna,adata_adt,umap='other',name='adt',prefix='AB_')
# plot
all_r = ['sctri_{}_leiden_{}'.format(m,i) for i in [0.5,1,2,3,4,5,6] for m in ['rna','adt']]
cols = all_r + ['azimuth','doublet_scores']
umap_dual_view_save(adata_combine,cols)
adata_combine = make_sure_adata_writable(adata_combine,delete=True)
adata_combine.write('combined_rna_adt.h5ad')
# run scTriangulate
adata_combine = sc.read('combined_rna_adt.h5ad')
sctri = ScTriangulate(dir='output_one',adata=adata_combine,add_metrics={},predict_doublet='precomputed',
query=['sctri_adt_leiden_1','sctri_adt_leiden_2','sctri_adt_leiden_3','sctri_rna_leiden_1','sctri_rna_leiden_2','sctri_rna_leiden_3'])
sctri.lazy_run(viewer_heterogeneity_keys=['azimuth','sctri_adt_leiden_1','sctri_rna_leiden_1'])
# insights
## plot all adts
all_adts = ['AB_' + item for item in adata_adt.var_names.tolist()]
sctri = ScTriangulate.deserialize('output_one/after_pruned_assess.p')
sc.pl.umap(sctri.adata,color=all_adts,cmap=bg_greyed_cmap('viridis'),vmin=1e-5)
plt.savefig('all_adt.pdf',bbox_inches='tight')
plt.close()
sctri.plot_umap('AB_CD56','continuous',umap_cmap='viridis')
## output the obs
sctri.obs_to_df()
## adt and rna contributions
sctri.modality_contributions()
for col in ['adt_contribution','rna_contribution']:
sctri.plot_umap(col,'continuous',umap_cmap='viridis')
# distribution of resolutions
col = []
for item in sctri.adata.obs['pruned']:
if 'leiden_1@' in item:
col.append('resolution1')
elif 'leiden_2@' in item:
col.append('resolution2')
elif 'leiden_3@' in item:
col.append('resolution3')
sctri.adata.obs['resolution_distribution'] = col
sctri.plot_umap('resolution_distribution','category')
# CD4 memory, plot rank
for cluster in ['sctri_adt_leiden_2@10','sctri_adt_leiden_1@12','sctri_adt_leiden_2@22','sctri_adt_leiden_3@1','sctri_rna_leiden_2@2','sctri_rna_leiden_1@0','sctri_adt_leiden_1@0']:
sctri.plot_multi_modal_feature_rank(cluster=cluster)
|
from datetime import datetime
from datetime import timedelta
from pyosmo.model import TestStep
class TestStepLog:
def __init__(self, step: TestStep, duration: timedelta, error: Exception = None):
self._step = step
self._timestamp = datetime.now()
self._duration = duration
self._error = error
@property
def step(self) -> TestStep:
return self._step
@property
def error(self) -> Exception:
return self._error
@property
def name(self) -> str:
return self._step.function_name
@property
def timestamp(self) -> datetime:
return self._timestamp
@property
def duration(self) -> timedelta:
return self._duration
|
class Allen:
"""
Utility class for Allen's interval algebra, https://en.wikipedia.org/wiki/Allen%27s_interval_algebra.
"""
# ------------------------------------------------------------------------------------------------------------------
X_BEFORE_Y = 1
X_MEETS_Y = 2
X_OVERLAPS_WITH_Y = 3
X_STARTS_Y = 4
X_DURING_Y = 5
X_FINISHES_Y = 6
X_EQUAL_Y = 0
X_BEFORE_Y_INVERSE = -1
X_MEETS_Y_INVERSE = -2
X_OVERLAPS_WITH_Y_INVERSE = -3
X_STARTS_Y_INVERSE = -4
X_DURING_Y_INVERSE = -5
X_FINISHES_Y_INVERSE = -6
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def relation(x_start, x_end, y_start, y_end):
"""
Returns the relation between two intervals.
:param int x_start: The start point of the first interval.
:param int x_end: The end point of the first interval.
:param int y_start: The start point of the second interval.
:param int y_end: The end point of the second interval.
:rtype: int|None
"""
if (x_end - x_start) < 0 or (y_end - y_start) < 0:
return None
diff_end = y_end - x_end
if diff_end < 0:
return -Allen.relation(y_start, y_end, x_start, x_end)
diff_start = y_start - x_start
gap = y_start - x_end
if diff_end == 0:
if diff_start == 0:
return Allen.X_EQUAL_Y
if diff_start < 0:
return Allen.X_FINISHES_Y
return Allen.X_FINISHES_Y_INVERSE
if gap > 1:
return Allen.X_BEFORE_Y
if gap == 1:
return Allen.X_MEETS_Y
if diff_start > 0:
return Allen.X_OVERLAPS_WITH_Y
if diff_start == 0:
return Allen.X_STARTS_Y
if diff_start < 0:
return Allen.X_DURING_Y
# ----------------------------------------------------------------------------------------------------------------------
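# A quick self-check sketch (not part of the original class): exercising
# Allen.relation on small integer intervals, assuming the inclusive integer
# endpoint convention used above (a gap of exactly 1 means the intervals meet).
if __name__ == '__main__':
    assert Allen.relation(1, 3, 5, 8) == Allen.X_BEFORE_Y
    assert Allen.relation(1, 4, 5, 8) == Allen.X_MEETS_Y
    assert Allen.relation(1, 6, 4, 8) == Allen.X_OVERLAPS_WITH_Y
    assert Allen.relation(2, 4, 2, 8) == Allen.X_STARTS_Y
    assert Allen.relation(3, 5, 1, 8) == Allen.X_DURING_Y
    assert Allen.relation(5, 8, 1, 8) == Allen.X_FINISHES_Y
    assert Allen.relation(1, 8, 1, 8) == Allen.X_EQUAL_Y
    assert Allen.relation(5, 8, 1, 3) == Allen.X_BEFORE_Y_INVERSE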
|
import ciphers
import jw_utils
import sys
try: colorInternal = sys.stdout.shell.write
except AttributeError: raise RuntimeError("Use IDLE")
colorDef = {
"red": "stderr",
"black": "stdin",
"purple": "BUILTIN",
"green": "STRING",
"dark_red": "console",
"blue": "stdout",
"orange": "KEYWORD",
"white_on_black": "hit",
"black_on_red": "ERROR"
}
activatedCiphers = [
ciphers.ceasar.Cipher,
ciphers.vernam.Cipher,
ciphers.vigenere.Cipher
]
menu = jw_utils.menu.Menu("Ciphers")
for cipher in activatedCiphers:
def option(cipher=cipher): # cipher=cipher is a shim to allow defining in loop
secondaryMenu = jw_utils.menu.Menu("\nEncode or decode? (" + cipher.name + ")")
def enc():
out = cipher.encodeFriendly()
colorInternal("Ciphertext: ", colorDef["green"])
colorInternal(out + "\n", colorDef["red"])
def dec():
out = cipher.decodeFriendly()
colorInternal("Plaintext: ", colorDef["green"])
colorInternal(out + "\n", colorDef["black"])
secondaryMenu.addOption("Encode", enc)
secondaryMenu.addOption("Decode", dec)
def e():
secondaryMenu.endOptionLoop()
secondaryMenu.addOption("Exit to cipher selection", e, False)
secondaryMenu.doMenu()
menu.addOption(cipher.name, option, False)
menu.doMenu()
|
#!/usr/bin/env python3
import sys
argv = sys.argv[1:]
sys.argv = sys.argv[:1]
import json
import regex
import datetime
import dateutil.parser
import pytz
import common.http
import common.postgres
from common.config import config
import sqlalchemy
from sqlalchemy.dialects import postgresql
import urllib.error
engine, metadata = common.postgres.get_engine_and_metadata()
TBL_CLIPS = metadata.tables['clips']
TBL_EXT_CHANNEL = metadata.tables['external_channel']
CLIP_URL = "https://api.twitch.tv/kraken/clips/%s"
CLIPS_URL = "https://api.twitch.tv/kraken/clips/top"
VIDEO_URL = "https://api.twitch.tv/kraken/videos/%s"
def get_clips_page(channel, period="day", limit=10, cursor=None):
"""
https://dev.twitch.tv/docs/v5/reference/clips/#get-top-clips
"""
headers = {
'Client-ID': config['twitch_clientid'],
'Accept': 'application/vnd.twitchtv.v5+json',
}
params = {
'channel': channel,
'limit': str(limit),
'period': period,
}
if cursor is not None:
params['cursor'] = cursor
data = common.http.request(CLIPS_URL, params, headers=headers)
return json.loads(data)
def get_clip_info(slug, check_missing=False):
"""
https://dev.twitch.tv/docs/v5/reference/clips/#get-clip
"""
headers = {
'Client-ID': config['twitch_clientid'],
'Accept': 'application/vnd.twitchtv.v5+json',
}
try:
data = common.http.request(CLIP_URL % slug, headers=headers)
except urllib.error.HTTPError as e:
if e.code == 404 and check_missing:
return None
else:
raise
else:
return json.loads(data)
def process_clips(channel, period="day", per_page=10):
cursor = None
slugs = []
while True:
data = get_clips_page(channel, period, per_page, cursor)
if not data['clips']:
break
for clip in data['clips']:
process_clip(clip)
slugs.append(clip['slug'])
cursor = data['_cursor']
if not cursor:
break
return slugs
def get_video_info(vodid):
"""
https://dev.twitch.tv/docs/v5/reference/videos/#get-video
"""
if vodid not in get_video_info._cache:
headers = {
'Client-ID': config['twitch_clientid'],
'Accept': 'application/vnd.twitchtv.v5+json',
}
try:
data = common.http.request(VIDEO_URL % vodid, headers=headers)
get_video_info._cache[vodid] = json.loads(data)
except urllib.error.HTTPError as e:
if e.code == 404:
get_video_info._cache[vodid] = {'httperror': 404}
else:
raise
return get_video_info._cache[vodid]
get_video_info._cache = {}
# match a URL like:
# https://www.twitch.tv/videos/138124005?t=1h13m7s
RE_STARTTIME = regex.compile(r"^.*\?(?:.*&)?t=(\d+[hms])*(?:&.*)?$")
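# Illustrative note (not from the original code): for the example URL above,
# RE_STARTTIME.match(url).captures(1) yields ['1h', '13m', '7s'], which
# process_clip() below folds into a timedelta offset from the VOD start.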
def process_clip(clip):
# I wish there was a better way to get the clip "broadcast time"...
# clip['created_at'] exists, but it's the time the clip was created not when
# it was broadcast, so it's close when clipped live, but not useful when
# clipped from a vod...
if clip['vod']:
voddata = get_video_info(clip['vod']['id'])
if 'httperror' not in voddata:
match = RE_STARTTIME.match(clip['vod']['url'])
if not match:
raise ValueError("Couldn't find start time in %r for %s" % (clip['vod']['url'], clip['slug']))
offset = datetime.timedelta(0)
for piece in match.captures(1):
val, unit = int(piece[:-1]), piece[-1]
if unit == 's':
offset += datetime.timedelta(seconds=val)
elif unit == 'm':
offset += datetime.timedelta(minutes=val)
elif unit == 'h':
offset += datetime.timedelta(hours=val)
vod_start = dateutil.parser.parse(voddata['created_at'])
clip_start = vod_start + offset
else:
clip_start = dateutil.parser.parse(clip['created_at'])
else:
clip_start = dateutil.parser.parse(clip['created_at'])
data = {
"slug": clip['slug'],
"title": clip['title'],
"vodid": clip['vod']['id'] if clip['vod'] else None,
"time": clip_start,
"data": clip,
"deleted": False,
"channel": clip['broadcaster']['name'],
}
with engine.begin() as conn:
query = postgresql.insert(TBL_CLIPS)
query = query.on_conflict_do_update(
index_elements=[TBL_CLIPS.c.slug],
set_={
'title': query.excluded.title,
'vodid': query.excluded.vodid,
'time': query.excluded.time,
'data': query.excluded.data,
'deleted': query.excluded.deleted,
'channel': query.excluded.channel,
}
)
conn.execute(query, data)
def fix_null_vodids():
"""
Occasionally a clip won't have vod information... it's probably close to a
clip from the same vod that does have the id, so find the closest-by-time clip
that has a vodid, and use that.
"""
with engine.begin() as conn:
badvods = conn.execute(sqlalchemy
.select([TBL_CLIPS.c.id, TBL_CLIPS.c.time, TBL_CLIPS.c.channel])
.where(TBL_CLIPS.c.vodid == None))
# Get the updated vodids first, then update them all after, so that we don't
# use the vods we're updating as a source for copying to others...
updates = []
for clipid, cliptime, channel in badvods:
vodid = get_closest_vodid(conn, cliptime, channel)
updates.append((clipid, vodid))
for clipid, vodid in updates:
conn.execute(TBL_CLIPS.update().values(vodid=vodid).where(TBL_CLIPS.c.id == clipid))
def get_closest_vodid(conn, cliptime, channel):
prevclip = conn.execute(sqlalchemy.select([TBL_CLIPS.c.vodid, TBL_CLIPS.c.time])
.where(TBL_CLIPS.c.vodid != None)
.where(TBL_CLIPS.c.time < cliptime)
.where(TBL_CLIPS.c.channel == channel)
.limit(1)
.order_by(TBL_CLIPS.c.time.desc())).first()
nextclip = conn.execute(sqlalchemy.select([TBL_CLIPS.c.vodid, TBL_CLIPS.c.time])
.where(TBL_CLIPS.c.vodid != None)
.where(TBL_CLIPS.c.time > cliptime)
.where(TBL_CLIPS.c.channel == channel)
.limit(1)
.order_by(TBL_CLIPS.c.time.asc())).first()
if prevclip is not None and nextclip is not None:
prevdiff = cliptime - prevclip[1]
nextdiff = nextclip[1] - cliptime
if prevdiff < nextdiff:
return prevclip[0]
else:
return nextclip[0]
elif prevclip is not None:
return prevclip[0]
elif nextclip is not None:
return nextclip[0]
else:
raise ValueError("Can't find any non-null vodids in the DB...")
def check_deleted_clips(period, slugs):
"""
Go through any clips we have in the DB that weren't returned from the Twitch
query, and check if they actually exist (maybe they dropped out of the "last
day" early) or if they've been deleted, in which case mark that in the DB.
"""
period = datetime.timedelta(days={'day': 1, 'week': 7, 'month': 28}[period])
start = datetime.datetime.now(pytz.UTC) - period
with engine.begin() as conn:
clips = conn.execute(sqlalchemy.select([TBL_CLIPS.c.id, TBL_CLIPS.c.slug])
.where(TBL_CLIPS.c.time >= start)
.where(TBL_CLIPS.c.slug.notin_(slugs))
.where(TBL_CLIPS.c.deleted == False))
for clipid, slug in clips:
if get_clip_info(slug, check_missing=True) is None:
conn.execute(TBL_CLIPS.update().values(deleted=True).where(TBL_CLIPS.c.id == clipid))
def get_default_channels():
channels = [config['channel']]
with engine.begin() as conn:
channels.extend(channel for channel, in conn.execute(
sqlalchemy.select([TBL_EXT_CHANNEL.c.channel])))
return channels
def main():
if argv:
period = argv[0]
if period not in ('day', 'week', 'month'):
print("Usage:\n %s [day|week|month] [channel]" % sys.argv[0])
sys.exit(1)
else:
period = "day"
channels = argv[1:] if len(argv) > 1 else get_default_channels()
for channel in channels:
slugs = process_clips(channel, period)
fix_null_vodids()
check_deleted_clips(period, slugs)
if __name__ == '__main__':
main()
|
"""
Search and get metadata for articles in Pubmed.
"""
import xml.etree.ElementTree as ET
import requests
import logging
from functools import lru_cache
from time import sleep
from indra.util import UnicodeXMLTreeBuilder as UTB
logger = logging.getLogger(__name__)
pubmed_search = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
pubmed_fetch = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
# Send request can't be cached by lru_cache because it takes a dict
# (a mutable/unhashable type) as an argument. We cache the callers instead.
def send_request(url, data):
try:
res = requests.get(url, params=data)
except requests.exceptions.Timeout as e:
logger.error('PubMed request timed out')
logger.error('url: %s, data: %s' % (url, data))
logger.error(e)
return None
except requests.exceptions.RequestException as e:
logger.error('PubMed request exception')
logger.error('url: %s, data: %s' % (url, data))
logger.error(e)
return None
if res.status_code == 429:
sleep(0.5)
res = requests.get(url, params=data)
if not res.status_code == 200:
logger.error('Got return code %d from pubmed client.'
% res.status_code)
return None
tree = ET.XML(res.content, parser=UTB())
return tree
@lru_cache(maxsize=100)
def get_ids(search_term, **kwargs):
"""Search Pubmed for paper IDs given a search term.
Search options can be passed as keyword arguments, some of which are
custom keywords identified by this function, while others are passed on
as parameters for the request to the PubMed web service
For details on parameters that can be used in PubMed searches, see
https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch. Some useful
parameters to pass are db='pmc' to search PMC instead of PubMed, reldate=2
to search for papers within the last 2 days, and mindate='2016/03/01',
maxdate='2016/03/31' to search for papers in March 2016.
PubMed, by default, limits returned PMIDs to a small number, and this
number can be controlled by the "retmax" parameter. This function
uses a retmax value of 100,000 by default that can be changed via the
corresponding keyword argument.
Parameters
----------
search_term : str
A term for which the PubMed search should be performed.
use_text_word : Optional[bool]
If True, the "[tw]" string is appended to the search term to constrain
the search to "text words", that is, words that appear as a whole
in relevant parts of the PubMed entry (excluding, for instance, the journal
name or publication date) such as the title and abstract. Using this
option can eliminate spurious search results such as all articles
published in June for a search for the "JUN" gene, or journal names
that contain Acad for a search for the "ACAD" gene.
See also: https://www.nlm.nih.gov/bsd/disted/pubmedtutorial/020_760.html
Default : True
kwargs : kwargs
Additional keyword arguments to pass to the PubMed search as
parameters.
"""
use_text_word = kwargs.pop('use_text_word', True)
if use_text_word:
search_term += '[tw]'
params = {'term': search_term,
'retmax': 100000,
'retstart': 0,
'db': 'pubmed',
'sort': 'pub+date'}
params.update(kwargs)
tree = send_request(pubmed_search, params)
if tree is None:
return []
if tree.find('ERROR') is not None:
logger.error(tree.find('ERROR').text)
return []
if tree.find('ErrorList') is not None:
for err in tree.find('ErrorList'):
logger.error('Error - %s: %s' % (err.tag, err.text))
return []
count = int(tree.find('Count').text)
id_terms = tree.findall('IdList/Id')
if id_terms is None:
return []
ids = [idt.text for idt in id_terms]
if count != len(ids):
logger.warning('Not all ids were retrieved for search %s;\n'
'limited at %d.' % (search_term, params['retmax']))
return ids
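# Illustrative calls (not part of the original module) using the keyword
# arguments described in the docstring above; the search term 'BRAF' and the
# date ranges are assumptions:
#   get_ids('BRAF')                               # text-word search in PubMed
#   get_ids('BRAF', db='pmc')                     # search PMC instead of PubMed
#   get_ids('BRAF', reldate=2)                    # papers from the last 2 days
#   get_ids('BRAF', mindate='2016/03/01', maxdate='2016/03/31')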
def get_id_count(search_term):
"""Get the number of citations in Pubmed for a search query.
Parameters
----------
search_term : str
A term for which the PubMed search should be performed.
Returns
-------
int or None
The number of citations for the query, or None if the query fails.
"""
params = {'term': search_term,
'rettype': 'count',
'db': 'pubmed'}
tree = send_request(pubmed_search, params)
if tree is None:
return None
else:
count = list(tree)[0].text
return int(count)
@lru_cache(maxsize=100)
def get_ids_for_gene(hgnc_name, **kwargs):
"""Get the curated set of articles for a gene in the Entrez database.
Search parameters for the Gene database query can be passed in as
keyword arguments.
Parameters
----------
hgnc_name : str
The HGNC name of the gene. This is used to obtain the HGNC ID
(using the hgnc_client module) and in turn used to obtain the Entrez
ID associated with the gene. Entrez is then queried for that ID.
"""
from indra.databases import hgnc_client
# Get the HGNC ID for the HGNC name
hgnc_id = hgnc_client.get_hgnc_id(hgnc_name)
if hgnc_id is None:
raise ValueError('Invalid HGNC name.')
# Get the Entrez ID
entrez_id = hgnc_client.get_entrez_id(hgnc_id)
if entrez_id is None:
raise ValueError('Entrez ID not found in HGNC table.')
# Query the Entrez Gene database
params = {'db': 'gene',
'retmode': 'xml',
'id': entrez_id}
params.update(kwargs)
tree = send_request(pubmed_fetch, params)
if tree is None:
return []
if tree.find('ERROR') is not None:
logger.error(tree.find('ERROR').text)
return []
# Get all PMIDs from the XML tree
id_terms = tree.findall('.//PubMedId')
if id_terms is None:
return []
# Use a set to remove duplicate IDs
ids = list(set([idt.text for idt in id_terms]))
return ids
def get_ids_for_mesh(mesh_id, major_topic=False, **kwargs):
"""Return PMIDs that are annotated with a given MeSH ID.
Parameters
----------
mesh_id : str
The MeSH ID of a term to search for, e.g., D009101.
major_topic : bool
If True, only papers for which the given MeSH ID is annotated as
a major topic are returned. Otherwise all annotations are considered.
Default: False
**kwargs
Any further PubMed search arguments that are passed to
get_ids.
"""
from indra.databases import mesh_client
mesh_name = mesh_client.get_mesh_name(mesh_id)
if not mesh_name:
logger.error('Could not get MeSH name for ID %s' % mesh_id)
return []
suffix = 'majr' if major_topic else 'mh'
search_term = '%s [%s]' % (mesh_name, suffix)
ids = get_ids(search_term, use_text_word=False, **kwargs)
if mesh_id.startswith('C') and not major_topic:
# Get pmids for supplementary concepts as well
search_term = '%s [nm]' % mesh_name
ids2 = get_ids(search_term, use_text_word=False, **kwargs)
ids = list(set(ids) | set(ids2))
return ids
def get_article_xml(pubmed_id):
"""Get the Article subtree a single article from the Pubmed database.
Parameters
----------
pubmed_id : str
A PubMed ID.
Returns
-------
xml.etree.ElementTree.Element
The XML ElementTree Element that represents the Article portion of the
PubMed entry.
"""
full_xml_tree = get_full_xml(pubmed_id)
if full_xml_tree is None:
return None
article = full_xml_tree.find('PubmedArticle/MedlineCitation/Article')
return article  # May be None
@lru_cache(maxsize=100)
def get_full_xml(pubmed_id):
"""Get the full XML tree of a single article from the Pubmed database.
Parameters
----------
pubmed_id : str
A PubMed ID.
Returns
-------
xml.etree.ElementTree.Element
The root element of the XML tree representing the PubMed entry.
The root is a PubmedArticleSet with a single PubmedArticle element
that contains the article metadata.
"""
if pubmed_id.upper().startswith('PMID'):
pubmed_id = pubmed_id[4:]
params = {'db': 'pubmed',
'retmode': 'xml',
'id': pubmed_id}
tree = send_request(pubmed_fetch, params)
return tree
def get_title(pubmed_id):
"""Get the title of an article in the Pubmed database."""
article = get_article_xml(pubmed_id)
if article is None:
return None
return _get_title_from_article_element(article)
def _get_title_from_article_element(article):
title_tag = article.find('ArticleTitle')
title = None
if title_tag is not None:
title = title_tag.text
if hasattr(title_tag, 'itertext'):
title = ''.join(list(title_tag.itertext()))
return title
def _abstract_from_article_element(article, prepend_title=False):
abstract = article.findall('Abstract/AbstractText')
if abstract is None:
return None
abstract_text = ' '.join(['' if not hasattr(abst, 'itertext')
else ' '.join(list(abst.itertext()))
for abst in abstract])
if prepend_title:
title = _get_title_from_article_element(article)
if title is not None:
if not title.endswith('.'):
title += '.'
abstract_text = title + ' ' + abstract_text
return abstract_text
def get_abstract(pubmed_id, prepend_title=True):
"""Get the abstract of an article in the Pubmed database."""
article = get_article_xml(pubmed_id)
if article is None:
return None
return _abstract_from_article_element(article, prepend_title)
# A function to get the text for the element, or None if not found
def _find_elem_text(root, xpath_string):
elem = root.find(xpath_string)
return None if elem is None else elem.text
def _get_journal_info(medline_citation, get_issns_from_nlm):
# Journal info
journal = medline_citation.find('Article/Journal')
journal_title = _find_elem_text(journal, 'Title')
journal_abbrev = _find_elem_text(journal, 'ISOAbbreviation')
# Add the ISSN from the article record
issn_list = []
issn = _find_elem_text(journal, 'ISSN')
if issn:
issn_list.append(issn)
# Add the Linking ISSN from the article record
issn_linking = _find_elem_text(medline_citation,
'MedlineJournalInfo/ISSNLinking')
if issn_linking:
issn_list.append(issn_linking)
# Now get the list of ISSNs from the NLM Catalog
nlm_id = _find_elem_text(medline_citation,
'MedlineJournalInfo/NlmUniqueID')
if nlm_id and get_issns_from_nlm:
nlm_issn_list = get_issns_for_journal(nlm_id)
if nlm_issn_list:
issn_list += nlm_issn_list
# Remove any duplicate issns
issn_list = list(set(issn_list))
return {'journal_title': journal_title, 'journal_abbrev': journal_abbrev,
'issn_list': issn_list, 'journal_nlm_id': nlm_id}
def _get_pubmed_publication_date(pubmed_data):
date_dict = dict.fromkeys(['year', 'month', 'day'])
# Order potential statuses in order of preference
status_list = ['pubmed', 'accepted', 'revised', 'received', 'entrez']
# Look for various statuses, in order of preference as PubStatus in
# PubmedPubDate
for status in status_list:
pubmed_pub_date = \
pubmed_data.find('./History/PubMedPubDate[@PubStatus="%s"]'
% status)
if pubmed_pub_date is not None:
break
else:
logger.warning("Could not find pub date in: \n%s"
% ET.tostring(pubmed_data).decode('utf-8'))
return date_dict
def _find_date(element):
value = _find_elem_text(pubmed_pub_date, element)
return int(value) if value else None
# Get date elements from extracted pubmed_pub_date element
for date_elem in ['Year', 'Month', 'Day']:
date_dict[date_elem.lower()] = _find_date(date_elem)
return date_dict
def _get_article_info(medline_citation, pubmed_data):
article = medline_citation.find('Article')
pmid = _find_elem_text(medline_citation, './PMID')
pii = _find_elem_text(article,
'./ELocationID[@EIdType="pii"][@ValidYN="Y"]')
# Look for the DOI in the ELocationID field...
doi = _find_elem_text(article,
'./ELocationID[@EIdType="doi"][@ValidYN="Y"]')
# ...and if that doesn't work, look in the ArticleIdList
if doi is None:
doi = _find_elem_text(pubmed_data, './/ArticleId[@IdType="doi"]')
# Try to get the PMCID
pmcid = _find_elem_text(pubmed_data, './/ArticleId[@IdType="pmc"]')
# Title
title = _get_title_from_article_element(article)
# Author list
author_elems = article.findall('AuthorList/Author/LastName')
author_names = None if author_elems is None \
else [au.text for au in author_elems]
# Get the page number entry
page = _find_elem_text(article, 'Pagination/MedlinePgn')
return {'pmid': pmid, 'pii': pii, 'doi': doi, 'pmcid': pmcid,
'title': title, 'authors': author_names, 'page': page}
def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False,
get_abstracts=False, prepend_title=False,
mesh_annotations=True):
"""Get metadata for an XML tree containing PubmedArticle elements.
Documentation on the XML structure can be found at:
- https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html
- https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html
Parameters
----------
tree : xml.etree.ElementTree
ElementTree containing one or more PubmedArticle elements.
get_issns_from_nlm : Optional[bool]
Look up the full list of ISSN number for the journal associated with
the article, which helps to match articles to CrossRef search results.
Defaults to False, since it slows down performance.
get_abstracts : Optional[bool]
Indicates whether to include the Pubmed abstract in the results.
Default: False
prepend_title : Optional[bool]
If get_abstracts is True, specifies whether the article title should
be prepended to the abstract text. Default: False
mesh_annotations : Optional[bool]
If True, extract mesh annotations from the pubmed entries and include
in the returned data. If false, don't. Default: True
Returns
-------
dict of dicts
Dictionary indexed by PMID. Each value is a dict containing the
following fields: 'doi', 'title', 'authors', 'journal_title',
'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
"""
# Iterate over the articles and build the results dict
results = {}
pm_articles = tree.findall('./PubmedArticle')
for art_ix, pm_article in enumerate(pm_articles):
medline_citation = pm_article.find('./MedlineCitation')
pubmed_data = pm_article.find('PubmedData')
# Build the result
result = {}
article_info = _get_article_info(medline_citation, pubmed_data)
result.update(article_info)
journal_info = _get_journal_info(medline_citation, get_issns_from_nlm)
result.update(journal_info)
if mesh_annotations:
context_info = _get_annotations(medline_citation)
result.update(context_info)
publication_date = _get_pubmed_publication_date(pubmed_data)
result['publication_date'] = publication_date
# Get the abstracts if requested
if get_abstracts:
abstract = _abstract_from_article_element(
medline_citation.find('Article'),
prepend_title=prepend_title
)
result['abstract'] = abstract
# Add to dict
results[article_info['pmid']] = result
return results
def get_mesh_annotations(pmid):
"""Return a list of MeSH annotations for a given PubMed ID.
Parameters
----------
pmid : str
A PubMed ID.
Returns
-------
list of dict
A list of dicts that represent MeSH annotations with the following keys:
"mesh" representing the MeSH ID, "text" the standrd name associated with
the MeSH ID, "major_topic" a boolean flag set depending on whether
the given MeSH ID is assigned as a major topic to the article, and
"qualifier" which is a MeSH qualifier ID associated with the annotation,
if available, otherwise None.
"""
full_xml_tree = get_full_xml(pmid)
if not full_xml_tree:
return None
medline_citation = full_xml_tree.find('PubmedArticle/MedlineCitation')
if not medline_citation:
return None
annotations = _get_annotations(medline_citation)
return annotations.get('mesh_annotations')
def _get_annotations(medline_citation):
def _major_topic(e):
if e is not None and e.get('MajorTopicYN').upper() == 'Y':
return True
return False
info = []
for elem in medline_citation.findall('.//MeshHeading'):
dname = elem.find('DescriptorName')
qualifier_elems = elem.findall('QualifierName')
mid = dname.attrib['UI']
major = _major_topic(dname) or any(_major_topic(qual) for qual
in qualifier_elems)
qualifiers = [{'text': qual.text, 'mesh': qual.attrib['UI']}
for qual in qualifier_elems]
qual = qualifiers[0] if qualifiers else None
info.append({'type': 'main', 'mesh': mid, 'text': dname.text,
'major_topic': major,
# This is only here for backwards compatibility with
# INDRA DB which expects a single qualifier or None and
# turns the single qualifier into an int internally, so
# we can't easily put a joined string of multiple
# qualifiers here.
'qualifier': qual,
# This is the proper full list of qualifiers
'qualifiers': qualifiers})
for elem in medline_citation.findall('.//SupplMeshList/SupplMeshName'):
info.append({'type': 'supplementary', 'mesh': elem.attrib['UI'], 'text': elem.text,
'qualifier': None, 'qualifiers': [],
'major_topic': False})
return {'mesh_annotations': info}
def get_metadata_for_ids(pmid_list, get_issns_from_nlm=False,
get_abstracts=False, prepend_title=False):
"""Get article metadata for up to 200 PMIDs from the Pubmed database.
Parameters
----------
pmid_list : list of str
Can contain 1-200 PMIDs.
get_issns_from_nlm : bool
Look up the full list of ISSN number for the journal associated with
the article, which helps to match articles to CrossRef search results.
Defaults to False, since it slows down performance.
get_abstracts : bool
Indicates whether to include the Pubmed abstract in the results.
prepend_title : bool
If get_abstracts is True, specifies whether the article title should
be prepended to the abstract text.
Returns
-------
dict of dicts
Dictionary indexed by PMID. Each value is a dict containing the
following fields: 'doi', 'title', 'authors', 'journal_title',
'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
"""
if len(pmid_list) > 200:
raise ValueError("Metadata query is limited to 200 PMIDs at a time.")
params = {'db': 'pubmed',
'retmode': 'xml',
'id': pmid_list}
tree = send_request(pubmed_fetch, params)
if tree is None:
return None
return get_metadata_from_xml_tree(tree, get_issns_from_nlm, get_abstracts,
prepend_title)
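# A minimal sketch (not part of the original module) showing one way to work
# around the 200-PMID limit documented above: batch the PMIDs and merge the
# per-batch results. The helper name is hypothetical.
def _get_metadata_in_batches(pmid_list, batch_size=200, **kwargs):
    """Hypothetical helper: batched wrapper around get_metadata_for_ids."""
    results = {}
    for i in range(0, len(pmid_list), batch_size):
        batch_results = get_metadata_for_ids(pmid_list[i:i + batch_size],
                                              **kwargs)
        if batch_results:
            results.update(batch_results)
    return results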
@lru_cache(maxsize=1000)
def get_issns_for_journal(nlm_id):
"""Get a list of the ISSN numbers for a journal given its NLM ID.
Information on NLM XML DTDs is available at
https://www.nlm.nih.gov/databases/dtd/
"""
params = {'db': 'nlmcatalog',
'retmode': 'xml',
'id': nlm_id}
tree = send_request(pubmed_fetch, params)
if tree is None:
return None
issn_list = tree.findall('.//ISSN')
issn_linking = tree.findall('.//ISSNLinking')
issns = issn_list + issn_linking
# No ISSNs found!
if not issns:
return None
else:
return [issn.text for issn in issns]
def expand_pagination(pages):
"""Convert a page number to long form, e.g., from 456-7 to 456-457."""
# If there is no hyphen, it's a single page, and we're good to go
parts = pages.split('-')
if len(parts) == 1: # No hyphen, so no split
return pages
elif len(parts) == 2:
start = parts[0]
end = parts[1]
# If the end is the same number of digits as the start, then we
# don't change anything!
if len(start) == len(end):
return pages
# Otherwise, replace the last digits of start with the digits of end
num_end_digits = len(end)
new_end = start[:-num_end_digits] + end
return '%s-%s' % (start, new_end)
else: # More than one hyphen, something weird happened
logger.warning("Multiple hyphens in page number: %s" % pages)
return pages
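# Illustrative usage sketch (the PMID below is hypothetical, and the
# annotation lookup needs network access to the NCBI E-utilities):
#
#   >>> expand_pagination('456-7')
#   '456-457'
#   >>> anns = get_mesh_annotations('12345678')
#   >>> sorted(anns[0].keys())
#   ['major_topic', 'mesh', 'qualifier', 'qualifiers', 'text', 'type']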
|
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import random
import pandas as pd
import numpy as np
import keras.initializers
import keras.optimizers
from networkx import Graph, find_cliques
from sklearn.metrics import roc_curve, auc
from keras.layers import Concatenate, Input, Embedding, Lambda, Activation, BatchNormalization
from keras.layers.core import Dense, Dropout, Reshape
from keras.models import load_model, model_from_json, model_from_yaml, Model
from keras.utils.vis_utils import plot_model
from keras.callbacks import TensorBoard
from .datasets import DataSet
from .importing_modules import *
class NeuralNetworkConfig:
def __init__(self, categorical_input: str="cat_input", continuous_input: str="cont_input", output: str="output",
reshaped_output: str="reshaped_output", noisy_layer: str="noisy", kernel_initializer: str="uniform",
hidden: str = "hidden", reshaped: str="reshaped", dropout: str="dropout", merge: str="merge",
activation: str="relu", output_activation: str="sigmoid", batch_normalization: bool=False):
self.kernel_initializer = kernel_initializer
self.activation = activation
self.output_activation = output_activation
self.cont_input = continuous_input
self.cat_input = categorical_input
self.hidden = hidden
self.noisy_layer = noisy_layer
self.reshaped = reshaped
self.merge = merge
self.dropout = dropout
self.output = output
self.reshaped_output = reshaped_output
self.batch_normalization = batch_normalization
class NeuralNetwork:
def __init__(self, model):
self.__model = model
def get_model(self):
return self.__model
@classmethod
def from_file(cls, from_file: str):
model = load_model(from_file)
return cls(model)
def get_layer(self, name):
return self.__model.get_layer(name)
def get_weights(self):
return self.__model.get_weights()
def set_weights(self, weights):
self.__model.set_weights(weights)
def get_weights_for_layer(self, feature):
return self.__model.get_layer(feature).get_weights()
def get_weights_with_name(self):
model = self.__model
names = [layer.name for layer in model.layers]
weights = []
for name in names:
weights.append(model.get_layer(name).get_weights())
return dict(zip(names, weights))
def set_weights_by_name(self, weights):
for name, weight in weights.items():
self.__model.get_layer(name).set_weights(weight)
def save_plot(self, to_file='model_plot.svg', shapes=False, layer_names=False):
if to_file:
plot_model(self.__model, to_file=to_file, show_shapes=shapes, show_layer_names=layer_names)
def compile(self, loss='binary_crossentropy', lr=0.001):
optimizer=keras.optimizers.Adam(lr=lr)
self.__model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
def export(self, to_file):
if to_file:
name, ext = os.path.splitext(to_file)
if ext == '.h5':
self.__model.save(to_file)
elif ext == '.json':
model_json = self.__model.to_json()
                with open(to_file, 'w') as json_file:
json_file.write(model_json)
elif ext == '.yaml':
model_yaml = self.__model.to_yaml()
                with open(to_file, 'w') as yaml_file:
yaml_file.write(model_yaml)
class DenseNeuralNetwork(NeuralNetwork):
@classmethod
def from_scratch(cls, config: NeuralNetworkConfig, dataset, hidden_units: int,
embedding_size: int = 10, dropout_rate: float = 0.0,
output_units=1, embedding_layers_trainable=True):
categorical_data = dataset.get_data(without_resulting_feature=True).select_dtypes(include='category')
continuous_features = dataset.get_data(without_resulting_feature=True).select_dtypes(
exclude='category').columns.size
if isinstance(categorical_data, pd.DataFrame):
categorical_data_categories = {}
for column in categorical_data:
categorical_data_categories[column] = categorical_data[column].cat.categories.size
categorical_data = categorical_data_categories
model = DenseNeuralNetwork._build(config, categorical_data, continuous_features, hidden_units, embedding_size,
dropout_rate, output_units, embedding_layers_trainable)
return cls(model)
@staticmethod
def _build(config, categorical_data_categories, continuous_features: int, hidden_units: int, embedding_size: int,
dropout_rate, output_units: int, embedding_layers_trainable):
# create input layer for continuous data
continuous_input = Input(shape=(continuous_features,), name=config.cont_input)
reshaped_continuous_input = Reshape((1, continuous_features),
name=config.reshaped)(continuous_input)
# create input layers complemented by embedding layers to handle categorical features
embedding_layers = []
categorical_inputs = []
for feature, size in categorical_data_categories.items():
categorical_input = Input((1,), name=config.cat_input + "_" + feature)
categorical_inputs.append(categorical_input)
embedding_layer = Embedding(size, embedding_size, name=feature, trainable=embedding_layers_trainable)(
categorical_input)
embedding_layers.append(embedding_layer)
# merge all inputs
merge_layer = Concatenate(name=config.merge)(embedding_layers + [reshaped_continuous_input])
# hidden layers
hidden_layer = Dense(hidden_units, kernel_initializer=config.kernel_initializer,
name=config.hidden)(merge_layer)
if config.batch_normalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
dropout_layer = Dropout(dropout_rate, name=config.dropout)(hidden_layer)
# output_layer
output_layer = Dense(output_units, name=config.output)(dropout_layer)
output_layer = Activation(config.output_activation)(output_layer)
# add reshape layer since output should be vector
output_layer = Reshape((1,), name=config.reshaped_output)(output_layer)
# create final model
model = Model(inputs=categorical_inputs + [continuous_input], outputs=output_layer)
return model
class OptimizedNeuralNetwork(NeuralNetwork):
@classmethod
def from_scratch(cls, config: NeuralNetworkConfig, dataset: DataSet, correlation_info: list, embedding_size: int=10,
dropout_rate: float=0.0, output_units=1):
flatten_correlation = [item for sublist in correlation_info for item in sublist]
features = dataset.get_data(without_resulting_feature=True).columns
if not all(elem in features for elem in flatten_correlation):
return None
diff = list(set(features) - set(flatten_correlation))
diff = [[item] for item in diff]
correlation_info.extend(diff)
categorical_data = dataset.get_data(without_resulting_feature=True).select_dtypes(include='category')
continuous_features = dataset.get_data(without_resulting_feature=True).select_dtypes(exclude='category').columns
if isinstance(categorical_data, pd.DataFrame):
categorical_data_categories = {}
for column in categorical_data:
categorical_data_categories[column] = categorical_data[column].cat.categories.size
categorical_data = categorical_data_categories
model = OptimizedNeuralNetwork._build(config, categorical_data, continuous_features, correlation_info,
embedding_size, dropout_rate, output_units)
return cls(model)
@staticmethod
def _build(config: NeuralNetworkConfig, categorical_data_categories: dict, continuous_features: list,
correlation_info: list,embedding_size: int, dropout_rate: float, output_units: int):
feature_layers = {}
hidden_layers = []
inputs = []
for feature, size in categorical_data_categories.items():
categorical_input = Input((1,), name=config.cat_input + "_" + feature)
inputs.append(categorical_input)
embedding_layer = Embedding(size, embedding_size, name=feature)(categorical_input)
feature_layers[feature] = embedding_layer
for feature in continuous_features:
continuous_input = Input((1,), name=config.cont_input + "_" + feature)
inputs.append(continuous_input)
reshaped_continuous_input = Reshape((1, 1), name=feature)(continuous_input)
feature_layers[feature] = reshaped_continuous_input
for couple in correlation_info:
coupled_layers = [feature_layers[feature] for feature in couple]
if len(couple) > 1:
merge_layer = Concatenate()(coupled_layers)
hidden_layer = Dense(1, kernel_initializer=config.kernel_initializer)(merge_layer)
if config.batch_normalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
else:
hidden_layer = Dense(1, kernel_initializer=config.kernel_initializer)(coupled_layers[0])
if config.batch_normalization:
hidden_layer = BatchNormalization()(hidden_layer)
hidden_layer = Activation(config.activation)(hidden_layer)
hidden_layers.append(hidden_layer)
merge_layer = Concatenate()(hidden_layers)
dropout_layer = Dropout(dropout_rate, name=config.dropout)(merge_layer)
# output_layer
output_layer = Dense(1, name=config.output)(dropout_layer)
output_layer = Activation(config.output_activation)(output_layer)
# add reshape layer since output should be vector
output_layer = Reshape((output_units,), name=config.reshaped_output)(output_layer)
# create final model
model = Model(inputs=inputs, outputs=output_layer)
return model
class Trainer:
def __init__(self, nnet: NeuralNetwork, training_dataset, training_target, batch_size=32, epochs=1000):
self.__nnet = nnet
self.__training_dataset = training_dataset
self.__training_target = training_target
self.__batch_size = batch_size
self.__epochs = epochs
self.__score = None
self._preprocess_dataset()
def _preprocess_dataset(self):
categorical_data = DataSet.dataframe_to_series(self.__training_dataset.get_data(without_resulting_feature=True).select_dtypes(include='category'))
if isinstance(self.__nnet, OptimizedNeuralNetwork):
continuous_data = DataSet.dataframe_to_series(self.__training_dataset.get_data(without_resulting_feature=True).select_dtypes(exclude='category'))
self.__training_dataset = [*categorical_data, *continuous_data]
else:
continuous_data = self.__training_dataset.get_data().select_dtypes(exclude='category').values
self.__training_dataset = [*categorical_data, continuous_data]
def train(self, verbose=1):
tensorboard = TensorBoard(log_dir="./logs")
self.__nnet.get_model().fit(self.__training_dataset, self.__training_target, batch_size=self.__batch_size,
epochs=self.__epochs, verbose=verbose, shuffle=False, callbacks=[tensorboard])
def evaluate(self, verbose=1):
self.__score = self.__nnet.get_model().evaluate(self.__training_dataset, self.__training_target,
batch_size=self.__batch_size, verbose=verbose)
def get_score(self):
return self.__score
class Predictor:
def __init__(self, nnet: NeuralNetwork, dataset: DataSet):
self._nnet = nnet
self._dataset = dataset
self._score = {}
self._prediction = []
self._preprocess()
def _preprocess(self):
categorical_data = DataSet.dataframe_to_series(self._dataset.get_data().select_dtypes(include='category'))
if isinstance(self._nnet, OptimizedNeuralNetwork):
continuous_data = DataSet.dataframe_to_series(self._dataset.get_data().select_dtypes(exclude='category'))
self._dataset = [*categorical_data, *continuous_data]
else:
continuous_data = self._dataset.get_data().select_dtypes(exclude='category').values
self._dataset = [*categorical_data, continuous_data]
def predict(self):
self._prediction = self._nnet.get_model().predict(self._dataset).flatten()
return self._prediction
def evaluate(self, real_values, show_plot: bool = False):
if len(self._prediction) > 0:
rounded_pred = np.round(self._prediction)
tp = np.sum(np.logical_and(rounded_pred == 1, real_values == 1))
tn = np.sum(np.logical_and(rounded_pred == 0, real_values == 0))
fp = np.sum(np.logical_and(rounded_pred == 1, real_values == 0))
fn = np.sum(np.logical_and(rounded_pred == 0, real_values == 1))
accuracy = (tp + tn) / (tp + fp + fn + tn)
self._score['ppv'] = tp / (tp + fp)
self._score['npv'] = tn / (tn + fn)
self._score['recall'] = tp / (tp + fn)
self._score['specificity'] = tn / (tn + fp)
self._score['accuracy'] = accuracy
self._score['tp'] = tp
self._score['tn'] = tn
self._score['fp'] = fp
self._score['fn'] = fn
if show_plot:
self._roc(real_values, np.unique(real_values).size)
def _roc(self, real_values, n_classes):
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(real_values, self._prediction)
roc_auc[i] = auc(fpr[i], tpr[i])
plt.figure()
lw = 1
plt.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='AUC = %0.2f' % roc_auc[1])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
        plt.xlabel('False positive rate')
        plt.ylabel('True positive rate')
        plt.title('ROC curve')
plt.legend(loc="lower right")
plt.show()
def get_score(self):
return self._score
def get_prediction(self):
return self._prediction
class FeatureSelector:
def __init__(self, config: NeuralNetworkConfig, nnet: DenseNeuralNetwork, training_dataset):
self._source_model = nnet
self._config = config
self._training_dataset = training_dataset
categorical_columns = training_dataset.get_data(without_resulting_feature=True).select_dtypes(include='category').columns
self._weights = self._source_model.get_weights_with_name()
self._cat_input_shape = self._source_model.get_layer(config.cat_input + "_" + categorical_columns[0]).get_input_shape_at(0)
self._cont_input_shape = self._source_model.get_layer(config.cont_input).get_input_shape_at(0)[-1]
self._hid_size = self._source_model.get_layer(config.hidden).get_output_shape_at(0)[-1]
self._emb_size = self._source_model.get_layer(categorical_columns[0]).get_output_shape_at(0)[-1]
self._dropout_rate = self._source_model.get_layer(config.dropout).get_config()['rate']
self._cat_data = {}
for x in categorical_columns:
self._cat_data[x] = self._source_model.get_layer(x).get_config()["input_dim"] - 1
def _build_network(self, config, dataset, full_copy: bool = False):
noisy_model = DenseNeuralNetwork.from_scratch(config=config, dataset=dataset,
hidden_units=self._hid_size, embedding_size=self._emb_size,
dropout_rate=self._dropout_rate,embedding_layers_trainable=False)
return noisy_model
def run(self, training_dataset, training_target, test_dataset, test_target, noise_rate=0.01, training_epochs=100, batch_size=8, lr=0.001):
training_dataset = DataSet.copy(training_dataset)
test_dataset = DataSet.copy(test_dataset)
predictor = Predictor(self._source_model, test_dataset)
prediction = predictor.predict()
predictor.evaluate(test_target)
prev_accuracy = predictor.get_score()['accuracy']
curr_accuracy = predictor.get_score()['accuracy']
features_to_remove = []
# noise_rate = random.uniform(0, noise_rate)
while curr_accuracy >= prev_accuracy:
for column in training_dataset.get_data().columns:
if test_dataset.get_data()[column].dtype.name == 'category':
noisy_dataset = DataSet.copy(test_dataset)
noisy_dataset.add_noise_to_categorical_columns(column, noise_rate)
noisy_model = self._source_model
predictor = Predictor(noisy_model, noisy_dataset)
else:
noisy_dataset = DataSet.copy(test_dataset)
noisy_dataset.add_noise_to_column(column, noise_rate)
noisy_model = self._source_model
predictor = Predictor(noisy_model, noisy_dataset)
noisy_prediction = predictor.predict()
sensitivity = abs(np.sum(noisy_prediction) - np.sum(prediction)) / len(noisy_prediction)
test_dataset.get_features().set_sensitivity(column, sensitivity)
training_dataset.get_features().set_sensitivity(column, sensitivity)
print("Sensitivity of %s: %f" % (column, training_dataset.get_features().get_sensitivity(column)))
less_sensitive_feature = test_dataset.get_features().get_less_sensitive_feature()
features_to_remove.append(less_sensitive_feature)
test_dataset.rm_less_sensitive()
training_dataset.rm_less_sensitive()
emb_weights = {feature: self._weights[feature] for feature in training_dataset.get_data().select_dtypes(include='category').columns.tolist()}
self._source_model = self._build_network(self._config, training_dataset)
self._source_model.compile(lr=lr)
self._source_model.set_weights_by_name(emb_weights)
trainer = Trainer(self._source_model, training_dataset, training_target, epochs=training_epochs, batch_size=batch_size)
trainer.train()
trainer.evaluate()
self._weights = self._source_model.get_weights_with_name()
predictor = Predictor(self._source_model, test_dataset)
prediction = predictor.predict()
predictor.evaluate(test_target)
prev_accuracy, curr_accuracy = curr_accuracy, predictor.get_score()['accuracy']
print(prev_accuracy)
print(curr_accuracy)
return features_to_remove[:-1]
class CorrelationAnalyzer:
def __init__(self, config: NeuralNetworkConfig, nnet: DenseNeuralNetwork, training_dataset):
self._source_model = nnet
self._config = config
self._training_dataset = training_dataset
self._columns = self._training_dataset.get_data().columns
categorical_columns = training_dataset.get_data(without_resulting_feature=True).select_dtypes(
include='category').columns
self._weights = None
self._emb_weights = None
self._cat_input_shape = self._source_model.get_layer(config.cat_input + "_" + categorical_columns[0]).get_input_shape_at(0)
self._cont_input_shape = self._source_model.get_layer(config.cont_input).get_input_shape_at(0)[-1]
self._hid_size = self._source_model.get_layer(config.hidden).get_output_shape_at(0)[-1]
self._emb_size = self._source_model.get_layer(categorical_columns[0]).get_output_shape_at(0)[-1]
self._dropout_rate = self._source_model.get_layer(config.dropout).get_config()['rate']
self._table = np.empty([len(categorical_columns)+self._cont_input_shape+1, len(categorical_columns)+self._cont_input_shape+1])
self._cat_data = {}
for x in categorical_columns:
self._cat_data[x] = self._source_model.get_layer(x).get_config()["input_dim"] - 1
def _build_network(self, config, dataset, full_copy: bool = False):
noisy_model = DenseNeuralNetwork.from_scratch(config=config, dataset=dataset,
hidden_units=self._hid_size, embedding_size=self._emb_size,
dropout_rate=self._dropout_rate,embedding_layers_trainable=False)
if not full_copy:
noisy_model.set_weights_by_name(self._emb_weights)
else:
noisy_model.set_weights_by_name(self._weights)
return noisy_model
def run(self, test_dataset, training_dataset, training_target, noise_rate=0.01, training_epochs=100, batch_size=32, lr=0.03):
training_dataset = DataSet.copy(training_dataset)
trainer = Trainer(self._source_model, training_dataset, training_target, epochs=training_epochs)
trainer.train()
trainer.evaluate()
self._weights = self._source_model.get_weights_with_name()
self._emb_weights = {feature: self._weights[feature] for feature in list(self._cat_data.keys())}
predictor = Predictor(self._source_model, test_dataset)
self._table[0][0] = np.sum(predictor.predict())
# noise_rate = random.uniform(0, noise_rate)
for idx, column in enumerate(self._columns):
if training_dataset.get_data()[column].dtype.name == 'category':
noisy_dataset = DataSet.copy(training_dataset)
noisy_dataset.add_noise_to_categorical_columns(column, noise_rate)
noisy_model = self._build_network(self._config, training_dataset)
noisy_model.compile(lr=lr)
trainer = Trainer(noisy_model, noisy_dataset, training_target, epochs=training_epochs, batch_size=batch_size)
trainer.train()
trainer.evaluate()
predictor = Predictor(noisy_model, test_dataset)
else:
noisy_dataset = DataSet.copy(training_dataset)
noisy_dataset.add_noise_to_column(column, noise_rate)
noisy_model = self._build_network(self._config, training_dataset)
noisy_model.compile(lr=lr)
trainer = Trainer(noisy_model,noisy_dataset, training_target, epochs=training_epochs, batch_size=batch_size)
trainer.train()
trainer.evaluate()
predictor = Predictor(noisy_model, test_dataset)
noisy_prediction = predictor.predict()
self._table[0][idx+1] = abs(np.sum(noisy_prediction) - self._table[0][0]) / len(noisy_prediction)
for idx, column in enumerate(self._columns):
if test_dataset.get_data()[column].dtype.name == 'category':
noisy_dataset = DataSet.copy(test_dataset)
noisy_dataset.add_noise_to_categorical_columns(column, noise_rate)
noisy_model = self._source_model
                predictor = Predictor(noisy_model, noisy_dataset)
else:
noisy_dataset = DataSet.copy(test_dataset)
noisy_dataset.add_noise_to_column(column, noise_rate)
noisy_model = self._source_model
predictor = Predictor(noisy_model, noisy_dataset)
noisy_prediction = predictor.predict()
self._table[idx + 1][0] = abs(np.sum(noisy_prediction) - self._table[0][0]) / len(noisy_prediction)
for c in range(len(self._cat_data)+self._cont_input_shape):
for idx in range(len(self._cat_data)+self._cont_input_shape):
self._table[idx+1][c+1] = abs(self._table[idx+1][0] - self._table[0][c+1])
self._table = np.delete(self._table, 0, 0)
self._table = np.delete(self._table, 0, 1)
self._table = pd.DataFrame(data=self._table, index=self._columns, columns=self._columns)
self._table.loc['mean'] = self._table.mean()
return self._table
def select_candidates(self):
candidates = pd.DataFrame(columns=self._columns)
fcandidates = []
for column in self._table:
candidates[column] = pd.Series((self._table.loc[self._table[column] > self._table[column]['mean']]).index)
for column in candidates:
for row in range(candidates.shape[0]):
                if candidates[column][row] == candidates[column][row] and candidates[column][row] != column:  # x == x is False for NaN, so this skips empty cells
if column in candidates[candidates[column][row]].tolist():
fcandidates.append([column, candidates[column][row]])
[l.sort() for l in fcandidates]
fcandidates = [l for l in fcandidates if fcandidates.count(l) == 2]
fcandidates = [tuple(x) for x in set(tuple(x) for x in fcandidates)]
correlation_graph = Graph()
correlation_graph.add_edges_from(fcandidates)
fcandidates = list(find_cliques(correlation_graph))
return fcandidates
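# A rough end-to-end sketch of how these classes fit together (the DataSet
# instances, target arrays and hyperparameters below are assumptions, not
# part of this module):
#
#   config = NeuralNetworkConfig(batch_normalization=True)
#   nnet = DenseNeuralNetwork.from_scratch(config, train_ds, hidden_units=64)
#   nnet.compile(lr=0.001)
#   Trainer(nnet, train_ds, train_target, epochs=100).train()
#   predictor = Predictor(nnet, test_ds)
#   predictor.predict()
#   predictor.evaluate(test_target)   # populates predictor.get_score()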
|
import pygame
GREEN = (25, 111, 61)
BLUE = (31, 97, 141)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
TURQUOISE = (64, 224, 208)
class Cell:
def __init__(self, row, col, width, total_rows):
self.row = row
self.col = col
self.x = row * width
self.y = col * width
self.color = WHITE
self.successors = []
self.width = width
self.total_rows = total_rows
def get_pos(self):
return self.row, self.col
def is_closed(self):
return self.color == BLUE
def is_open(self):
return self.color == TURQUOISE
def is_wall(self):
return self.color == BLACK
def is_start(self):
return self.color == GREEN
def is_end(self):
return self.color == GREEN
def reset(self):
self.color = WHITE
def make_start(self):
self.color = GREEN
def make_closed(self):
self.color = BLUE
def make_open(self):
self.color = TURQUOISE
def make_wall(self):
self.color = BLACK
def make_end(self):
self.color = GREEN
def make_path(self):
self.color = GREEN
def draw(self, win):
pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.width))
def get_successors(self, grid):
self.successors = []
if self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_wall(): # DOWN
self.successors.append(grid[self.row + 1][self.col])
if self.row > 0 and not grid[self.row - 1][self.col].is_wall(): # UP
self.successors.append(grid[self.row - 1][self.col])
if self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_wall(): # RIGHT
self.successors.append(grid[self.row][self.col + 1])
if self.col > 0 and not grid[self.row][self.col - 1].is_wall(): # LEFT
self.successors.append(grid[self.row][self.col - 1])
def __lt__(self, other):
return False
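# Minimal illustrative sketch (assumed 4x4 grid of 100-pixel cells; not part
# of the original module):
#
#   grid = [[Cell(r, c, 100, 4) for c in range(4)] for r in range(4)]
#   grid[1][0].make_wall()
#   grid[0][0].get_successors(grid)
#   # grid[0][0].successors now holds only the non-wall neighbour at (0, 1)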
|
from flask_admin.contrib.sqla import ModelView
class FeatureRequestModelView(ModelView):
form_choices = {
"client": [
("Client A", "Client A"),
("Client B", "Client B"),
("Client C", "Client C"),
("Client D", "Client D"),
("Client E", "Client E"),
("Client F", "Client F"),
("Client G", "Client G"),
("Client H", "Client H"),
("Client I", "Client I"),
("Client J", "Client J"),
("Client K", "Client K"),
("Client L", "Client L"),
("Client M", "Client M"),
("Client N", "Client N"),
("Client O", "Client O"),
("Client P", "Client P"),
("Client Q", "Client Q"),
("Client R", "Client R"),
("Client S", "Client S"),
("Client T", "Client T"),
("Client U", "Client U"),
("Client V", "Client V"),
("Client W", "Client W"),
("Client X", "Client X"),
("Client Y", "Client Y"),
("Client Z", "Client Z"),
],
"client_priority": [
("1", 1),
("2", 2),
("3", 3),
("4", 4),
("5", 5),
("6", 6),
("7", 7),
("8", 8),
("9", 9),
("10", 10),
],
"product_area": [
("Policies", "Policies"),
("Billing", "Billing"),
("Claims", "Claims"),
("Reports", "Reports"),
],
}
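# Hypothetical registration with Flask-Admin (the FeatureRequest model, app
# and db session are assumptions, not defined in this module):
#
#   from flask_admin import Admin
#   admin = Admin(app, name="Feature Requests")
#   admin.add_view(FeatureRequestModelView(FeatureRequest, db.session))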
|
import unittest
from decimal import Decimal
from wexapi.models import OrderInfo
class TestOrderInfo(unittest.TestCase):
def test_create_valid(self):
data = {
"order_id": "343152",
"pair": "btc_usd",
"type": "sell",
"start_amount": 13.345,
"amount": 12.345,
"rate": 485,
"timestamp_created": 1342448420,
"status": 0
}
info = OrderInfo(**data)
self.assertIsInstance(info.start_amount, Decimal)
self.assertEqual(info.start_amount, Decimal(13.345))
if __name__ == '__main__':
unittest.main()
|
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import matplotlib.pyplot as plt
from data_preprocessing import preprocess_mr, get_slots
from postprocessing import postprocess_utterance
import sys
from data_manager import load_data_tensors, load_text_data
from models import Encoder, BahdanauAttention, Decoder
import time
import os
import nltk
from slug2slug_aligner import get_unaligned_and_hallucinated_slots
sys.path.append('./')
import argparse
import helpers
import bayesian_sampler
BATCH_SIZE = 1
# optimal beam size found by Juraska
END_SYMBOL = '<end>'
START_SYMBOL = '<start>'
parser = argparse.ArgumentParser(description='Generate utterances from a trained E2E description generator')
parser.add_argument("test_data", type=str,
help="The path to the test data file")
parser.add_argument("-id", "--identifier", default='',
help="The identifier used to define checkpoint and training info directories")
parser.add_argument("-b", "--beam-width", type=int, default=0,
help="Size of beam to use in generation. If not specified use sampling.")
parser.add_argument("-s", "--sample-content", default=False, action="store_true",
help="Sample slots used in MR of utterance.")
parser.add_argument("-cpd", "--cpd-model-file", default='cpd_model.pkl',
help="Pickle file where the cpd model is stored")
parser.add_argument("-p", "--print-utt", default=False, action="store_true",
help="Print generations for dataset before estimating bleu score")
class BeamObj:
def __init__(self, utterance, probability, last_id, last_hidden):
self.utterance = utterance
self.probability = probability
# save the id of the last word
# this will be used as input for the next timestep
self.last_id = last_id
# this is the decoder hidden state obtained from the previous prediction
self.last_hidden = last_hidden
def __repr__(self):
return '{}({})'.format(self.utterance, self.probability)
def search_candidates(curr_prob, curr_utterance, dec_hidden, predictions, ref_idx2word, beam_size):
""" Find the possible extensions to an utterance and their log-probabilities. """
candidates = []
    # calculate probabilities if new predictions were added
# check for all new predictions in the beam
if predictions.shape[0] != 1:
raise ValueError('Batches not supported in beam search. ')
preds = predictions[0]
ids_by_prob = np.argsort(-preds)[:beam_size]
for word_id in ids_by_prob:
pred = preds[word_id]
new_prob = curr_prob + pred
new_utterance = (curr_utterance + " " + ref_idx2word[word_id]).strip()
candidates += [BeamObj(new_utterance, new_prob, word_id, dec_hidden)]
return candidates
def score_prediction(prediction, mr_slots):
"""
Scores a complete utterance based on slot realisation.
mr_slots should be a dict where keys are slots and values are slot values.
The score function is taken from Juraska et al.
"""
N = len(mr_slots.keys())
# remove the whitespace placeholders
orig_mr_slots = { k.replace(' ', ''): v.replace('_', ' ') for k, v in mr_slots.items() }
    # use Juraska's code to get erroneous slots
unaligned_slots, hallucinated_slots = get_unaligned_and_hallucinated_slots(prediction, orig_mr_slots)
score = N/((len(unaligned_slots)+1)*(len(hallucinated_slots)+1))
return score
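# Worked example of the slot-based score above: an MR with N = 4 slots,
# 1 unaligned slot and 0 hallucinated slots scores 4 / ((1 + 1) * (0 + 1)) = 2.0,
# while a perfect realisation scores N = 4.0.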
def generation_done(beam_obj, training_info, end_token):
""" Stop when end token is reached or the sentence is the maximal length. """
return beam_obj.last_id == end_token or len(beam_obj.utterance) == training_info['max_length_targ']
def get_length_normalisation_denominator(utterance, alpha=0.9):
""" As done in Google's NMT paper, who found optimal alpha to be between 0.6 and 0.7. """
utt_len = len(utterance.split(" "))
return ((5 + utt_len)**alpha)/((5+1)**alpha)
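# Worked example: an 11-word utterance with the default alpha = 0.9 gives
# ((5 + 11) ** 0.9) / (6 ** 0.9) = (16 / 6) ** 0.9 ~= 2.42, so longer
# hypotheses have their log-probabilities divided by a larger factor.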
def evaluate_with_beam(encoder, decoder, mr_info, training_info, beam_size):
attention_plot = np.zeros((training_info['max_length_targ'], training_info['max_length_inp']))
processed_mr_info = preprocess_mr(mr_info)
inputs = [training_info['mr_word2idx'][i.strip()] for i in processed_mr_info.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
maxlen=training_info['max_length_inp'],
padding='post')
inputs = tf.convert_to_tensor(inputs)
start_token = training_info['ref_word2idx'][START_SYMBOL]
end_token = training_info['ref_word2idx'][END_SYMBOL]
hidden = encoder.initialize_hidden_state(1)
enc_out, forward_hidden, backward_hidden = encoder(inputs, hidden)
dec_hidden = tf.keras.layers.Concatenate()([forward_hidden, backward_hidden])
beam = [BeamObj('', 0, start_token, dec_hidden)]
while(True):
new_beams = []
for beam_obj in beam:
if generation_done(beam_obj, training_info, end_token):
new_beams += [beam_obj]
continue
predictions, dec_hidden, attention_weights = decoder(tf.expand_dims([beam_obj.last_id], 0), beam_obj.last_hidden, enc_out)
curr_prob = beam_obj.probability
curr_utterance = beam_obj.utterance
# the network gives back logits instead of probabilities
# so start by calculating probabilities
preds = tf.nn.log_softmax(predictions, axis=1).numpy()
# find the candidates for this prediction
candidates = search_candidates(curr_prob, curr_utterance, dec_hidden, preds, training_info['ref_idx2word'], beam_size)
new_beams += candidates
normalised_beams = [BeamObj(b.utterance, b.probability/get_length_normalisation_denominator(b.utterance), b.last_id, b.last_hidden) for b in new_beams]
beam = sorted(normalised_beams, key=lambda b: -b.probability)[:beam_size]
all_generated = [generation_done(beam_obj, training_info, end_token) for beam_obj in beam]
if np.all(all_generated):
break
return beam, processed_mr_info, attention_plot
def create_results(predicted_ids, results, training_info):
""" Create utterances from predicted ids """
for i in range(len(predicted_ids)):
idd = predicted_ids[i]
utt = results[i]
# don't add anything if the utterance already has an end symbol
if not utt.endswith(END_SYMBOL) and idd != 0:
results[i] = (utt + " " + training_info['ref_idx2word'][idd]).strip()
return results
def evaluate_with_sampling(encoder, decoder, mr_info, training_info, batch_size):
attention_plot = np.zeros((training_info['max_length_targ'], training_info['max_length_inp']))
processed_mr_info = preprocess_mr(mr_info)
inputs = [training_info['mr_word2idx'][i.strip()] if i.strip() in training_info['mr_word2idx'] else 0 for i in processed_mr_info.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
maxlen=training_info['max_length_inp'],
padding='post')
inputs = tf.convert_to_tensor(inputs)
inputs = tf.tile(inputs, tf.constant([batch_size, 1]))
hidden = encoder.initialize_hidden_state(batch_size)
enc_out, forward_hidden, backward_hidden = encoder(inputs, hidden)
dec_hidden = tf.keras.layers.Concatenate()([forward_hidden, backward_hidden])
dec_input = tf.expand_dims([training_info['ref_word2idx'][START_SYMBOL]], 0)
dec_input = tf.tile(dec_input, tf.constant([batch_size, 1]))
results = ['']*batch_size
for t in range(training_info['max_length_targ']):
predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
pred_dist = tfp.distributions.Multinomial(total_count=1, logits=predictions)
predicted_ids = tf.argmax(pred_dist.sample(1)[0], axis=1).numpy()
results = create_results(predicted_ids, results, training_info)
dec_input = tf.expand_dims(predicted_ids, 1)
return results, mr_info, attention_plot
def generate_reference_using_beam(encoder, decoder, mr_info, training_info, beam_size=1):
""" Generate new reference, and postprocess it to form a complete sentence using beam search."""
beam, processed_mr_info, attention_plot = evaluate_with_beam(encoder, decoder, mr_info, training_info, beam_size)
mr_slots = get_slots(mr_info, remove_whitespace=False)
# postprocess and score the beam
for beam_obj in beam:
processed_utterance = postprocess_utterance(beam_obj.utterance, mr_slots)
score = score_prediction(processed_utterance, mr_slots)
beam_obj.utterance = processed_utterance
beam_obj.probability += np.log(score)
# order again by probability
sorted_beam = sorted(beam, key=lambda x: -x.probability)
return sorted_beam[0].utterance
def generate_reference_with_sampling(encoder, decoder, mr_info, training_info):
""" Generate new reference, and postprocess it to form a complete sentence by sampling the next token from a probability distribution."""
results, processed_mr_info, attention_plot = evaluate_with_sampling(encoder, decoder, mr_info, training_info, batch_size=10)
mr_slots = get_slots(mr_info, remove_whitespace=False)
scores = np.zeros(len(results))
utterances = []
for i, ref in enumerate(results):
processed_utterance = postprocess_utterance(ref, mr_slots)
score = score_prediction(processed_utterance, mr_slots)
scores[i] = score
utterances.append(processed_utterance)
# postprocess and score the beam
best_pred_id = np.argsort(-scores)[0]
return utterances[best_pred_id]
def sample_mr_content(mr_info, content_selection_model_file):
mr_slots = get_slots(mr_info, remove_whitespace=False)
# don't sample over name
sample_mrs = [k for k in mr_slots.keys() if k != 'name']
sampled_slots = bayesian_sampler.sample_slots(content_selection_model_file, sample_mrs)
# always include name
sampled_slots += ['name']
mr_slots_to_use = { mr_key: mr_slots[mr_key] for mr_key in mr_slots.keys() if mr_key in sampled_slots }
return ', '.join(k + '[' + v + ']' for k, v in mr_slots_to_use.items())
def print_generations(test_data, encoder, decoder, training_info, beam_width, sample_content, cpd_model_file):
print('Beam width is', beam_width)
for i in range(len(test_data)):
print(test_data['mr'].iloc[i])
mr_input = test_data['mr'].iloc[i]
mr_info = ''
if sample_content:
mr_info = sample_mr_content(mr_input, cpd_model_file)
print('Sampled mr', mr_info)
else:
mr_info = mr_input
generated = ''
if beam_width > 0:
generated = generate_reference_using_beam(encoder, decoder, mr_info, training_info, beam_width)
else:
generated = generate_reference_with_sampling(encoder, decoder, mr_info, training_info)
print(generated)
if 'ref' in test_data.columns:
print(test_data['ref'].iloc[i])
bleu = nltk.translate.bleu_score.sentence_bleu([test_data['ref'].iloc[i]], generated)
print('bleu score for the best prediction', bleu)
print('-------------------------')
def calculate_mean_bleu_score(test_data, encoder, decoder, training_info, beam_width, sample_content, cpd_model_file=None):
print('Calculating mean BLEU score for validation set of size', len(test_data))
bleus = np.zeros(len(test_data))
if sample_content and cpd_model_file is None:
raise ValueError('Please give CPD model file if sampling content')
for i in range(len(test_data)):
mr_input = test_data['mr'].iloc[i]
mr_info = ''
if sample_content:
mr_info = sample_mr_content(mr_input, cpd_model_file)
else:
mr_info = mr_input
generated = ''
if beam_width > 0:
generated = generate_reference_using_beam(encoder, decoder, mr_info, training_info, beam_width)
else:
generated = generate_reference_with_sampling(encoder, decoder, mr_info, training_info)
bleu = nltk.translate.bleu_score.sentence_bleu([test_data['ref'].iloc[i]], generated)
bleus[i] = bleu
if i % 50 == 0:
print(i)
if i % 500 == 0:
print(generated)
print(test_data['ref'].iloc[i])
print('mean bleu', np.mean(bleus[bleus > 0]))
return np.mean(bleus), np.var(bleus)
def main(test_data_file, checkpoint_dir, training_info_file, beam_width, sample_content, cpd_model_file, print_utt):
training_info = helpers.load_from_pickle(training_info_file)
encoder = Encoder(len(training_info['mr_word2idx'])+1,
training_info['embedding_dim'],
training_info['units'])
decoder = Decoder(len(training_info['ref_word2idx'])+1,
training_info['embedding_dim'],
training_info['units']*2,
training=False)
optimizer = tf.keras.optimizers.Adam()
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
print('Restoring checkpoint from', checkpoint_dir)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
# get test data
test_data = load_text_data(test_data_file, 2000)
if print_utt:
print_generations(test_data, encoder, decoder, training_info, beam_width, sample_content, cpd_model_file)
bleu_mean, bleu_var = calculate_mean_bleu_score(test_data, encoder, decoder, training_info, beam_width, sample_content, cpd_model_file)
print(bleu_mean, bleu_var)
if __name__ == "__main__":
# restoring the latest checkpoint in checkpoint_dir
args = parser.parse_args()
test_data_file = args.test_data
identifier = args.identifier
checkpoint_dir = 'training_checkpoints' + identifier
training_info_file = 'training_info' + identifier + '.pkl'
beam_width = args.beam_width
sample_content = args.sample_content
print_utt = args.print_utt
print('Sampling content', sample_content)
cpd_model_file = args.cpd_model_file
    main(test_data_file, checkpoint_dir, training_info_file, beam_width, sample_content, cpd_model_file, print_utt)
|
#! /usr/bin/env python3
import argparse
from PyPDF2 import PdfFileReader, PdfFileWriter
def main(pdf_file, new_chapters_file, output_file):
with open(new_chapters_file) as f:
new_chapters = [line.strip() for line in f.readlines()]
reader = PdfFileReader(pdf_file)
writer = PdfFileWriter()
old_chapters = {
chapter['/Page']: chapter['/Title']
for chapter in reader.outlines
}
if len(old_chapters) != len(new_chapters):
print('Number of chapters unequal')
return
chapter_pairs = zip(sorted(old_chapters.items()), new_chapters)
for (page_num, old_chapter), new_chapter in chapter_pairs:
print('[p.{}] {} => {}'.format(page_num, old_chapter, new_chapter))
for i in range(reader.getNumPages()):
page = reader.getPage(i)
writer.addPage(page)
chapter_pairs = zip(sorted(old_chapters), new_chapters)
for page_num, new_chapter in chapter_pairs:
writer.addBookmark(new_chapter, page_num)
with open(output_file, 'wb') as f:
writer.write(f)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''
Rename chapters based on an input file
''')
parser.add_argument('pdf', help='PDF file containing the old chapters')
parser.add_argument('chapters',
help='''
Plain text file to read the updated chapters from.
One line represents one chapter.
''')
parser.add_argument('output', help='Output file')
args = parser.parse_args()
main(args.pdf, args.chapters, args.output)
|
# -*- coding: utf-8 *-*
import sys
import os
import errno
import time
import pprint
# global configuration
import vprimer.glv as glv
import vprimer.utils as utl
from vprimer.logging_config import LogConf
log = LogConf.open_log(__name__)
import vcfpy
from vprimer.allele_select import AlleleSelect
class Variant(object):
def __init__(self):
pass
def pick_variant(self):
"""
"""
proc_name = "variant"
log.info("-------------------------------")
log.info("Start processing {}\n".format(proc_name))
# stop, action, gothrough
ret_status = utl.decide_action_stop(proc_name)
if ret_status == "stop":
msg = "STOP. "
msg += "Current process \'{}\' ".format(proc_name)
msg += "has exceeded the User-specified stop point "
msg += "\'{}', ".format(glv.conf.stop)
msg += "so stop program. exit."
log.info(msg)
sys.exit(1)
elif ret_status == "gothrough":
msg = "SKIP \'{}\' proc, ".format(proc_name)
msg += "glv.conf.progress = {}, ".format(glv.conf.progress)
msg += "glv.conf.stop = {}, ".format(glv.conf.stop)
msg += "so skip program."
log.info(msg)
return
# open vcf through vcfpy
reader = vcfpy.Reader.from_path(glv.conf.vcf_file_path)
# for each distinguish_groups
for proc_cnt, distin_dict in enumerate(glv.outlist.distin_files, 1):
reg = distin_dict['region']
# vcf_ittr for each distinguish groups
vcf_ittr = reader.fetch(glv.conf.regions_dict[reg]['reg'])
self._iterate_vcf(vcf_ittr, distin_dict, proc_cnt)
def _iterate_vcf(self, vcf_ittr, distin_dict, proc_cnt):
"""
"""
        # basic information
gr_list = [distin_dict[0], distin_dict[1]]
reg = distin_dict['region']
reg_dict = glv.conf.regions_dict[reg]
pick_mode = distin_dict['pick_mode']
indel_size = distin_dict['indel_size']
min_indel_len, max_indel_len = \
[int(i) for i in indel_size.split('-')]
        # First, we check the difference in genotype between the two samples
        # that are listed at the beginning of each group
top_smpl_list = [
glv.conf.group_members_dict[gr_list[0]][0],
glv.conf.group_members_dict[gr_list[1]][0]]
# logging current target
utl.print_distin_info("variant", distin_dict, proc_cnt)
start = time.time()
# File name to export variant
out_txt_file = distin_dict['variant']['out_path']
utl.save_to_tmpfile(out_txt_file)
#------------------------------------------------------
        # To add an allele_int column for all samples
# Members of the specified group come first
# gr0:s1 g0:s2 g0:s3 g1:s4 g1:s5 g1:s6 s7 s8 s9 s10
sample_nickname_ordered_list, \
sample_fullname_ordered_list = \
utl.get_ordered_sample_list(gr_list)
sample_added_header = "{}\t{}".format(
distin_dict['variant']['hdr_text'],
"\t".join(sample_nickname_ordered_list))
# Can I parallelize here?
with open(out_txt_file, mode='a') as f:
# write sample added header
f.write("{}\n".format(sample_added_header))
# access to vcf using iterater
for record in vcf_ittr:
# 1. Skip same GT between top two sample
if self._skip_same_GT_between_top2sample(
record, top_smpl_list) > 0:
continue
# 2. Check GT in your own group
if self._skip_different_GT_in_own_group(
record, top_smpl_list, gr_list) > 0:
continue
# 3. Select different allele combination among 2x2 allele
asel = AlleleSelect(min_indel_len, max_indel_len)
asel.select_diff_allele(record, top_smpl_list, gr_list)
# from record, construct allele_int of the member
# who is paying attention
allele_int_line = ""
# 4. Save variant information as text file
for var_type, line in zip(asel.var_types, asel.lines):
if utl.is_my_pick_mode(
var_type, distin_dict['pick_mode']) == True:
# make allele_int line
if allele_int_line == "":
#self._get_ai_line(
allele_int_line = \
self._get_allele_line(
record, sample_fullname_ordered_list)
# add allele line
f.write("{}\t{}\n".format(line, allele_int_line))
log.info("variant {} > {}.txt\n".format(
utl.elapsed_time(time.time(), start),
distin_dict['variant']['base_nam']))
def _get_ai_line(self, record, sample_fullname_list):
'''
'''
#line = [record.CHROM, record.POS, record.REF]
#alt_list = [alt.value for alt in record.ALT]
#line += [",".join(alt_list)]
line = list()
line += [AlleleSelect.allele_convert("{}/{}".format(
record.call_for_sample[fn].gt_alleles[0],
record.call_for_sample[fn].gt_alleles[1]
), "int") for fn in sample_fullname_list]
line_str = '\t'.join(map(str, line))
return line_str
def _get_allele_line(self, record, sample_fullname_list):
'''
'''
#line = [record.CHROM, record.POS, record.REF]
#alt_list = [alt.value for alt in record.ALT]
#line += [",".join(alt_list)]
line = list()
line += [AlleleSelect.allele_convert("{}/{}".format(
record.call_for_sample[fn].gt_alleles[0],
record.call_for_sample[fn].gt_alleles[1]
), "allele") for fn in sample_fullname_list]
line_str = '\t'.join(map(str, line))
return line_str
def _skip_different_GT_in_own_group(self, record, tsl, gr_list):
skip = glv.SKIP_DONT_SKIP
# check twice, group0, and group1
for gr_no in range(2):
# pick sample name belong to a group
for (sample_no, sample_name) in enumerate(
glv.conf.group_members_dict[gr_list[gr_no]]):
if sample_no == 0:
continue # self
sample0 = tsl[gr_no]
sample1 = sample_name
                # if a variant is found between the two samples,
s0_0, s0_1, s1_0, s1_1 = \
AlleleSelect.record_call_for_sample(
record, sample0, sample1)
# compare alleles with first sample
if s0_0 == s1_0 and s0_1 == s1_1:
#log.debug("SKIP_SAME_HOMO {},({}){} {}{}/{}{}".format(
# gr_list[gr_no],
# sample_no, sample_name,
# record.call_for_sample[tsl[gr_no]].gt_alleles[0],
# record.call_for_sample[tsl[gr_no]].gt_alleles[1],
# record.call_for_sample[sample_name].gt_alleles[0],
# record.call_for_sample[sample_name].gt_alleles[1]))
pass
else:
skip = glv.SKIP_DIFF_INGROUP
#log.debug("SKIP_SAME_HOMO {},({}){} {}{}/{}{}".format(
# gr_list[gr_no],
# sample_no, sample_name,
# record.call_for_sample[tsl[gr_no]].gt_alleles[0],
# record.call_for_sample[tsl[gr_no]].gt_alleles[1],
# record.call_for_sample[sample_name].gt_alleles[0],
# record.call_for_sample[sample_name].gt_alleles[1]))
return skip
return skip
def _skip_same_GT_between_top2sample(self, record, tsl):
# for REF 20200708
sample0 = tsl[0]
sample1 = tsl[1]
s0_0, s0_1, s1_0, s1_1 = \
AlleleSelect.record_call_for_sample(record, sample0, sample1)
skip = glv.SKIP_DONT_SKIP
# ./. only 0
if utl.is_None(s0_0, s0_1, s1_0, s1_1):
skip = glv.SKIP_None
#log.debug("SKIP_None {}{}/{}{}".format(s0_0,s0_1,s1_0,s1_1))
return skip
# same homo: AA,AA
if utl.is_same_homo(s0_0, s0_1, s1_0, s1_1):
skip = glv.SKIP_SAME_HOMO
#log.debug("SKIP_SAME_HOMO {}{}/{}{}".format(s0_0,s0_1,s1_0,s1_1))
return skip
# same hetero: AB,AB
if utl.is_same_hetero(s0_0, s0_1, s1_0, s1_1):
skip = glv.SKIP_SAME_HETERO
#log.debug("SKIP_SAME_HETERO {}{}/{}{}".format(
# s0_0,s0_1,s1_0,s1_1))
return skip
return skip
def print_allele(self):
''' When show_genotype is specified, the genotype of the specified
        regions and members is output to a file.
main
variant.py print_allele
allele_select.py cls allele_int
'''
proc_name = "genotype"
log.info("-------------------------------")
log.info("Start processing {}\n".format(proc_name))
# header
header = list()
header += ["CHROM", "POS", "Rlen", "Alen", "diff", "REF", "ALT"]
header += glv.conf.group_members_dict['all']
# reader
reader = vcfpy.Reader.from_path(glv.conf.vcf_file_path)
total_cnt = len(glv.conf.region_name_list)
# Save to file for each region
for proc_cnt, region_name in enumerate(glv.conf.region_name_list, 1):
region = glv.conf.regions_dict[region_name]['reg']
# Create a list of fullname for the specified members
sample_fullname_list = list()
for nickname in glv.conf.group_members_dict['all']:
sample_fullname_list.append(utl.get_fullname(nickname))
# if group priority
#sample_fullname_list = \
# utl.get_sample_list_from_groupname(
# group_list, "fullname")
# out file name
outf_pref = "005_genotype"
basename = "{}~{}~{}".format(
outf_pref, region_name, glv.conf.show_genotype)
out_file_path = "{}/{}.txt".format(
glv.conf.out_dir_path, basename)
# backup
utl.save_to_tmpfile(out_file_path)
log.info("")
log.info("{} / {}, {}({}) > {}".format(
proc_cnt, total_cnt, region_name, region, out_file_path ))
start = time.time()
with open(out_file_path, mode='w') as f:
f.write("{}\n".format('\t'.join(map(str, header))))
vcf_ittr = reader.fetch(region)
for record in vcf_ittr:
# Main informations
line = [record.CHROM, record.POS]
alt_list = [alt.value for alt in record.ALT]
# variant length and diff
len_ref = len(record.REF)
lens_alt_list = list()
for alt in alt_list:
lens_alt_list.append(len(alt))
diff_len = abs(len_ref - lens_alt_list[0])
lens_alt = ",".join(map(str, lens_alt_list))
line += [len_ref]
line += [lens_alt]
line += [diff_len]
line += [record.REF]
line += [",".join(alt_list)]
line += [AlleleSelect.allele_convert(
"{}/{}".format(
record.call_for_sample[fn].gt_alleles[0],
record.call_for_sample[fn].gt_alleles[1]
), glv.conf.show_genotype
) for fn in sample_fullname_list]
f.write("{}\n".format('\t'.join(map(str, line))))
log.info("genotype {} > {}.txt\n".format(
utl.elapsed_time(time.time(), start),
out_file_path))
# def print_all_allele_int(self):
# '''
# '''
#
# header = list()
# header += ["CHROM", "POS", "REF", "ALT"]
# header += glv.conf.vcf_sample_nickname_list
# #print("#{}".format("\t".join(header)))
#
# reader = vcfpy.Reader.from_path(glv.conf.vcf_file_path)
#
# # all chromosome region
# for region in glv.conf.ref_fasta_chrom_region_list:
#
# # for members full name
# glv.conf.vcf_sample_fullname_list
#
# vcf_ittr = reader.fetch(region)
# for record in vcf_ittr:
#
# line = [record.CHROM, record.POS, record.REF]
# alt_list = [alt.value for alt in record.ALT]
# line += [",".join(alt_list)]
# line += [AlleleSelect.allele_convert("{}/{}".format(
# record.call_for_sample[fn].gt_alleles[0],
# record.call_for_sample[fn].gt_alleles[1]
# )) for fn in glv.conf.vcf_sample_fullname_list]
#
# #print('\t'.join(map(str, line)))
#
#
# def print_allele_int(self):
# '''
# '''
#
# header = list()
# header += ["CHROM", "POS", "REF", "ALT"]
# header += glv.conf.vcf_sample_nickname_list
# #print("#{}".format("\t".join(header)))
#
# reader = vcfpy.Reader.from_path(glv.conf.vcf_file_path)
# for distin_dict in glv.outlist.distin_files:
#
# # for region
# region_name = distin_dict['region']
# region = glv.conf.regions_dict[region_name]['reg']
#
# # for members full name
# group_list = [distin_dict[0], distin_dict[1]]
# sample_fullname_list = \
# utl.get_sample_list_from_groupname(
# group_list, "fullname")
#
# vcf_ittr = reader.fetch(region)
# for record in vcf_ittr:
#
# line = [record.CHROM, record.POS, record.REF]
# alt_list = [alt.value for alt in record.ALT]
# line += [",".join(alt_list)]
# line += [AlleleSelect.allele_convert("{}/{}".format(
# record.call_for_sample[fn].gt_alleles[0],
# record.call_for_sample[fn].gt_alleles[1]
# )) for fn in sample_fullname_list]
#
# #print('\t'.join(map(str, line)))
|
from codecs import open
from setuptools import find_packages, setup
with open("README.md", "r", encoding="UTF-8") as f:
README = f.read()
EXTRAS = {
"lint": ["black", "flake8", "isort"],
}
EXTRAS["dev"] = EXTRAS["lint"]
setup(
name="testcase-maker",
version="0.2.0.post1",
author="benwoo1110",
author_email="[email protected]",
description="Competitive programming testcases made easy!",
extras_require=EXTRAS,
install_requires=[
"attrs~=21.2.0",
],
license="MIT License",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/benwoo1110/testcase-maker",
packages=find_packages(),
python_requires=">=3.8",
classifiers=[
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
|
from typer import Typer
from tcli.commands.configure.credentials.credentials import CredentialsApp
from tcli.typer_app import TyperApp
class ConfigureApp(TyperApp):
def on_create_app(self, app: Typer, *args, **kwargs) -> Typer:
app.add_typer(
CredentialsApp(self.config).create_app(
help="Manage CLI credentials."),
name="credentials"
)
return app
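# Hypothetical wiring into a root CLI (the root Typer app and config object
# are assumptions, not defined in this module):
#
#   root = Typer()
#   root.add_typer(ConfigureApp(config).create_app(help="Configure the CLI."),
#                  name="configure")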
|
"""Test the file examples/ex_utils_static.py."""
import tempfile
from pathlib import Path
from .examples import ex_utils_static
def test_smoke_test_write_plotly_html():
"""Smoke test WritePlotlyHTML."""
try:
with tempfile.TemporaryDirectory() as dir_name: # act
filename = Path(dir_name) / 'tmp.html'
ex_utils_static.write_sample_html(filename) # act
content = filename.read_text()
except ValueError:
raise ValueError('Likely no orca installation was found. Try "conda install -c plotly plotly-orca"')
assert len(content.split('\n')) >= 2500
def test_smoke_test_write_from_markdown():
"""Smoke test write_from_markdown."""
ex_utils_static.example_write_from_markdown() # act
|
import sublime, sublime_plugin, os
class ExpandTabsOnSave(sublime_plugin.EventListener):
def on_pre_save(self, view):
if view.settings().get('expand_tabs_on_save') == 1:
view.window().run_command('expand_tabs')
|
from airview_api.models.models import *
|
def count_letters(msg):
"""
Zwraca pare (znak, liczba zliczeń) dla najczęściej występującego znaku w wiadomości.
W przypadku równości zliczeń wartości sortowane są alfabetycznie.
:param msg: Message to count chars in.
:type msg: str
:return: Most frequent pair char - count in message.
:rtype: list
"""
s = []
for elem in msg:
if elem not in s:
s.append(elem)
chars = [msg.count(elem) for elem in s]
    max_count = max(chars)
    if chars.count(max_count) == 1:
        return s[chars.index(max_count)], max_count
    else:
        # several characters share the maximal count: keep those characters
        # and break the tie alphabetically
        candidates = [char for char, count in zip(s, chars) if count == max_count]
        return min(candidates), max_count
if __name__ == '__main__':
msg = 'Abrakadabra'
assert count_letters(msg) == ('a', 4)
assert count_letters('za') == ('a', 1)
|
import numpy as np
import healpy as hp
import os
from powspechi.pserrors import IsomapError
# Read event file
def readevtfile(infile, skip_header=True):
r"""Read an event file with at least two columns, where the first should
correspond to the particles' azimuthal (:math:`\phi`) coordinates and the
second to the polar (:math:`\theta`) coordinates.
Parameters
----------
infile : filenamestr
The event file name whose format is described above.
skip_header : bool, optional
Option to skip the first line of the file. Default: *True*.
Returns
-------
angs : float, ndarray
A 2-D array whose shape is *(mult, ncol)*, where *mult* is the
event multiplicity and *ncol* the number of columns.
"""
data = []
with open(infile, 'r') as f:
if skip_header:
f.readline()
for line in f:
data.append(line.split())
angs = np.asarray(data, dtype=float)
return angs
# Mapping angs of type 'phi theta'
def mapping(nside, angs):
r"""Make a map with a chosen resolution out of particles angular
coordinates :math:`(\phi, \theta)`.
Parameters
----------
nside : int, scalar
Chosen map resolution.
angs : float, ndarray
A 2-D array whose first column corresponds to the :math:`\phi`
coordinate while the second corresponds to the :math:`\theta`
coordinate.
Returns
-------
maph : float, ndarray
A 1-D array whose size depends on the `nside` through the relation
:math:`\mathrm{len(maph)} = 12 \cdot nside^2`.
"""
npix = hp.nside2npix(nside)
maph = np.zeros(npix)
pix = hp.ang2pix(nside, angs[:, 1], angs[:, 0])
vals, times = np.unique(pix, return_counts=True)
maph[vals] = times
maph *= float(npix)/len(angs)
return maph
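# Illustrative sketch with synthetic angles (assumed values, not from the
# original module):
#
#   rng = np.random.default_rng(0)
#   angs = np.column_stack([rng.uniform(0, 2 * np.pi, 1000),        # phi
#                           rng.uniform(0.4, np.pi - 0.4, 1000)])   # theta
#   maph = mapping(nside=32, angs=angs)   # len(maph) == 12 * 32**2 == 12288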
# Get supmap_iso numpy array:
def getsupmapiso(nside, etacut=0.9):
r"""Get the desired supmap_iso.
Parameters
----------
nside : int, scalar
Map resolution.
etacut : float, scalar, optional
The imposed limit to pseudorapidity, such that :math:`|\eta|` < `etacut`.
Default: 0.9.
Returns
-------
supmapiso : float, ndarray
A 1-D array representing a HEALPix map with the specified resolution.
Raises
------
IsomapError
If the `supmap_iso*.fits` file does not exist.
Notes
-----
The maps in the files `supmap_iso*.fits` are meant to correct for edge effects when
there is a limit on :math:`\eta` (:math:`\theta`) and it is necessary to divide the
    event maps by their ensemble sum. In the case of no :math:`\theta` limitations
or no divisions, such maps are not necessary. Currently, ``powspechi`` supports such
corrections for `nside`: 8, 16, 32, 64 and 128 and `etacut`: 0.8 and 0.9.
"""
curr_dir = os.path.dirname(__file__)
det_file = os.path.join(curr_dir, 'supmaps_iso/supmap_iso%s_ns%d.fits' %(etacut, nside))
if os.path.isfile(det_file):
supmapiso = hp.read_map(det_file, verbose=False)
return supmapiso
else:
raise IsomapError('The desired supmap_iso*.fits file with nside = %d and |eta| < %s does not exist. Please refer to documentation.' %(nside, etacut))
# Make a supmap out of maps in a dictionary
def supmaps(maps, supmapiso=None):
r"""Sum an ensemble of maps.
Parameters
----------
maps : float, array_like
A map or a list/array of maps.
supmapiso : float, ndarray, optional
A map limited in :math:`\theta`, used to account for the pixel weights
on map edges. Default: *None*.
Returns
-------
supmap : float, ndarray
A 1-D array resultant of the sum of the elements in `maps`. If `supmapiso`
is given, weights are assigned to the pixels on the edges of `supmap`.
"""
if maps[0].ndim == 0:
maps = np.reshape(maps, (1, len(maps)))
npix = hp.get_map_size(maps[0])
supmap = np.sum(maps, axis=0)
supmap *= npix/np.sum(supmap)
if np.any(supmapiso):
pixs = np.nonzero(supmapiso)
supmap[pixs] /= supmapiso[pixs]
supmap *= npix/np.sum(supmap)
return supmap
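# Illustrative sketch of the normalisation pipeline (the per-event maps are
# assumed to come from mapping() above; make_normmaps is defined below):
#
#   maps = np.array([mapping(32, evt_angs) for evt_angs in all_events])
#   supmap = supmaps(maps)
#   norm_maps = make_normmaps(maps, supmap, etacut=0.9)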
# Make norm maps out of the given maps and a supmap
def make_normmaps(maps, supmap, etacut=0.9):
r"""Divide an ensemble of maps by a single map, preferably the sum of
said ensemble.
Parameters
----------
maps : float, array_like
A single map or an ensemble of maps. They should be limited in
pseudorapidity by the value in `etacut`.
supmap : float, ndarray
A 1-D array usually representing the sum of all elements in `maps`.
etacut : float, scalar, optional
The value of the pseudorapidity limit, :math:`|\eta|` < `etacut`.
If there is no limit, set it to *None*. Default: 0.9.
Returns
-------
norm_maps : float, array_like
The result of dividing `maps` by `supmap`. Its shape will be the same
as `maps`.
Notes
-----
In the power spectral analysis at hand [1]_ [2]_, `supmap` is the sum
of all event maps and it is represented by :math:`F^{all}(\mathbf{n_p})`,
where :math:`\mathbf{n_p}` is a pixel number. A normalized map is thus defined
by the following expression:
.. math:: \bar{f}(\mathbf{n_p}) = \frac{f(\mathbf{n_p})}{F^{all}(\mathbf{n_p})},
where :math:`f(\mathbf{n_p})` is a map from the original event ensemble, the latter
denoted by the `maps` parameter.
References
----------
.. [1] M. Machado, P.H. Damgaard, J.J. Gaardhoeje, and C. Bourjau, "Angular power spectrum of heavy ion collisions", Phys. Rev. C **99**, 054910 (2019).
.. [2] M. Machado, "Heavy ion anisotropies: a closer look at the angular power spectrum", arXiv:1907.00413 [hep-ph] (2019).
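    Examples
    --------
    A minimal sketch with uniform maps and no pseudorapidity cut; the normalized
    ensemble keeps the shape of `maps`:
    >>> maps = np.ones((3, hp.nside2npix(8)))
    >>> norm_maps = make_normmaps(maps, supmaps(maps), etacut=None)
    >>> norm_maps.shape
    (3, 768)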
"""
if maps[0].ndim == 0:
maps = np.reshape(maps, (1, len(maps)))
npix = hp.get_map_size(maps[0])
nside = hp.npix2nside(npix)
if etacut:
qi, qf = 2.*np.arctan(np.exp(-np.array([etacut, -etacut])))
mask = np.ones(npix)
mask[hp.query_strip(nside, qi, qf)] = 0.
else:
qi, qf = 0., 2*np.pi
mask = 0.
finmap = supmap/npix*(1.-mask)+mask
pixs = np.where(finmap == 0.)
finmap[pixs] = 1.
norm_maps = maps / (npix*finmap)
norm_maps *= npix / np.sum(norm_maps, axis=1)[:, None]
return norm_maps |
from .context import feature_infection
import pytest
import uuid
from operator import attrgetter
@pytest.fixture(scope="module")
def test_feature(request):
return feature_infection.CDC.get_infector("test")
def infected(feature, entities):
return map(feature.is_infected, entities)
class Entity(object):
"""Simplified user representation"""
def __init__(self):
self.connections = []
self.id = uuid.uuid4()
    def __eq__(self, other):
        return self.id == other.id
    def __hash__(self):  # keep entities hashable for the set-based assertions below
        return hash(self.id)
get_connections = attrgetter("connections")
class TestTotalInfection:
def test_no_entities(self, test_feature):
entities = []
assert test_feature.total_infection(entities, None,
connections=Entity.get_connections) == set()
def test_single_user(self, test_feature):
entities = [Entity()]
assert test_feature.total_infection(entities, entities[0],
connections=Entity.get_connections) == set(entities)
def test_disconnected_user(self, test_feature):
entities = [Entity(), Entity()]
assert test_feature.total_infection(entities, entities[0],
connections=Entity.get_connections) == set(entities[0:1])
def test_connected_entities(self, test_feature):
user1 = Entity()
user2 = Entity()
user1.connections.append(user2)
entities = [user1, user2]
assert test_feature.total_infection(entities, user1,
connections=Entity.get_connections) == set(entities)
assert test_feature.total_infection(entities, user2,
connections=Entity.get_connections) == set(entities)
def test_disjoint_connected_groups(self, test_feature):
entities = [Entity(), Entity(), Entity(), Entity()]
entities[0].connections.append(entities[1])
entities[2].connections.append(entities[3])
assert test_feature.total_infection(entities, entities[0],
connections=Entity.get_connections) == set(entities[:2])
assert test_feature.total_infection(entities, entities[2],
connections=Entity.get_connections) == set(entities[2:])
def test_transitive_connection(self, test_feature):
entities = [Entity(), Entity(), Entity()]
entities[0].connections.append(entities[1])
entities[1].connections.append(entities[2])
assert test_feature.total_infection(entities, entities[0],
connections=Entity.get_connections) == set(entities)
def test_connection_cycle(self, test_feature):
entities = [Entity(), Entity(), Entity()]
entities[0].connections.append(entities[1])
entities[1].connections.append(entities[2])
entities[2].connections.append(entities[0])
assert test_feature.total_infection(entities, entities[0],
connections=Entity.get_connections) == set(entities)
def test_self_connection(self, test_feature):
entities = [Entity(), Entity()]
entities[0].connections.append(entities[0])
entities[0].connections.append(entities[1])
assert test_feature.total_infection(entities, entities[0],
connections=Entity.get_connections) == set(entities)
def test_marked_infected(self, test_feature):
entities = [Entity(), Entity()]
entities[0].connections.append(entities[1])
test_feature.total_infection(entities, entities[0],
connections=Entity.get_connections)
assert all(infected(test_feature, entities))
def test_only_current_feature_infecting(self, test_feature):
entities = [Entity()]
        separate_feature = feature_infection.CDC.get_infector("separate")
test_feature.total_infection(entities, entities[0],
connections=Entity.get_connections)
        assert not separate_feature.is_infected(entities[0])
class TestLimitedInfection:
def test_limited_infection(self, test_feature):
entities = [Entity(), Entity(), Entity()]
entities[1].connections.append(entities[2])
assert test_feature.limited_infection(entities, 1,
connections=Entity.get_connections) == set([entities[0]])
def test_single_network(self, test_feature):
entities = [Entity(), Entity()]
entities[0].connections.append(entities[1])
test_feature.limited_infection(entities, 2,
connections=Entity.get_connections)
assert all(infected(test_feature, entities))
def test_single_large_network(self, test_feature):
entities = [Entity(), Entity()]
entities[0].connections.append(entities[1])
test_feature.limited_infection(entities, 1,
connections=Entity.get_connections)
assert not any(infected(test_feature, entities))
def test_multiple_components(self, test_feature):
entities = [Entity(), Entity(), Entity()]
entities[1].connections.append(entities[2])
test_feature.limited_infection(entities, 3,
connections=Entity.get_connections)
assert all(infected(test_feature, entities))
def test_multiple_non_exact(self, test_feature):
entities = [Entity(), Entity(), Entity(), Entity()]
entities[1].connections.extend(entities[2:])
test_feature.limited_infection(entities, 2,
connections=Entity.get_connections)
assert test_feature.is_infected(entities[0])
def test_smaller_network(self, test_feature):
entities = [Entity()]
test_feature.limited_infection(entities, 2,
connections=Entity.get_connections)
assert all(infected(test_feature, entities))
def test_only_current_feature_infected(self, test_feature):
entities = [Entity()]
        separate_feature = feature_infection.CDC.get_infector("separate")
test_feature.limited_infection(entities, 1,
connections=Entity.get_connections)
        assert not separate_feature.is_infected(entities[0])
|
'''pip_update_all
Based on user515656's answer at https://stackoverflow.com/questions/2720014/how-to-upgrade-all-python-packages-with-pip
'''
import pkg_resources
from subprocess import call
packages = [dist.project_name for dist in list(pkg_resources.working_set)]
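# The assembled command is "pip install --upgrade <pkg1> <pkg2> ..."; the package
# list depends on whatever is installed in the current environment.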
call("pip install --upgrade " + ' '.join(packages), shell=True) |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from vector_calculus import vector_calculus as vc
def calc_and_plot_dists(field1, field2,
field1_title=None,
field2_title=None,
units=None):
r"""
"""
field1_magnitudes, field1_orientations = sample_vector_field(field1)
field2_magnitudes, field2_orientations = sample_vector_field(field2)
field1_histograms = bin_samples(field1_magnitudes, field1_orientations)
field2_histograms = bin_samples(field2_magnitudes, field2_orientations)
xi_sq = xi_squared(field1_histograms['counts_2d'], field2_histograms['counts_2d'])
    plot_histograms(field1_magnitudes, field1_orientations, units=units)
    plt.suptitle(field1_title)
    plot_histograms(field2_magnitudes, field2_orientations, units=units)
    plt.suptitle(field2_title)
    plt.show()
    print('Xi^2 = %10.2f' % xi_sq)
def sample_vector_field(vector_field):
r"""
"""
vector_field = np.asarray(vector_field)
orientations = np.ravel(vector_orientation(vector_field))
magnitudes = np.ravel(vc.magnitude(vector_field))
return magnitudes, orientations
def bin_samples(magnitudes,
orientations,
magnitude_bins=50,
orientation_bins=50,
joint_bins=50):
r"""
"""
mag_counts, mag_bins = np.histogram(magnitudes,
bins=magnitude_bins)
o_counts, o_bins = np.histogram(orientations,
bins=orientation_bins)
(counts_2d, mag_bins_2d,
o_bins_2d) = np.histogram2d(magnitudes,
orientations,
bins=(magnitude_bins,
orientation_bins))
histograms = {'mag_counts': mag_counts,
'mag_bins': mag_bins,
'o_counts': o_counts,
'o_bins': o_bins,
'counts_2d': counts_2d,
'mag_bins_2d': mag_bins_2d,
'o_bins_2d': o_bins_2d}
return histograms
def plot_histograms(magnitudes, orientations, bins=50,
color='red', cmap='Reds', units=None):
r"""
"""
joint_grid = sns.JointGrid(magnitudes,
orientations)
joint_grid.plot_joint(plt.hist2d, bins=bins,
cmap=cmap)
joint_grid.plot_marginals(sns.distplot,
kde=False, bins=bins,
color=color)
    xlabel = 'magnitude' if units is None else 'magnitude [%s]' % units
joint_grid.set_axis_labels(xlabel, 'orientation [rad]')
return joint_grid
def xi_squared(dist1, dist2, dof=2.):
r"""
"""
assert dist1.shape == dist2.shape, "Distributions do not have equal dimensions."
addends = (dist1 - dist2)**2 / (dist1 + dist2)
addends[np.isclose(dist1 + dist2, 0)] = 0
xi_sq = np.sum(addends)
if not dof:
dof = len(dist1.shape)
xi_sq = xi_sq/dof
return xi_sq
def vector_orientation(vector_field):
r"""
Return vector angle with the z-axis.
"""
mag = vc.magnitude(vector_field)
angle = np.arccos(vector_field[2, :, :, :]/mag)
angle[np.isclose(mag, 0)] = 0
reflex = np.where(np.logical_and(vector_field[0, :, :, :] < 0,
vector_field[1, :, :, :] >= 0))
angle[reflex] = 2.*np.pi - angle[reflex]
reflex = np.where(np.logical_and(vector_field[0, :, :, :] < 0,
vector_field[1, :, :, :] < 0))
angle[reflex] = 2.*np.pi - angle[reflex]
return angle
|
from django.db import models
# Create your models here.
class Materia(models.Model):
nombre = models.CharField(max_length=15)
periodo = models.CharField(max_length=10)
año = models.IntegerField(default=2020)
id_clase = models.ForeignKey('clases.Clase', on_delete=models.CASCADE)
def __str__(self):
return self.nombre |
# flake8: noqa
from enum import IntEnum, unique
from deprecated import deprecated
@deprecated(
version="1.0.0",
reason="This enum doesn't get maintained as it never made it to the first release",
)
@unique
class ItemTypeGC(IntEnum):
"""
    An Enum referencing the different item types in CS:GO.
    They are returned as def_index by the CS:GO game coordinator.
Source: https://tf2b.com/itemlist.php?gid=730
Last updated: Jul 18, 2021
INFO: Contains multiple entries for similar items
"""
DESERT_EAGLE = 1
DUAL_BERETTAS = 2
AUG = 8
FIVE_SEVEN = 3
GLOCK_18 = 4
AK_47 = 7
AWP = 9
FAMAS = 10
G3SG1 = 11
GALIL_AR = 13
M249 = 14
M4A4 = 16
MAC_10 = 17
P90 = 19
REPULSOR_DEVICE = 20
MP5_SD = 23
UMP_45 = 24
XM1014 = 25
PP_BIZON = 26
MAG_7 = 27
NEGEV = 28
SAWED_OFF = 29
TEC_9 = 30
ZEUS_X27 = 31
P2000 = 32
MP7 = 33
MP9 = 34
NOVA = 35
P250 = 36
BALLISTIC_SHIELD = 37
SCAR_20 = 38
SG_553 = 39
SSG_08 = 40
KNIFE_CT = 41
KNIFE_CT_2 = 42
FLASHBANG = 43
HIGH_EXPLOSIVE_GRENADE = 44
SMOKE_GRENADE = 45
MOLOTOV = 46
DECOY_GRENADE = 47
INCENDIARY_GRENADE = 48
C4_EXPLOSIVE = 49
KEVLAR_VEST = 50
KEVLAR_AND_HELMET = 51
HEAVY_ASSAULT_SUIT = 52
ITEM_NVG = 54
DEFUSE_KIT = 55
RESCUE_KIT = 56
MEDI_SHOT = 57
MUSIC_KIT = 58
KNIFE_T = 59
M4A1_S = 60
USP_S = 61
TRADE_UP_CONTRACT = 62
CZ75_AUTO = 63
R8_REVOLVER = 64
TACTICAL_AWARENESS_GRENADE = 68
BARE_HANDS = 69
BREACH_CHARGE = 70
TABLET = 72
KNIFE = 74
AXE = 75
HAMMER = 76
WRENCH = 78
SPECTRAL_SHIV = 80
FIRE_BOMB = 81
DIVERSION_DEVICE = 82
FRAG_GRENADE = 83
SNOWBALL = 84
BUMP_MINE = 85
BAYONET = 500
CLASSIC_KNIFE = 503
FLIP_KNIFE = 505
GUT_KNIFE = 506
KARAMBIT = 507
M9_BAYONET = 508
HUNTSMAN_KNIFE = 509
FALCHION_KNIFE = 512
BOWIE_KNIFE = 514
BUTTERFLY_KNIFE = 515
SHADOW_DAGGERS = 516
PARACORD_KNIFE = 517
SURVIVAL_KNIFE = 518
URSUS_KNIFE = 519
NAVAJA_KNIFE = 520
NOMAD_KNIFE = 521
STILETTO_KNIFE = 522
TALON_KNIFE = 523
SKELETON_KNIFE = 525
FIVE_YEAR_VETERAN_COIN = 874
CHAMPION_AT_DREAMHACK_2013 = 875
FINALIST_AT_DREAMHACK_2013 = 876
SEMIFINALIST_AT_DREAMHACK_2013 = 877
QUARTERFINALIST_AT_DREAMHACK_2013 = 878
CHAMPION_AT_EMS_ONE_KATOWICE_2014 = 879
FINALIST_AT_EMS_ONE_KATOWICE_2014 = 880
SEMIFINALIST_AT_EMS_ONE_KATOWICE_2014 = 881
QUARTERFINALIST_AT_EMS_ONE_KATOWICE_2014 = 882
CHAMPION_AT_ESL_ONE_COLOGNE_2014 = 883
FINALIST_AT_ESL_ONE_COLOGNE_2014 = 884
SEMIFINALIST_AT_ESL_ONE_COLOGNE_2014 = 885
QUARTERFINALIST_AT_ESL_ONE_COLOGNE_2014 = 886
BRONZE_COLOGNE_2014_PICKEM_TROPHY = 887
SILVER_COLOGNE_2014_PICKEM_TROPHY = 888
GOLD_COLOGNE_2014_PICKEM_TROPHY = 889
CHAMPION_AT_DREAMHACK_WINTER_2014 = 890
FINALIST_AT_DREAMHACK_WINTER_2014 = 891
SEMIFINALIST_AT_DREAMHACK_WINTER_2014 = 892
QUARTERFINALIST_AT_DREAMHACK_WINTER_2014 = 893
BRONZE_DREAMHACK_2014_PICKEM_TROPHY = 894
SILVER_DREAMHACK_2014_PICKEM_TROPHY = 895
GOLD_DREAMHACK_2014_PICKEM_TROPHY = 896
CHAMPION_AT_ESL_ONE_KATOWICE_2015 = 897
FINALIST_AT_ESL_ONE_KATOWICE_2015 = 898
SEMIFINALIST_AT_ESL_ONE_KATOWICE_2015 = 899
QUARTERFINALIST_AT_ESL_ONE_KATOWICE_2015 = 900
BRONZE_KATOWICE_2015_PICKEM_TROPHY = 901
SILVER_KATOWICE_2015_PICKEM_TROPHY = 902
GOLD_KATOWICE_2015_PICKEM_TROPHY = 903
CHAMPION_AT_ESL_ONE_COLOGNE_2015 = 904
FINALIST_AT_ESL_ONE_COLOGNE_2015 = 905
SEMIFINALIST_AT_ESL_ONE_COLOGNE_2015 = 906
QUARTERFINALIST_AT_ESL_ONE_COLOGNE_2015 = 907
BRONZE_COLOGNE_2015_PICKEM_TROPHY = 908
SILVER_COLOGNE_2015_PICKEM_TROPHY = 909
GOLD_COLOGNE_2015_PICKEM_TROPHY = 910
BRONZE_CLUJ_NAPOCA_2015_PICKEM_TROPHY = 911
SILVER_CLUJ_NAPOCA_2015_PICKEM_TROPHY = 912
GOLD_CLUJ_NAPOCA_2015_PICKEM_TROPHY = 913
BRONZE_CLUJ_NAPOCA_2015_FANTASY_TROPHY = 914
SILVER_CLUJ_NAPOCA_2015_FANTASY_TROPHY = 915
GOLD_CLUJ_NAPOCA_2015_FANTASY_TROPHY = 916
CHAMPION_AT_DREAMHACK_CLUJ_NAPOCA_2015 = 917
FINALIST_AT_DREAMHACK_CLUJ_NAPOCA_2015 = 918
SEMIFINALIST_AT_DREAMHACK_CLUJ_NAPOCA_2015 = 919
QUARTERFINALIST_AT_DREAMHACK_CLUJ_NAPOCA_2015 = 920
BRONZE_COLUMBUS_2016_PICKEM_TROPHY = 921
SILVER_COLUMBUS_2016_PICKEM_TROPHY = 922
GOLD_COLUMBUS_2016_PICKEM_TROPHY = 923
BRONZE_COLUMBUS_2016_FANTASY_TROPHY = 924
SILVER_COLUMBUS_2016_FANTASY_TROPHY = 925
GOLD_COLUMBUS_2016_FANTASY_TROPHY = 926
CHAMPION_AT_MLG_COLUMBUS_2016 = 927
FINALIST_AT_MLG_COLUMBUS_2016 = 928
SEMIFINALIST_AT_MLG_COLUMBUS_2016 = 929
QUARTERFINALIST_AT_MLG_COLUMBUS_2016 = 930
CHAMPION_AT_ESL_ONE_COLOGNE_2016 = 931
FINALIST_AT_ESL_ONE_COLOGNE_2016 = 932
SEMIFINALIST_AT_ESL_ONE_COLOGNE_2016 = 933
QUARTERFINALIST_AT_ESL_ONE_COLOGNE_2016 = 934
BRONZE_COLOGNE_2016_PICKEM_TROPHY = 935
SILVER_COLOGNE_2016_PICKEM_TROPHY = 936
GOLD_COLOGNE_2016_PICKEM_TROPHY = 937
BRONZE_COLOGNE_2016_FANTASY_TROPHY = 938
SILVER_COLOGNE_2016_FANTASY_TROPHY = 939
GOLD_COLOGNE_2016_FANTASY_TROPHY = 940
CHAMPION_AT_ELEAGUE_ATLANTA_2017 = 941
FINALIST_AT_ELEAGUE_ATLANTA_2017 = 942
SEMIFINALIST_AT_ELEAGUE_ATLANTA_2017 = 943
QUARTERFINALIST_AT_ELEAGUE_ATLANTA_2017 = 944
BRONZE_ATLANTA_2017_PICKEM_TROPHY = 945
SILVER_ATLANTA_2017_PICKEM_TROPHY = 946
GOLD_ATLANTA_2017_PICKEM_TROPHY = 947
CHAMPION_AT_PGL_KRAKOW_2017 = 948
FINALIST_AT_PGL_KRAKOW_2017 = 949
SEMIFINALIST_AT_PGL_KRAKOW_2017 = 950
QUARTERFINALIST_AT_PGL_KRAKOW_2017 = 951
BRONZE_KRAKOW_2017_PICKEM_TROPHY = 952
SILVER_KRAKOW_2017_PICKEM_TROPHY = 953
GOLD_KRAKOW_2017_PICKEM_TROPHY = 954
CHAMPION_AT_ELEAGUE_BOSTON_2018 = 955
FINALIST_AT_ELEAGUE_BOSTON_2018 = 956
SEMIFINALIST_AT_ELEAGUE_BOSTON_2018 = 957
QUARTERFINALIST_AT_ELEAGUE_BOSTON_2018 = 958
BRONZE_BOSTON_2018_PICKEM_TROPHY = 959
SILVER_BOSTON_2018_PICKEM_TROPHY = 960
GOLD_BOSTON_2018_PICKEM_TROPHY = 961
CHAMPION_AT_FACEIT_LONDON_2018 = 962
FINALIST_AT_FACEIT_LONDON_2018 = 963
SEMIFINALIST_AT_FACEIT_LONDON_2018 = 964
QUARTERFINALIST_AT_FACEIT_LONDON_2018 = 965
BRONZE_LONDON_2018_PICKEM_TROPHY = 966
SILVER_LONDON_2018_PICKEM_TROPHY = 967
GOLD_LONDON_2018_PICKEM_TROPHY = 968
TEN_YEAR_VETERAN_COIN = 969
LOYALTY_BADGE = 970
CHAMPION_AT_IEM_KATOWICE_2019 = 971
FINALIST_AT_IEM_KATOWICE_2019 = 972
SEMIFINALIST_AT_IEM_KATOWICE_2019 = 973
QUARTERFINALIST_AT_IEM_KATOWICE_2019 = 974
CHAMPION_AT_STARLADDER_BERLIN_2019 = 975
FINALIST_AT_STARLADDER_BERLIN_2019 = 976
SEMIFINALIST_AT_STARLADDER_BERLIN_2019 = 977
QUARTERFINALIST_AT_STARLADDER_BERLIN_2019 = 978
OPERATION_PAYBACK_PASS = 1000
OPERATION_PAYBACK_CHALLENGE_COIN = 1001
SILVER_OPERATION_PAYBACK_COIN = 1002
GOLD_OPERATION_PAYBACK_COIN = 1003
MUSEUM_MAP_COIN = 1004
DOWNTOWN_MAP_COIN = 1005
THUNDER_MAP_COIN = 1006
FAVELA_MAP_COIN = 1007
MOTEL_MAP_COIN = 1008
SEASIDE_MAP_COIN = 1009
LIBRARY_MAP_COIN = 1010
OPERATION_BRAVO_PASS = 1012
OPERATION_BRAVO_CHALLENGE_COIN = 1013
SILVER_OPERATION_BRAVO_COIN = 1014
GOLD_OPERATION_BRAVO_COIN = 1015
AGENCY_MAP_COIN = 1016
ALI_MAP_COIN = 1017
CACHE_MAP_COIN = 1018
CHINATOWN_MAP_COIN = 1019
GWALIOR_MAP_COIN = 1020
RUINS_MAP_COIN = 1021
SIEGE_MAP_COIN = 1022
OPERATION_PHOENIX_PASS = 1023
OPERATION_PHOENIX_CHALLENGE_COIN = 1024
SILVER_OPERATION_PHOENIX_COIN = 1025
GOLD_OPERATION_PHOENIX_COIN = 1026
OPERATION_BREAKOUT_ALL_ACCESS_PASS = 1027
OPERATION_BREAKOUT_CHALLENGE_COIN = 1028
SILVER_OPERATION_BREAKOUT_COIN = 1029
GOLD_OPERATION_BREAKOUT_COIN = 1030
CASTLE_MAP_COIN = 1031
BLACK_GOLD_MAP_COIN = 1032
RUSH_MAP_COIN = 1033
MIST_MAP_COIN = 1034
INSERTION_MAP_COIN = 1035
OVERGROWN_MAP_COIN = 1036
MARQUIS_MAP_COIN = 1037
WORKOUT_MAP_COIN = 1038
BACKALLEY_MAP_COIN = 1039
SEASON_MAP_COIN = 1040
BAZAAR_MAP_COIN = 1041
FACADE_MAP_COIN = 1042
LOG_MAP_COIN = 1043
RAILS_MAP_COIN = 1044
RESORT_MAP_COIN = 1045
ZOO_MAP_COIN = 1046
SANTORINI_MAP_COIN = 1047
COAST_MAP_COIN = 1048
MIKLA_MAP_COIN = 1049
ROYAL_MAP_COIN = 1050
EMPIRE_MAP_COIN = 1051
TULIP_MAP_COIN = 1052
CRUISE_MAP_COIN = 1053
SUBZERO_MAP_COIN = 1054
BIOME_MAP_COIN = 1055
ABBEY_MAP_COIN = 1056
RUBY_MAP_COIN = 1057
BREACH_MAP_COIN = 1058
STUDIO_MAP_COIN = 1059
JUNGLE_MAP_COIN = 1060
ANUBIS_MAP_COIN = 1061
CHLORINE_MAP_COIN = 1062
MUTINY_MAP_COIN = 1063
SWAMP_MAP_COIN = 1064
FROSTBITE_MAP_COIN = 1065
ENGAGE_MAP_COIN = 1066
APOLLO_MAP_COIN = 1067
GUARD_MAP_COIN = 1068
ELYSION_MAP_COIN = 1069
NAME_TAG = 1200
STORAGE_UNIT = 1201
CSGO_CASE_KEY = 1203
ESPORTS_KEY = 1204
STICKER = 1209
GIFT_PACKAGE = 1210
PALLET_OF_PRESENTS = 1211
CSGO_CAPSULE_KEY = 1212
WINTER_OFFENSIVE_CASE_KEY = 1214
AUDIENCE_PARTICIPATION_PARCEL = 1215
OPERATION_PHOENIX_CASE_KEY = 1303
COMMUNITY_STICKER_CAPSULE_1_KEY = 1304
HUNTSMAN_CASE_KEY = 1305
MISSION = 1306
HUNTSMAN_CASE_KEY_2 = 1307
COMMUNITY_STICKER_CAPSULE_1_KEY_2 = 1308
OPERATION_BREAKOUT_CASE_KEY = 1309
OPERATION_BREAKOUT_CASE_KEY_2 = 1310
OPERATION_BREAKOUT_CASE_KEY_3 = 1311
OPERATION_BREAKOUT_CASE_KEY_4 = 1313
MUSIC_KIT_2 = 1314
OPERATION_VANGUARD_ACCESS_PASS = 1315
OPERATION_VANGUARD_CHALLENGE_COIN = 1316
SILVER_OPERATION_VANGUARD_COIN = 1317
GOLD_OPERATION_VANGUARD_COIN = 1318
CAMPAIGN_MAGHREB = 1320
CAMPAIGN_EURASIA = 1321
OPERATION_VANGUARD_CASE_KEY = 1322
CHROMA_CASE_KEY = 1323
STATTRAK_SWAP_TOOL = 1324
CHROMA_2_CASE_KEY = 1325
OPERATION_BLOODHOUND_ACCESS_PASS = 1326
OPERATION_BLOODHOUND_CHALLENGE_COIN = 1327
SILVER_OPERATION_BLOODHOUND_COIN = 1328
GOLD_OPERATION_BLOODHOUND_COIN = 1329
FALCHION_CASE_KEY = 1330
SERVICE_MEDAL_2015_BLUE = 1331
SERVICE_MEDAL_2015_PURPLE = 1332
SHADOW_CASE_KEY = 1333
REVOLVER_CASE_KEY = 1334
OPERATION_WILDFIRE_ACCESS_PASS = 1335
OPERATION_WILDFIRE_CHALLENGE_COIN = 1336
SILVER_OPERATION_WILDFIRE_COIN = 1337
GOLD_OPERATION_WILDFIRE_COIN = 1338
SERVICE_MEDAL_2016_GREY = 1339
SERVICE_MEDAL_2016_LIGHT_BLUE = 1340
SERVICE_MEDAL_2016_BLUE = 1341
SERVICE_MEDAL_2016_PURPLE = 1342
SERVICE_MEDAL_2016_PINK = 1343
SERVICE_MEDAL_2016_RED = 1344
CHROMA_3_CASE_KEY = 1347
SEALED_GRAFFITI = 1348
GRAFFITI = 1349
GAMMA_CASE_KEY = 1350
GAMMA_2_CASE_KEY = 1351
OPERATION_HYDRA_ACCESS_PASS = 1352
COUNTER_STRIKE_GLOBAL_OFFENSIVE_GAME_LICENSE = 1353
GLOVE_CASE_KEY = 1356
SERVICE_MEDAL_2017_GREY = 1357
SERVICE_MEDAL_2017_LIGHT_BLUE = 1358
SERVICE_MEDAL_2017_BLUE = 1359
SERVICE_MEDAL_2017_PURPLE = 1360
SERVICE_MEDAL_2017_PINK = 1361
SERVICE_MEDAL_2017_RED = 1362
SERVICE_MEDAL_2017_BLACK = 1363
SPECTRUM_CASE_KEY = 1364
OPERATION_HYDRA_CASE_KEY = 1365
SPECTRUM_2_CASE_KEY = 1366
SERVICE_MEDAL_2018_GREY = 1367
SERVICE_MEDAL_2018_GREEN = 1368
SERVICE_MEDAL_2018_BLUE = 1369
SERVICE_MEDAL_2018_PURPLE = 1370
SERVICE_MEDAL_2018_PINK = 1371
SERVICE_MEDAL_2018_RED = 1372
CLUTCH_CASE_KEY = 1373
HORIZON_CASE_KEY = 1374
DANGER_ZONE_CASE_KEY = 1375
SERVICE_MEDAL_2019_GREY = 1376
SERVICE_MEDAL_2019_GREEN = 1377
SERVICE_MEDAL_2019_BLUE = 1378
SERVICE_MEDAL_2019_PURPLE = 1379
SERVICE_MEDAL_2019_PINK = 1380
SERVICE_MEDAL_2019_RED = 1381
PRISMA_CASE_KEY = 1383
SHATTERED_WEB_CASE_KEY = 1384
CS20_CASE_KEY = 1385
PRISMA_2_CASE_KEY = 1386
FRACTURE_CASE_KEY = 1387
OPERATION_BROKEN_FANG_CASE_KEY = 1388
CSGO_WEAPON_CASE = 4001
ESPORTS_2013_CASE = 4002
OPERATION_BRAVO_CASE = 4003
CSGO_WEAPON_CASE_2 = 4004
ESPORTS_2013_WINTER_CASE = 4005
DREAMHACK_2013_SOUVENIR_PACKAGE = 4006
STICKER_CAPSULE = 4007
WINTER_OFFENSIVE_WEAPON_CASE = 4009
CSGO_WEAPON_CASE_3 = 4010
OPERATION_PHOENIX_WEAPON_CASE = 4011
STICKER_CAPSULE_2 = 4012
EMS_ONE_2014_SOUVENIR_PACKAGE = 4013
EMS_KATOWICE_2014_CHALLENGERS = 4014
EMS_KATOWICE_2014_LEGENDS = 4015
COMMUNITY_STICKER_CAPSULE_1 = 4016
HUNTSMAN_WEAPON_CASE = 4017
OPERATION_BREAKOUT_WEAPON_CASE = 4018
ESPORTS_2014_SUMMER_CASE = 4019
ESL_ONE_COLOGNE_2014_LEGENDS = 4020
ESL_ONE_COLOGNE_2014_CHALLENGERS = 4021
ESL_ONE_COLOGNE_2014_DUST_II_SOUVENIR_PACKAGE = 4022
ESL_ONE_COLOGNE_2014_INFERNO_SOUVENIR_PACKAGE = 4023
ESL_ONE_COLOGNE_2014_MIRAGE_SOUVENIR_PACKAGE = 4024
ESL_ONE_COLOGNE_2014_NUKE_SOUVENIR_PACKAGE = 4025
ESL_ONE_COLOGNE_2014_CACHE_SOUVENIR_PACKAGE = 4026
ESL_ONE_COLOGNE_2014_COBBLESTONE_SOUVENIR_PACKAGE = 4027
ESL_ONE_COLOGNE_2014_OVERPASS_SOUVENIR_PACKAGE = 4028
OPERATION_VANGUARD_WEAPON_CASE = 4029
DREAMHACK_2014_LEGENDS_HOLO_FOIL = 4030
DREAMHACK_2014_DUST_II_SOUVENIR_PACKAGE = 4031
DREAMHACK_2014_INFERNO_SOUVENIR_PACKAGE = 4032
DREAMHACK_2014_MIRAGE_SOUVENIR_PACKAGE = 4033
DREAMHACK_2014_NUKE_SOUVENIR_PACKAGE = 4034
DREAMHACK_2014_CACHE_SOUVENIR_PACKAGE = 4035
DREAMHACK_2014_COBBLESTONE_SOUVENIR_PACKAGE = 4036
DREAMHACK_2014_OVERPASS_SOUVENIR_PACKAGE = 4037
FNATIC_DREAMHACK_2014 = 4038
CLOUD9_DREAMHACK_2014 = 4039
NINJAS_IN_PYJAMAS_DREAMHACK_2014 = 4041
VIRTUSPRO_DREAMHACK_2014 = 4042
NATUS_VINCERE_DREAMHACK_2014 = 4043
TEAM_DIGNITAS_DREAMHACK_2014 = 4045
BRAVADO_GAMING_DREAMHACK_2014 = 4046
ESC_GAMING_DREAMHACK_2014 = 4047
HELLRAISERS_DREAMHACK_2014 = 4048
MYXMG_DREAMHACK_2014 = 4049
IBUYPOWER_DREAMHACK_2014 = 4050
TEAM_LDLC_DREAMHACK_2014 = 4051
PENTA_SPORTS_DREAMHACK_2014 = 4052
PLANETKEY_DYNAMICS_DREAMHACK_2014 = 4053
DREAMHACK_WINTER_2014 = 4054
STICKER_3DMAX_DREAMHACK_2014 = 4055
COPENHAGEN_WOLVES_DREAMHACK_2014 = 4056
DAT_TEAM_DREAMHACK_2014 = 4057
LONDON_CONSPIRACY_DREAMHACK_2014 = 4058
MOUSESPORTS_DREAMHACK_2014 = 4059
FLIPSID3_TACTICS_DREAMHACK_2014 = 4060
CHROMA_CASE = 4061
STICKER_3DMAX_KATOWICE_2015 = 4062
CLOUD9_G2A_KATOWICE_2015 = 4063
COUNTER_LOGIC_GAMING_KATOWICE_2015 = 4064
FLIPSID3_TACTICS_KATOWICE_2015 = 4065
FNATIC_KATOWICE_2015 = 4066
HELLRAISERS_KATOWICE_2015 = 4067
KEYD_STARS_KATOWICE_2015 = 4068
LGB_ESPORTS_KATOWICE_2015 = 4069
NATUS_VINCERE_KATOWICE_2015 = 4070
NINJAS_IN_PYJAMAS__KATOWICE_2015 = 4071
PENTA_SPORTS__KATOWICE_2015 = 4072
TEAM_ENVYUS_KATOWICE_2015 = 4073
TSM_KINGUIN_KATOWICE_2015 = 4074
TITAN_KATOWICE_2015 = 4075
VIRTUSPRO_KATOWICE_2015 = 4076
VOX_EMINOR__KATOWICE_2015 = 4077
ESL_ONE_KATOWICE_2015 = 4078
ESL_ONE_KATOWICE_2015_DUST_II_SOUVENIR_PACKAGE = 4079
ESL_ONE_KATOWICE_2015_INFERNO_SOUVENIR_PACKAGE = 4080
ESL_ONE_KATOWICE_2015_MIRAGE_SOUVENIR_PACKAGE = 4081
ESL_ONE_KATOWICE_2015_NUKE_SOUVENIR_PACKAGE = 4082
ESL_ONE_KATOWICE_2015_CACHE_SOUVENIR_PACKAGE = 4083
ESL_ONE_KATOWICE_2015_COBBLESTONE_SOUVENIR_PACKAGE = 4084
ESL_ONE_KATOWICE_2015_OVERPASS_SOUVENIR_PACKAGE = 4085
ESL_ONE_KATOWICE_2015_LEGENDS_HOLO_FOIL = 4086
ESL_ONE_KATOWICE_2015_CHALLENGERS_HOLO_FOIL = 4087
STATTRAK_SWAP_TOOL_TWO_PACK = 4088
CHROMA_2_CASE = 4089
ENFU_STICKER_CAPSULE = 4090
FALCHION_CASE = 4091
FNATIC_COLOGNE_2015 = 4092
VIRTUSPRO_COLOGNE_2015 = 4093
MOUSESPORTS_COLOGNE_2015 = 4094
NATUS_VINCERE_COLOGNE_2015 = 4095
RENEGADES_COLOGNE_2015 = 4096
TEAM_KINGUIN_COLOGNE_2015 = 4097
TEAM_EBETTLE_COLOGNE_2015 = 4098
CLOUD9_G2A_COLOGNE_2015 = 4099
NINJAS_IN_PYJAMAS_COLOGNE_2015 = 4100
TEAM_ENVYUS_COLOGNE_2015 = 4101
LUMINOSITY_GAMING_COLOGNE_2015 = 4102
TEAM_SOLOMID_COLOGNE_2015 = 4103
TEAM_IMMUNITY_COLOGNE_2015 = 4104
FLIPSID3_TACTICS_COLOGNE_2015 = 4105
TITAN_COLOGNE_2015 = 4106
COUNTER_LOGIC_GAMING_COLOGNE_2015 = 4107
ESL_COLOGNE_2015 = 4108
ESL_ONE_COLOGNE_2015_LEGENDS_FOIL = 4109
ESL_ONE_COLOGNE_2015_CHALLENGERS_FOIL = 4110
AUTOGRAPH_CAPSULE_GROUP_A_FOIL_COLOGNE_2015 = 4111
AUTOGRAPH_CAPSULE_GROUP_B_FOIL_COLOGNE_2015 = 4112
AUTOGRAPH_CAPSULE_GROUP_C_FOIL_COLOGNE_2015 = 4113
AUTOGRAPH_CAPSULE_GROUP_D_FOIL_COLOGNE_2015 = 4114
AUTOGRAPH_CAPSULE_FNATIC_COLOGNE_2015 = 4115
AUTOGRAPH_CAPSULE_LUMINOSITY_GAMING_COLOGNE_2015 = 4116
AUTOGRAPH_CAPSULE_NATUS_VINCERE_COLOGNE_2015 = 4117
AUTOGRAPH_CAPSULE_NINJAS_IN_PYJAMAS_COLOGNE_2015 = 4118
AUTOGRAPH_CAPSULE_TEAM_ENVYUS_COLOGNE_2015 = 4119
AUTOGRAPH_CAPSULE_TITAN_COLOGNE_2015 = 4120
AUTOGRAPH_CAPSULE_TEAM_SOLOMID_COLOGNE_2015 = 4121
AUTOGRAPH_CAPSULE_VIRTUSPRO_COLOGNE_2015 = 4122
AUTOGRAPH_CAPSULE_MOUSESPORTS_COLOGNE_2015 = 4123
AUTOGRAPH_CAPSULE_RENEGADES_COLOGNE_2015 = 4124
AUTOGRAPH_CAPSULE_TEAM_IMMUNITY_COLOGNE_2015 = 4125
AUTOGRAPH_CAPSULE_TEAM_EBETTLE_COLOGNE_2015 = 4126
AUTOGRAPH_CAPSULE_TEAM_KINGUIN_COLOGNE_2015 = 4127
AUTOGRAPH_CAPSULE_FLIPSID3_TACTICS_COLOGNE_2015 = 4128
AUTOGRAPH_CAPSULE_COUNTER_LOGIC_GAMING_COLOGNE_2015 = 4129
AUTOGRAPH_CAPSULE_CLOUD9_G2A_COLOGNE_2015 = 4130
ESL_ONE_COLOGNE_2015_DUST_II_SOUVENIR_PACKAGE = 4131
ESL_ONE_COLOGNE_2015_MIRAGE_SOUVENIR_PACKAGE = 4132
ESL_ONE_COLOGNE_2015_INFERNO_SOUVENIR_PACKAGE = 4133
ESL_ONE_COLOGNE_2015_COBBLESTONE_SOUVENIR_PACKAGE = 4134
ESL_ONE_COLOGNE_2015_OVERPASS_SOUVENIR_PACKAGE = 4135
ESL_ONE_COLOGNE_2015_CACHE_SOUVENIR_PACKAGE = 4136
ESL_ONE_COLOGNE_2015_TRAIN_SOUVENIR_PACKAGE = 4137
SHADOW_CASE = 4138
NINJAS_IN_PYJAMAS_CLUJ_NAPOCA_2015 = 4139
TEAM_DIGNITAS_CLUJ_NAPOCA_2015 = 4140
COUNTER_LOGIC_GAMING_CLUJ_NAPOCA_2015 = 4141
VEXED_GAMING_CLUJ_NAPOCA_2015 = 4142
FLIPSID3_TACTICS_CLUJ_NAPOCA_2015 = 4143
TEAM_LIQUID_CLUJ_NAPOCA_2015 = 4144
MOUSESPORTS_CLUJ_NAPOCA_2015 = 4145
NATUS_VINCERE_CLUJ_NAPOCA_2015 = 4146
VIRTUSPRO_CLUJ_NAPOCA_2015 = 4147
CLOUD9_CLUJ_NAPOCA_2015 = 4148
G2_ESPORTS_CLUJ_NAPOCA_2015 = 4149
TITAN_CLUJ_NAPOCA_2015 = 4150
TEAM_SOLOMID_CLUJ_NAPOCA_2015 = 4151
TEAM_ENVYUS_CLUJ_NAPOCA_2015 = 4152
FNATIC_CLUJ_NAPOCA_2015 = 4153
LUMINOSITY_GAMING_CLUJ_NAPOCA_2015 = 4154
DREAMHACK_CLUJ_NAPOCA_2015 = 4155
DREAMHACK_CLUJ_NAPOCA_2015_LEGENDS_FOIL = 4156
DREAMHACK_CLUJ_NAPOCA_2015_CHALLENGERS_FOIL = 4157
AUTOGRAPH_CAPSULE_CHALLENGERS_FOIL_CLUJ_NAPOCA_2015 = 4158
AUTOGRAPH_CAPSULE_LEGENDS_FOIL_CLUJ_NAPOCA_2015 = 4159
AUTOGRAPH_CAPSULE_NINJAS_IN_PYJAMAS_CLUJ_NAPOCA_2015 = 4160
AUTOGRAPH_CAPSULE_TEAM_DIGNITAS_CLUJ_NAPOCA_2015 = 4161
AUTOGRAPH_CAPSULE_COUNTER_LOGIC_GAMING_CLUJ_NAPOCA_2015 = 4162
AUTOGRAPH_CAPSULE_VEXED_GAMING_CLUJ_NAPOCA_2015 = 4163
AUTOGRAPH_CAPSULE_FLIPSID3_TACTICS_CLUJ_NAPOCA_2015 = 4164
AUTOGRAPH_CAPSULE_TEAM_LIQUID_CLUJ_NAPOCA_2015 = 4165
AUTOGRAPH_CAPSULE_MOUSESPORTS_CLUJ_NAPOCA_2015 = 4166
AUTOGRAPH_CAPSULE_NATUS_VINCERE_CLUJ_NAPOCA_2015 = 4167
AUTOGRAPH_CAPSULE_VIRTUSPRO_CLUJ_NAPOCA_2015 = 4168
AUTOGRAPH_CAPSULE_CLOUD9_CLUJ_NAPOCA_2015 = 4169
AUTOGRAPH_CAPSULE_G2_ESPORTS_CLUJ_NAPOCA_2015 = 4170
AUTOGRAPH_CAPSULE_TITAN_CLUJ_NAPOCA_2015 = 4171
AUTOGRAPH_CAPSULE_TEAM_SOLOMID_CLUJ_NAPOCA_2015 = 4172
AUTOGRAPH_CAPSULE_TEAM_ENVYUS_CLUJ_NAPOCA_2015 = 4173
AUTOGRAPH_CAPSULE_FNATIC_CLUJ_NAPOCA_2015 = 4174
AUTOGRAPH_CAPSULE_LUMINOSITY_GAMING_CLUJ_NAPOCA_2015 = 4175
DREAMHACK_CLUJ_NAPOCA_2015_DUST_II_SOUVENIR_PACKAGE = 4176
DREAMHACK_CLUJ_NAPOCA_2015_MIRAGE_SOUVENIR_PACKAGE = 4177
DREAMHACK_CLUJ_NAPOCA_2015_INFERNO_SOUVENIR_PACKAGE = 4178
DREAMHACK_CLUJ_NAPOCA_2015_COBBLESTONE_SOUVENIR_PACKAGE = 4179
DREAMHACK_CLUJ_NAPOCA_2015_OVERPASS_SOUVENIR_PACKAGE = 4180
DREAMHACK_CLUJ_NAPOCA_2015_CACHE_SOUVENIR_PACKAGE = 4181
DREAMHACK_CLUJ_NAPOCA_2015_TRAIN_SOUVENIR_PACKAGE = 4182
PINUPS_CAPSULE = 4183
SLID3_CAPSULE = 4184
TEAM_ROLES_CAPSULE = 4185
REVOLVER_CASE = 4186
OPERATION_WILDFIRE_CASE = 4187
NINJAS_IN_PYJAMAS_MLG_COLUMBUS_2016 = 4188
SPLYCE_MLG_COLUMBUS_2016 = 4189
COUNTER_LOGIC_GAMING_MLG_COLUMBUS_2016 = 4190
GAMBIT_GAMING_MLG_COLUMBUS_2016 = 4191
FLIPSID3_TACTICS_MLG_COLUMBUS_2016 = 4192
TEAM_LIQUID_MLG_COLUMBUS_2016 = 4193
MOUSESPORTS_MLG_COLUMBUS_2016 = 4194
NATUS_VINCERE_MLG_COLUMBUS_2016 = 4195
VIRTUSPRO_MLG_COLUMBUS_2016 = 4196
CLOUD9_MLG_COLUMBUS_2016 = 4197
G2_ESPORTS_MLG_COLUMBUS_2016 = 4198
FAZE_CLAN_MLG_COLUMBUS_2016 = 4199
ASTRALIS_MLG_COLUMBUS_2016 = 4200
TEAM_ENVYUS_MLG_COLUMBUS_2016 = 4201
FNATIC_MLG_COLUMBUS_2016 = 4202
LUMINOSITY_GAMING_MLG_COLUMBUS_2016 = 4203
MLG_MLG_COLUMBUS_2016 = 4204
MLG_COLUMBUS_2016_LEGENDS_HOLO_FOIL = 4205
MLG_COLUMBUS_2016_CHALLENGERS_HOLO_FOIL = 4206
AUTOGRAPH_CAPSULE_CHALLENGERS_FOIL_MLG_COLUMBUS_2016 = 4207
AUTOGRAPH_CAPSULE_LEGENDS_FOIL_MLG_COLUMBUS_2016 = 4208
AUTOGRAPH_CAPSULE_NINJAS_IN_PYJAMAS_MLG_COLUMBUS_2016 = 4209
AUTOGRAPH_CAPSULE_SPLYCE_MLG_COLUMBUS_2016 = 4210
AUTOGRAPH_CAPSULE_COUNTER_LOGIC_GAMING_MLG_COLUMBUS_2016 = 4211
AUTOGRAPH_CAPSULE_GAMBIT_GAMING_MLG_COLUMBUS_2016 = 4212
AUTOGRAPH_CAPSULE_FLIPSID3_TACTICS_MLG_COLUMBUS_2016 = 4213
AUTOGRAPH_CAPSULE_TEAM_LIQUID_MLG_COLUMBUS_2016 = 4214
AUTOGRAPH_CAPSULE_MOUSESPORTS_MLG_COLUMBUS_2016 = 4215
AUTOGRAPH_CAPSULE_NATUS_VINCERE_MLG_COLUMBUS_2016 = 4216
AUTOGRAPH_CAPSULE_VIRTUSPRO_MLG_COLUMBUS_2016 = 4217
AUTOGRAPH_CAPSULE_CLOUD9_MLG_COLUMBUS_2016 = 4218
AUTOGRAPH_CAPSULE_G2_ESPORTS_MLG_COLUMBUS_2016 = 4219
AUTOGRAPH_CAPSULE_FAZE_CLAN_MLG_COLUMBUS_2016 = 4220
AUTOGRAPH_CAPSULE_ASTRALIS_MLG_COLUMBUS_2016 = 4221
AUTOGRAPH_CAPSULE_TEAM_ENVYUS_MLG_COLUMBUS_2016 = 4222
AUTOGRAPH_CAPSULE_FNATIC_MLG_COLUMBUS_2016 = 4223
AUTOGRAPH_CAPSULE_LUMINOSITY_GAMING_MLG_COLUMBUS_2016 = 4224
MLG_COLUMBUS_2016_DUST_II_SOUVENIR_PACKAGE = 4225
MLG_COLUMBUS_2016_MIRAGE_SOUVENIR_PACKAGE = 4226
MLG_COLUMBUS_2016_INFERNO_SOUVENIR_PACKAGE = 4227
MLG_COLUMBUS_2016_COBBLESTONE_SOUVENIR_PACKAGE = 4228
MLG_COLUMBUS_2016_OVERPASS_SOUVENIR_PACKAGE = 4229
MLG_COLUMBUS_2016_CACHE_SOUVENIR_PACKAGE = 4230
MLG_COLUMBUS_2016_TRAIN_SOUVENIR_PACKAGE = 4231
MLG_COLUMBUS_2016_NUKE_SOUVENIR_PACKAGE = 4232
CHROMA_3_CASE = 4233
COMMUNITY_GRAFFITI_BOX_1 = 4234
COLLECTIBLE_PINS_CAPSULE_SERIES_1 = 4235
GAMMA_CASE = 4236
NINJAS_IN_PYJAMAS_COLOGNE_2016 = 4237
OPTIC_GAMING_COLOGNE_2016 = 4238
COUNTER_LOGIC_GAMING_COLOGNE_2016 = 4239
GAMBIT_GAMING_COLOGNE_2016 = 4240
FLIPSID3_TACTICS_COLOGNE_2016 = 4241
TEAM_LIQUID_COLOGNE_2016 = 4242
MOUSESPORTS_COLOGNE_2016 = 4243
NATUS_VINCERE_COLOGNE_2016 = 4244
VIRTUSPRO_COLOGNE_2016 = 4245
SK_GAMING_COLOGNE_2016 = 4246
G2_ESPORTS_COLOGNE_2016 = 4247
FAZE_CLAN_COLOGNE_2016 = 4248
ASTRALIS_COLOGNE_2016 = 4249
TEAM_ENVYUS_COLOGNE_2016 = 4250
FNATIC_COLOGNE_2016 = 4251
TEAM_DIGNITAS_COLOGNE_2016 = 4252
ESL_COLOGNE_2016 = 4253
COLOGNE_2016_LEGENDS_HOLO_FOIL = 4254
COLOGNE_2016_CHALLENGERS_HOLO_FOIL = 4255
AUTOGRAPH_CAPSULE_CHALLENGERS_FOIL_COLOGNE_2016 = 4256
AUTOGRAPH_CAPSULE_LEGENDS_FOIL_COLOGNE_2016 = 4257
AUTOGRAPH_CAPSULE_NINJAS_IN_PYJAMAS_COLOGNE_2016 = 4258
AUTOGRAPH_CAPSULE_OPTIC_GAMING_COLOGNE_2016 = 4259
AUTOGRAPH_CAPSULE_COUNTER_LOGIC_GAMING_COLOGNE_2016 = 4260
AUTOGRAPH_CAPSULE_GAMBIT_GAMING_COLOGNE_2016 = 4261
AUTOGRAPH_CAPSULE_FLIPSID3_TACTICS_COLOGNE_2016 = 4262
AUTOGRAPH_CAPSULE_TEAM_LIQUID_COLOGNE_2016 = 4263
AUTOGRAPH_CAPSULE_MOUSESPORTS_COLOGNE_2016 = 4264
AUTOGRAPH_CAPSULE_NATUS_VINCERE_COLOGNE_2016 = 4265
AUTOGRAPH_CAPSULE_VIRTUSPRO_COLOGNE_2016 = 4266
AUTOGRAPH_CAPSULE_SK_GAMING_COLOGNE_2016 = 4267
AUTOGRAPH_CAPSULE_G2_ESPORTS_COLOGNE_2016 = 4268
AUTOGRAPH_CAPSULE_FAZE_CLAN_COLOGNE_2016 = 4269
AUTOGRAPH_CAPSULE_ASTRALIS_COLOGNE_2016 = 4270
AUTOGRAPH_CAPSULE_TEAM_ENVYUS_COLOGNE_2016 = 4271
AUTOGRAPH_CAPSULE_FNATIC_COLOGNE_2016 = 4272
AUTOGRAPH_CAPSULE_TEAM_DIGNITAS_COLOGNE_2016 = 4273
COLOGNE_2016_DUST_II_SOUVENIR_PACKAGE = 4274
COLOGNE_2016_MIRAGE_SOUVENIR_PACKAGE = 4275
COLOGNE_2016_COBBLESTONE_SOUVENIR_PACKAGE = 4276
COLOGNE_2016_OVERPASS_SOUVENIR_PACKAGE = 4277
COLOGNE_2016_CACHE_SOUVENIR_PACKAGE = 4278
COLOGNE_2016_TRAIN_SOUVENIR_PACKAGE = 4279
COLOGNE_2016_NUKE_SOUVENIR_PACKAGE = 4280
GAMMA_2_CASE = 4281
SUGARFACE_CAPSULE = 4282
BESTIARY_CAPSULE = 4283
COLLECTIBLE_PINS_CAPSULE_SERIES_2 = 4284
CSGO_GRAFFITI_BOX = 4285
PERFECT_WORLD_GRAFFITI_BOX = 4286
STATTRAK_RADICALS_BOX = 4287
GLOVE_CASE = 4288
STICKER_ASTRALIS_ATLANTA_2017 = 4289
STICKER_TEAM_ENVYUS_ATLANTA_2017 = 4290
STICKER_FAZE_CLAN_ATLANTA_2017 = 4291
STICKER_FLIPSID3_TACTICS_ATLANTA_2017 = 4292
STICKER_FNATIC_ATLANTA_2017 = 4293
STICKER_G2_ESPORTS_ATLANTA_2017 = 4294
STICKER_GAMBIT_GAMING_ATLANTA_2017 = 4295
STICKER_GODSENT_ATLANTA_2017 = 4296
STICKER_HELLRAISERS_ATLANTA_2017 = 4297
STICKER_MOUSESPORTS_ATLANTA_2017 = 4298
STICKER_NATUS_VINCERE_ATLANTA_2017 = 4299
STICKER_NORTH_ATLANTA_2017 = 4300
STICKER_OPTIC_GAMING_ATLANTA_2017 = 4301
STICKER_SK_GAMING_ATLANTA_2017 = 4302
STICKER_TEAM_LIQUID_ATLANTA_2017 = 4303
STICKER_VIRTUSPRO_ATLANTA_2017 = 4304
STICKER_ELEAGUE_ATLANTA_2017 = 4305
SEALED_GRAFFITI_ASTRALIS_ATLANTA_2017 = 4306
SEALED_GRAFFITI_TEAM_ENVYUS_ATLANTA_2017 = 4307
SEALED_GRAFFITI_FAZE_CLAN_ATLANTA_2017 = 4308
SEALED_GRAFFITI_FLIPSID3_TACTICS_ATLANTA_2017 = 4309
SEALED_GRAFFITI_FNATIC_ATLANTA_2017 = 4310
SEALED_GRAFFITI_G2_ESPORTS_ATLANTA_2017 = 4311
SEALED_GRAFFITI_GAMBIT_GAMING_ATLANTA_2017 = 4312
SEALED_GRAFFITI_GODSENT_ATLANTA_2017 = 4313
SEALED_GRAFFITI_HELLRAISERS_ATLANTA_2017 = 4314
SEALED_GRAFFITI_MOUSESPORTS_ATLANTA_2017 = 4315
SEALED_GRAFFITI_NATUS_VINCERE_ATLANTA_2017 = 4316
SEALED_GRAFFITI_NORTH_ATLANTA_2017 = 4317
SEALED_GRAFFITI_OPTIC_GAMING_ATLANTA_2017 = 4318
SEALED_GRAFFITI_SK_GAMING_ATLANTA_2017 = 4319
SEALED_GRAFFITI_TEAM_LIQUID_ATLANTA_2017 = 4320
SEALED_GRAFFITI_VIRTUSPRO_ATLANTA_2017 = 4321
SEALED_GRAFFITI_ELEAGUE_ATLANTA_2017 = 4322
ATLANTA_2017_LEGENDS_HOLO_FOIL = 4323
ATLANTA_2017_CHALLENGERS_HOLO_FOIL = 4324
AUTOGRAPH_CAPSULE_CHALLENGERS_FOIL_ATLANTA_2017 = 4325
AUTOGRAPH_CAPSULE_LEGENDS_FOIL_ATLANTA_2017 = 4326
AUTOGRAPH_CAPSULE_ASTRALIS_ATLANTA_2017 = 4327
AUTOGRAPH_CAPSULE_TEAM_ENVYUS_ATLANTA_2017 = 4328
AUTOGRAPH_CAPSULE_FAZE_CLAN_ATLANTA_2017 = 4329
AUTOGRAPH_CAPSULE_FLIPSID3_TACTICS_ATLANTA_2017 = 4330
AUTOGRAPH_CAPSULE_FNATIC_ATLANTA_2017 = 4331
AUTOGRAPH_CAPSULE_G2_ESPORTS_ATLANTA_2017 = 4332
AUTOGRAPH_CAPSULE_GAMBIT_GAMING_ATLANTA_2017 = 4333
AUTOGRAPH_CAPSULE_GODSENT_ATLANTA_2017 = 4334
AUTOGRAPH_CAPSULE_HELLRAISERS_ATLANTA_2017 = 4335
AUTOGRAPH_CAPSULE_MOUSESPORTS_ATLANTA_2017 = 4336
AUTOGRAPH_CAPSULE_NATUS_VINCERE_ATLANTA_2017 = 4337
AUTOGRAPH_CAPSULE_NORTH_ATLANTA_2017 = 4338
AUTOGRAPH_CAPSULE_OPTIC_GAMING_ATLANTA_2017 = 4339
AUTOGRAPH_CAPSULE_SK_GAMING_ATLANTA_2017 = 4340
AUTOGRAPH_CAPSULE_TEAM_LIQUID_ATLANTA_2017 = 4341
AUTOGRAPH_CAPSULE_VIRTUSPRO_ATLANTA_2017 = 4342
ATLANTA_2017_MEGA_BUNDLE = 4343
ATLANTA_2017_DUST_II_SOUVENIR_PACKAGE = 4344
ATLANTA_2017_MIRAGE_SOUVENIR_PACKAGE = 4345
ATLANTA_2017_COBBLESTONE_SOUVENIR_PACKAGE = 4346
ATLANTA_2017_OVERPASS_SOUVENIR_PACKAGE = 4347
ATLANTA_2017_CACHE_SOUVENIR_PACKAGE = 4348
ATLANTA_2017_TRAIN_SOUVENIR_PACKAGE = 4349
ATLANTA_2017_NUKE_SOUVENIR_PACKAGE = 4350
SPECTRUM_CASE = 4351
OPERATION_HYDRA_CASE = 4352
OPERATION_HYDRA_CHALLENGE_COIN = 4353
SILVER_OPERATION_HYDRA_COIN = 4354
GOLD_OPERATION_HYDRA_COIN = 4355
DIAMOND_OPERATION_HYDRA_COIN = 4356
STICKER_ASTRALIS_KRAKOW_2017 = 4357
STICKER_VIRTUSPRO_KRAKOW_2017 = 4358
STICKER_FNATIC_KRAKOW_2017 = 4359
STICKER_SK_GAMING_KRAKOW_2017 = 4360
STICKER_NATUS_VINCERE_KRAKOW_2017 = 4361
STICKER_GAMBIT_KRAKOW_2017 = 4362
STICKER_NORTH_KRAKOW_2017 = 4363
STICKER_FAZE_CLAN_KRAKOW_2017 = 4364
STICKER_MOUSESPORTS_KRAKOW_2017 = 4365
STICKER_G2_ESPORTS_KRAKOW_2017 = 4366
STICKER_BIG_KRAKOW_2017 = 4367
STICKER_CLOUD9_KRAKOW_2017 = 4368
STICKER_PENTA_SPORTS_KRAKOW_2017 = 4369
STICKER_FLIPSID3_TACTICS_KRAKOW_2017 = 4370
STICKER_IMMORTALS_KRAKOW_2017 = 4371
STICKER_VEGA_SQUADRON_KRAKOW_2017 = 4372
STICKER_PGL_KRAKOW_2017 = 4373
SEALED_GRAFFITI_ASTRALIS_KRAKOW_2017 = 4374
SEALED_GRAFFITI_VIRTUSPRO_KRAKOW_2017 = 4375
SEALED_GRAFFITI_FNATIC_KRAKOW_2017 = 4376
SEALED_GRAFFITI_SK_GAMING_KRAKOW_2017 = 4377
SEALED_GRAFFITI_NATUS_VINCERE_KRAKOW_2017 = 4378
SEALED_GRAFFITI_GAMBIT_KRAKOW_2017 = 4379
SEALED_GRAFFITI_NORTH_KRAKOW_2017 = 4380
SEALED_GRAFFITI_FAZE_CLAN_KRAKOW_2017 = 4381
SEALED_GRAFFITI_MOUSESPORTS_KRAKOW_2017 = 4382
SEALED_GRAFFITI_G2_ESPORTS_KRAKOW_2017 = 4383
SEALED_GRAFFITI_BIG_KRAKOW_2017 = 4384
SEALED_GRAFFITI_CLOUD9_KRAKOW_2017 = 4385
SEALED_GRAFFITI_PENTA_SPORTS_KRAKOW_2017 = 4386
SEALED_GRAFFITI_FLIPSID3_TACTICS_KRAKOW_2017 = 4387
SEALED_GRAFFITI_IMMORTALS_KRAKOW_2017 = 4388
SEALED_GRAFFITI_VEGA_SQUADRON_KRAKOW_2017 = 4389
SEALED_GRAFFITI_PGL_KRAKOW_2017 = 4390
KRAKOW_2017_LEGENDS_HOLO_FOIL = 4391
KRAKOW_2017_CHALLENGERS_HOLO_FOIL = 4392
KRAKOW_2017_CHALLENGERS_AUTOGRAPH_CAPSULE = 4393
KRAKOW_2017_LEGENDS_AUTOGRAPH_CAPSULE = 4394
KRAKOW_2017_MEGA_BUNDLE = 4395
KRAKOW_2017_INFERNO_SOUVENIR_PACKAGE = 4396
KRAKOW_2017_MIRAGE_SOUVENIR_PACKAGE = 4397
KRAKOW_2017_COBBLESTONE_SOUVENIR_PACKAGE = 4398
KRAKOW_2017_OVERPASS_SOUVENIR_PACKAGE = 4399
KRAKOW_2017_CACHE_SOUVENIR_PACKAGE = 4400
KRAKOW_2017_TRAIN_SOUVENIR_PACKAGE = 4401
KRAKOW_2017_NUKE_SOUVENIR_PACKAGE = 4402
SPECTRUM_2_CASE = 4403
PERFECT_WORLD_STICKER_CAPSULE_1 = 4404
PERFECT_WORLD_STICKER_CAPSULE_2 = 4405
STICKER_GAMBIT_ESPORTS_BOSTON_2018 = 4406
STICKER_100_THIEVES_BOSTON_2018 = 4407
STICKER_ASTRALIS_BOSTON_2018 = 4408
STICKER_VIRTUSPRO_BOSTON_2018 = 4409
STICKER_FNATIC_BOSTON_2018 = 4410
STICKER_SK_GAMING_BOSTON_2018 = 4411
STICKER_BIG_BOSTON_2018 = 4412
STICKER_NORTH_BOSTON_2018 = 4413
STICKER_G2_ESPORTS_BOSTON_2018 = 4414
STICKER_CLOUD9_BOSTON_2018 = 4415
STICKER_FLIPSID3_TACTICS_BOSTON_2018 = 4416
STICKER_NATUS_VINCERE_BOSTON_2018 = 4417
STICKER_MOUSESPORTS_BOSTON_2018 = 4418
STICKER_SPROUT_ESPORTS_BOSTON_2018 = 4419
STICKER_FAZE_CLAN_BOSTON_2018 = 4420
STICKER_VEGA_SQUADRON_BOSTON_2018 = 4421
STICKER_SPACE_SOLDIERS_BOSTON_2018 = 4422
STICKER_TEAM_LIQUID_BOSTON_2018 = 4423
STICKER_AVANGAR_BOSTON_2018 = 4424
STICKER_RENEGADES_BOSTON_2018 = 4425
STICKER_TEAM_ENVYUS_BOSTON_2018 = 4426
STICKER_MISFITS_GAMING_BOSTON_2018 = 4427
STICKER_QUANTUM_BELLATOR_FIRE_BOSTON_2018 = 4428
STICKER_TYLOO_BOSTON_2018 = 4429
STICKER_ELEAGUE_BOSTON_2018 = 4430
SEALED_GRAFFITI_GAMBIT_ESPORTS_BOSTON_2018 = 4431
SEALED_GRAFFITI_100_THIEVES_BOSTON_2018 = 4432
SEALED_GRAFFITI_ASTRALIS_BOSTON_2018 = 4433
SEALED_GRAFFITI_VIRTUSPRO_BOSTON_2018 = 4434
SEALED_GRAFFITI_FNATIC_BOSTON_2018 = 4435
SEALED_GRAFFITI_SK_GAMING_BOSTON_2018 = 4436
SEALED_GRAFFITI_BIG_BOSTON_2018 = 4437
SEALED_GRAFFITI_NORTH_BOSTON_2018 = 4438
SEALED_GRAFFITI_G2_ESPORTS_BOSTON_2018 = 4439
SEALED_GRAFFITI_CLOUD9_BOSTON_2018 = 4440
SEALED_GRAFFITI_FLIPSID3_TACTICS_BOSTON_2018 = 4441
SEALED_GRAFFITI_NATUS_VINCERE_BOSTON_2018 = 4442
SEALED_GRAFFITI_MOUSESPORTS_BOSTON_2018 = 4443
SEALED_GRAFFITI_SPROUT_ESPORTS_BOSTON_2018 = 4444
SEALED_GRAFFITI_FAZE_CLAN_BOSTON_2018 = 4445
SEALED_GRAFFITI_VEGA_SQUADRON_BOSTON_2018 = 4446
SEALED_GRAFFITI_SPACE_SOLDIERS_BOSTON_2018 = 4447
SEALED_GRAFFITI_TEAM_LIQUID_BOSTON_2018 = 4448
SEALED_GRAFFITI_AVANGAR_BOSTON_2018 = 4449
SEALED_GRAFFITI_RENEGADES_BOSTON_2018 = 4450
SEALED_GRAFFITI_TEAM_ENVYUS_BOSTON_2018 = 4451
SEALED_GRAFFITI_MISFITS_GAMING_BOSTON_2018 = 4452
SEALED_GRAFFITI_QUANTUM_BELLATOR_FIRE_BOSTON_2018 = 4453
SEALED_GRAFFITI_TYLOO_BOSTON_2018 = 4454
SEALED_GRAFFITI_ELEAGUE_BOSTON_2018 = 4455
BOSTON_2018_LEGENDS_HOLO_FOIL = 4456
BOSTON_2018_RETURNING_CHALLENGERS_HOLO_FOIL = 4457
BOSTON_2018_MINOR_CHALLENGERS_HOLO_FOIL = 4458
BOSTON_2018_LEGENDS_AUTOGRAPH_CAPSULE = 4459
BOSTON_2018_RETURNING_CHALLENGERS_AUTOGRAPH_CAPSULE = 4460
BOSTON_2018_MINOR_CHALLENGERS_AUTOGRAPH_CAPSULE = 4461
BOSTON_2018_MEGA_BUNDLE = 4462
BOSTON_2018_INFERNO_SOUVENIR_PACKAGE = 4463
BOSTON_2018_MIRAGE_SOUVENIR_PACKAGE = 4464
BOSTON_2018_COBBLESTONE_SOUVENIR_PACKAGE = 4465
BOSTON_2018_OVERPASS_SOUVENIR_PACKAGE = 4466
BOSTON_2018_CACHE_SOUVENIR_PACKAGE = 4467
BOSTON_2018_TRAIN_SOUVENIR_PACKAGE = 4468
BOSTON_2018_NUKE_SOUVENIR_PACKAGE = 4469
COMMUNITY_CAPSULE_2018 = 4470
CLUTCH_CASE = 4471
STICKER_FLASH_GAMING_BOSTON_2018 = 4472
SEALED_GRAFFITI_FLASH_GAMING_BOSTON_2018 = 4473
BOSTON_2018_MINOR_CHALLENGERS_WITH_FLASH_GAMING_HOLO_FOIL = 4474
BOSTON_2018_MINOR_CHALLENGERS_WITH_FLASH_GAMING_AUTOGRAPH_CAPSULE = 4475
BOSTON_2018_MEGA_BUNDLE_2 = 4476
CHICKEN_CAPSULE = 4477
BOSTON_2018_ATTENDING_LEGENDS_HOLO_FOIL = 4478
BOSTON_2018_ATTENDING_LEGENDS_AUTOGRAPH_CAPSULE = 4479
BOSTON_2018_MEGA_BUNDLE_3 = 4480
COLLECTIBLE_PINS_CAPSULE_SERIES_3 = 4481
HORIZON_CASE = 4482
STICKER_CLOUD9_LONDON_2018 = 4483
STICKER_FAZE_CLAN_LONDON_2018 = 4484
STICKER_NATUS_VINCERE_LONDON_2018 = 4485
STICKER_MIBR_LONDON_2018 = 4486
STICKER_MOUSESPORTS_LONDON_2018 = 4487
STICKER_WINSTRIKE_TEAM_LONDON_2018 = 4488
STICKER_G2_ESPORTS_LONDON_2018 = 4489
STICKER_FNATIC_LONDON_2018 = 4490
STICKER_GAMBIT_ESPORTS_LONDON_2018 = 4491
STICKER_VEGA_SQUADRON_LONDON_2018 = 4492
STICKER_SPACE_SOLDIERS_LONDON_2018 = 4493
STICKER_BIG_LONDON_2018 = 4494
STICKER_ASTRALIS_LONDON_2018 = 4495
STICKER_TEAM_LIQUID_LONDON_2018 = 4496
STICKER_NORTH_LONDON_2018 = 4497
STICKER_VIRTUSPRO_LONDON_2018 = 4498
STICKER_NINJAS_IN_PYJAMAS_LONDON_2018 = 4499
STICKER_COMPLEXITY_GAMING_LONDON_2018 = 4500
STICKER_HELLRAISERS_LONDON_2018 = 4501
STICKER_RENEGADES_LONDON_2018 = 4502
STICKER_OPTIC_GAMING_LONDON_2018 = 4503
STICKER_ROGUE_LONDON_2018 = 4504
STICKER_TEAM_SPIRIT_LONDON_2018 = 4505
STICKER_TYLOO_LONDON_2018 = 4506
STICKER_FACEIT_LONDON_2018 = 4507
SEALED_GRAFFITI_CLOUD9_LONDON_2018 = 4508
SEALED_GRAFFITI_FAZE_CLAN_LONDON_2018 = 4509
SEALED_GRAFFITI_NATUS_VINCERE_LONDON_2018 = 4510
SEALED_GRAFFITI_MIBR_LONDON_2018 = 4511
SEALED_GRAFFITI_MOUSESPORTS_LONDON_2018 = 4512
SEALED_GRAFFITI_WINSTRIKE_TEAM_LONDON_2018 = 4513
SEALED_GRAFFITI_G2_ESPORTS_LONDON_2018 = 4514
SEALED_GRAFFITI_FNATIC_LONDON_2018 = 4515
SEALED_GRAFFITI_GAMBIT_ESPORTS_LONDON_2018 = 4516
SEALED_GRAFFITI_VEGA_SQUADRON_LONDON_2018 = 4517
SEALED_GRAFFITI_SPACE_SOLDIERS_LONDON_2018 = 4518
SEALED_GRAFFITI_BIG_LONDON_2018 = 4519
SEALED_GRAFFITI_ASTRALIS_LONDON_2018 = 4520
SEALED_GRAFFITI_TEAM_LIQUID_LONDON_2018 = 4521
SEALED_GRAFFITI_NORTH_LONDON_2018 = 4522
SEALED_GRAFFITI_VIRTUSPRO_LONDON_2018 = 4523
SEALED_GRAFFITI_NINJAS_IN_PYJAMAS_LONDON_2018 = 4524
SEALED_GRAFFITI_COMPLEXITY_GAMING_LONDON_2018 = 4525
SEALED_GRAFFITI_HELLRAISERS_LONDON_2018 = 4526
SEALED_GRAFFITI_RENEGADES_LONDON_2018 = 4527
SEALED_GRAFFITI_OPTIC_GAMING_LONDON_2018 = 4528
SEALED_GRAFFITI_ROGUE_LONDON_2018 = 4529
SEALED_GRAFFITI_TEAM_SPIRIT_LONDON_2018 = 4530
SEALED_GRAFFITI_TYLOO_LONDON_2018 = 4531
SEALED_GRAFFITI_FACEIT_LONDON_2018 = 4532
LONDON_2018_LEGENDS_HOLO_FOIL = 4533
LONDON_2018_RETURNING_CHALLENGERS_HOLO_FOIL = 4534
LONDON_2018_MINOR_CHALLENGERS_HOLO_FOIL = 4535
LONDON_2018_LEGENDS_AUTOGRAPH_CAPSULE = 4536
LONDON_2018_RETURNING_CHALLENGERS_AUTOGRAPH_CAPSULE = 4537
LONDON_2018_MINOR_CHALLENGERS_AUTOGRAPH_CAPSULE = 4538
LONDON_2018_MEGA_BUNDLE = 4539
LONDON_2018_INFERNO_SOUVENIR_PACKAGE = 4540
LONDON_2018_MIRAGE_SOUVENIR_PACKAGE = 4541
LONDON_2018_DUST_II_SOUVENIR_PACKAGE = 4542
LONDON_2018_OVERPASS_SOUVENIR_PACKAGE = 4543
LONDON_2018_CACHE_SOUVENIR_PACKAGE = 4544
LONDON_2018_TRAIN_SOUVENIR_PACKAGE = 4545
LONDON_2018_NUKE_SOUVENIR_PACKAGE = 4546
SKILL_GROUPS_CAPSULE = 4547
DANGER_ZONE_CASE = 4548
OPERATION_SHATTERED_WEB_PREMIUM_PASS = 4549
OPERATION_SHATTERED_WEB_CHALLENGE_COIN = 4550
SILVER_OPERATION_SHATTERED_WEB_COIN = 4551
GOLD_OPERATION_SHATTERED_WEB_COIN = 4552
DIAMOND_OPERATION_SHATTERED_WEB_COIN = 4553
KATOWICE_2019_VIEWER_PASS = 4554
KATOWICE_2019_COIN = 4555
KATOWICE_2019_SILVER_COIN = 4556
KATOWICE_2019_GOLD_COIN = 4557
KATOWICE_2019_DIAMOND_COIN = 4558
STICKER_ASTRALIS_KATOWICE_2019 = 4559
STICKER_AVANGAR_KATOWICE_2019 = 4560
STICKER_BIG_KATOWICE_2019 = 4561
STICKER_CLOUD9_KATOWICE_2019 = 4562
STICKER_COMPLEXITY_GAMING_KATOWICE_2019 = 4563
STICKER_ENCE_KATOWICE_2019 = 4564
STICKER_FAZE_CLAN_KATOWICE_2019 = 4565
STICKER_FNATIC_KATOWICE_2019 = 4566
STICKER_FURIA_KATOWICE_2019 = 4567
STICKER_G2_ESPORTS_KATOWICE_2019 = 4568
STICKER_GRAYHOUND_GAMING_KATOWICE_2019 = 4569
STICKER_HELLRAISERS_KATOWICE_2019 = 4570
STICKER_MIBR_KATOWICE_2019 = 4571
STICKER_NATUS_VINCERE_KATOWICE_2019 = 4572
STICKER_NINJAS_IN_PYJAMAS_KATOWICE_2019 = 4573
STICKER_NRG_KATOWICE_2019 = 4574
STICKER_RENEGADES_KATOWICE_2019 = 4575
STICKER_TEAM_LIQUID_KATOWICE_2019 = 4576
STICKER_TEAM_SPIRIT_KATOWICE_2019 = 4577
STICKER_TYLOO_KATOWICE_2019 = 4578
STICKER_VEGA_SQUADRON_KATOWICE_2019 = 4579
STICKER_VICI_GAMING_KATOWICE_2019 = 4580
STICKER_VITALITY_KATOWICE_2019 = 4581
STICKER_WINSTRIKE_TEAM_KATOWICE_2019 = 4582
STICKER_IEM_KATOWICE_2019 = 4583
KATOWICE_2019_LEGENDS_HOLO_FOIL = 4584
KATOWICE_2019_RETURNING_CHALLENGERS_HOLO_FOIL = 4585
KATOWICE_2019_MINOR_CHALLENGERS_HOLO_FOIL = 4586
KATOWICE_2019_LEGENDS_AUTOGRAPH_CAPSULE = 4587
KATOWICE_2019_RETURNING_CHALLENGERS_AUTOGRAPH_CAPSULE = 4588
KATOWICE_2019_MINOR_CHALLENGERS_AUTOGRAPH_CAPSULE = 4589
KATOWICE_2019_INFERNO_SOUVENIR_PACKAGE = 4590
KATOWICE_2019_MIRAGE_SOUVENIR_PACKAGE = 4591
KATOWICE_2019_DUST_II_SOUVENIR_PACKAGE = 4592
KATOWICE_2019_OVERPASS_SOUVENIR_PACKAGE = 4593
KATOWICE_2019_CACHE_SOUVENIR_PACKAGE = 4594
KATOWICE_2019_TRAIN_SOUVENIR_PACKAGE = 4595
KATOWICE_2019_NUKE_SOUVENIR_PACKAGE = 4596
HALO_CAPSULE = 4597
PRISMA_CASE = 4598
FERAL_PREDATORS_CAPSULE = 4599
SHATTERED_WEB_STICKER_COLLECTION = 4600
THE_OVERPASS_COLLECTION = 4601
THE_COBBLESTONE_COLLECTION = 4602
THE_CACHE_COLLECTION = 4603
THE_GODS_AND_MONSTERS_COLLECTION = 4604
THE_CHOP_SHOP_COLLECTION = 4605
THE_RISING_SUN_COLLECTION = 4606
BONUS_RANK_XP = 4607
THE_CANALS_COLLECTION = 4608
PATCH = 4609
CSGO_PATCH_PACK = 4610
THE_NORSE_COLLECTION = 4611
THE_ST_MARC_COLLECTION = 4612
METAL_SKILL_GROUP_PATCH_COLLECTION = 4614
HALF_LIFE_ALYX_PATCH_PACK = 4615
WARHAMMER_40000_STICKER_CAPSULE = 4616
CSGO_GRAFFITI_3_COLLECTION = 4617
TROLLING_GRAFFITI_COLLECTION = 4618
BLUEBERRIES_BUCKSHOT_NSWC_SEAL = 4619
SHATTERED_WEB_CASE = 4620
CSGO_GRAFFITI_2_COLLECTION = 4621
BERLIN_2019_VIEWER_PASS = 4622
BERLIN_2019_COIN = 4623
BERLIN_2019_SILVER_COIN = 4624
BERLIN_2019_GOLD_COIN = 4625
BERLIN_2019_DIAMOND_COIN = 4626
BERLIN_2019_VIEWER_PASS_AND_3_SOUVENIR_TOKENS = 4627
BERLIN_2019_SOUVENIR_TOKEN = 4628
STICKER_ASTRALIS_BERLIN_2019 = 4629
STICKER_ENCE_BERLIN_2019 = 4630
STICKER_MIBR_BERLIN_2019 = 4631
STICKER_NATUS_VINCERE_BERLIN_2019 = 4632
STICKER_NINJAS_IN_PYJAMAS_BERLIN_2019 = 4633
STICKER_FAZE_CLAN_BERLIN_2019 = 4634
STICKER_TEAM_LIQUID_BERLIN_2019 = 4635
STICKER_RENEGADES_BERLIN_2019 = 4636
STICKER_COMPLEXITY_GAMING_BERLIN_2019 = 4637
STICKER_HELLRAISERS_BERLIN_2019 = 4638
STICKER_AVANGAR_BERLIN_2019 = 4639
STICKER_G2_ESPORTS_BERLIN_2019 = 4640
STICKER_VITALITY_BERLIN_2019 = 4641
STICKER_GRAYHOUND_GAMING_BERLIN_2019 = 4642
STICKER_MOUSESPORTS_BERLIN_2019 = 4643
STICKER_FORZE_ESPORTS_BERLIN_2019 = 4644
STICKER_NRG_BERLIN_2019 = 4645
STICKER_TYLOO_BERLIN_2019 = 4646
STICKER_FURIA_BERLIN_2019 = 4647
STICKER_CR4ZY_BERLIN_2019 = 4648
STICKER_SYMAN_GAMING_BERLIN_2019 = 4649
STICKER_NORTH_BERLIN_2019 = 4650
STICKER_DREAMEATERS_BERLIN_2019 = 4651
STICKER_INTZ_E_SPORTS_CLUB_BERLIN_2019 = 4652
STICKER_STARLADDER_BERLIN_2019 = 4653
BERLIN_2019_LEGENDS_HOLO_FOIL = 4654
BERLIN_2019_RETURNING_CHALLENGERS_HOLO_FOIL = 4655
BERLIN_2019_MINOR_CHALLENGERS_HOLO_FOIL = 4656
BERLIN_2019_LEGENDS_AUTOGRAPH_CAPSULE = 4657
BERLIN_2019_RETURNING_CHALLENGERS_AUTOGRAPH_CAPSULE = 4658
BERLIN_2019_MINOR_CHALLENGERS_AUTOGRAPH_CAPSULE = 4659
BERLIN_2019_INFERNO_SOUVENIR_PACKAGE = 4660
BERLIN_2019_MIRAGE_SOUVENIR_PACKAGE = 4661
BERLIN_2019_DUST_II_SOUVENIR_PACKAGE = 4662
BERLIN_2019_OVERPASS_SOUVENIR_PACKAGE = 4663
BERLIN_2019_TRAIN_SOUVENIR_PACKAGE = 4664
BERLIN_2019_NUKE_SOUVENIR_PACKAGE = 4665
BERLIN_2019_VERTIGO_SOUVENIR_PACKAGE = 4666
SHATTERED_WEB_STICKER_COLLECTION_2 = 4667
X_RAY_P250_PACKAGE = 4668
CS20_CASE = 4669
CS20_STICKER_CAPSULE = 4670
ONE_STAR_FOR_OPERATION_SHATTERED_WEB = 4671
TEN_STARS_FOR_OPERATION_SHATTERED_WEB = 4672
ONE_HUNDRED_STARS_FOR_OPERATION_SHATTERED_WEB = 4673
SERVICE_MEDAL_2020_GREY = 4674
SERVICE_MEDAL_2020_GREEN = 4675
SERVICE_MEDAL_2020_BLUE = 4676
SERVICE_MEDAL_2020_PURPLE = 4677
SERVICE_MEDAL_2020_PINK = 4678
SERVICE_MEDAL_2020_RED = 4679
TWO_TIMES_MCCOY_TACP_CAVALRY = 4680
CIVIL_PROTECTION_PIN = 4682
SUSTENANCE_PIN = 4683
VORTIGAUNT_PIN = 4684
HEADCRAB_GLYPH_PIN = 4685
HEALTH_PIN = 4686
LAMBDA_PIN = 4687
COPPER_LAMBDA_PIN = 4688
CMB_PIN = 4689
BLACK_MESA_PIN = 4690
COMBINE_HELMET_PIN = 4691
CITY_17_PIN = 4692
HALF_LIFE_ALYX_COLLECTIBLE_PINS_CAPSULE = 4693
HALF_LIFE_ALYX_STICKER_CAPSULE = 4694
PRISMA_2_CASE = 4695
MASTERMINDS_MUSIC_KIT_BOX = 4696
STATTRAK_MASTERMINDS_MUSIC_KIT_BOX = 4697
FRACTURE_CASE = 4698
OPERATION_BROKEN_FANG_PREMIUM_PASS = 4699
OPERATION_BROKEN_FANG_CHALLENGE_COIN = 4700
SILVER_OPERATION_BROKEN_FANG_COIN = 4701
GOLD_OPERATION_BROKEN_FANG_COIN = 4702
DIAMOND_OPERATION_BROKEN_FANG_COIN = 4703
ONE_STAR_FOR_OPERATION_BROKEN_FANG = 4704
TEN_STARS_FOR_OPERATION_BROKEN_FANG = 4705
ONE_HUNDRED_STARS_FOR_OPERATION_BROKEN_FANG = 4706
DISTINGUISHED_AGENTS = 4707
EXCEPTIONAL_AGENTS = 4708
SUPERIOR_AGENTS = 4709
MASTER_AGENT__COMMANDER_MAE = 4710
CMDR_MAE_DEAD_COLD_JAMISON_SWAT = 4711
FIRST_LIEUTENANT_FARLOW_SWAT = 4712
JOHN_VAN_HEALEN_KASK_SWAT = 4713
BIO_HAZ_SPECIALIST_SWAT = 4714
SERGEANT_BOMBSON_SWAT = 4715
CHEM_HAZ_SPECIALIST_SWAT = 4716
OPERATION_BROKEN_FANG_CASE = 4717
REZAN_THE_REDSHIRT_SABRE = 4718
RECOIL_STICKER_COLLECTION = 4719
THE_CONTROL_COLLECTION = 4720
THE_HAVOC_COLLECTION = 4721
THE_ANCIENT_COLLECTION = 4722
RECOIL_STICKER_COLLECTION_2 = 4723
MASTER_AGENTS__SIR_BLOODY_DARRYL = 4724
BROKEN_FANG_GLOVES = 4725
SIR_BLOODY_MIAMI_DARRYL_THE_PROFESSIONALS = 4726
SAFECRACKER_VOLTZMANN_THE_PROFESSIONALS = 4727
LITTLE_KEV_THE_PROFESSIONALS = 4728
BROKEN_FANG_STICKER_COLLECTION = 4729
GETAWAY_SALLY_THE_PROFESSIONALS = 4730
BROKEN_FANG_STICKER_COLLECTION_2 = 4731
NUMBER_K_THE_PROFESSIONALS = 4732
SIR_BLOODY_SILENT_DARRYL_THE_PROFESSIONALS = 4733
SIR_BLOODY_SKULLHEAD_DARRYL_THE_PROFESSIONALS = 4734
SIR_BLOODY_DARRYL_ROYALE_THE_PROFESSIONALS = 4735
SIR_BLOODY_LOUDMOUTH_DARRYL_THE_PROFESSIONALS = 4736
SERVICE_MEDAL_2021_GREY = 4737
SERVICE_MEDAL_2021_GREEN = 4738
SERVICE_MEDAL_2021_BLUE = 4739
SERVICE_MEDAL_2021_PURPLE = 4740
SERVICE_MEDAL_2021_PINK = 4741
SERVICE_MEDAL_2021_RED = 4742
RMR_LEGENDS_2020 = 4743
RMR_CHALLENGERS_2020 = 4744
RMR_CONTENDERS_2020 = 4745
BLOODHOUND_GLOVES = 5027
DEFAULT_T_GLOVES = 5028
DEFAULT_CT_GLOVES = 5029
SPORT_GLOVES = 5030
DRIVER_GLOVES = 5031
HAND_WRAPS = 5032
MOTO_GLOVES = 5033
SPECIALIST_GLOVES = 5034
HYDRA_GLOVES = 5035
LOCAL_T_AGENT = 5036
LOCAL_CT_AGENT = 5037
CSGO_CUSTOMPLAYER_TM_ANARCHIST = 5038
CSGO_CUSTOMPLAYER_TM_ANARCHIST_VARIANTA = 5039
CSGO_CUSTOMPLAYER_TM_ANARCHIST_VARIANTB = 5040
CSGO_CUSTOMPLAYER_TM_ANARCHIST_VARIANTC = 5041
CSGO_CUSTOMPLAYER_TM_ANARCHIST_VARIANTD = 5042
CSGO_CUSTOMPLAYER_TM_PIRATE = 5043
CSGO_CUSTOMPLAYER_TM_PIRATE_VARIANTA = 5044
CSGO_CUSTOMPLAYER_TM_PIRATE_VARIANTB = 5045
CSGO_CUSTOMPLAYER_TM_PIRATE_VARIANTC = 5046
CSGO_CUSTOMPLAYER_TM_PIRATE_VARIANTD = 5047
CSGO_CUSTOMPLAYER_TM_PROFESSIONAL = 5048
CSGO_CUSTOMPLAYER_TM_PROFESSIONAL_VAR1 = 5049
CSGO_CUSTOMPLAYER_TM_PROFESSIONAL_VAR2 = 5050
CSGO_CUSTOMPLAYER_TM_PROFESSIONAL_VAR3 = 5051
CSGO_CUSTOMPLAYER_TM_PROFESSIONAL_VAR4 = 5052
CSGO_CUSTOMPLAYER_TM_SEPARATIST = 5053
CSGO_CUSTOMPLAYER_TM_SEPARATIST_VARIANTA = 5054
CSGO_CUSTOMPLAYER_TM_SEPARATIST_VARIANTB = 5055
CSGO_CUSTOMPLAYER_TM_SEPARATIST_VARIANTC = 5056
CSGO_CUSTOMPLAYER_TM_SEPARATIST_VARIANTD = 5057
CSGO_CUSTOMPLAYER_CTM_GIGN = 5058
CSGO_CUSTOMPLAYER_CTM_GIGN_VARIANTA = 5059
CSGO_CUSTOMPLAYER_CTM_GIGN_VARIANTB = 5060
CSGO_CUSTOMPLAYER_CTM_GIGN_VARIANTC = 5061
CSGO_CUSTOMPLAYER_CTM_GIGN_VARIANTD = 5062
CSGO_CUSTOMPLAYER_CTM_GSG9 = 5063
CSGO_CUSTOMPLAYER_CTM_GSG9_VARIANTA = 5064
CSGO_CUSTOMPLAYER_CTM_GSG9_VARIANTB = 5065
CSGO_CUSTOMPLAYER_CTM_GSG9_VARIANTC = 5066
CSGO_CUSTOMPLAYER_CTM_GSG9_VARIANTD = 5067
CSGO_CUSTOMPLAYER_CTM_IDF = 5068
CSGO_CUSTOMPLAYER_CTM_IDF_VARIANTB = 5069
CSGO_CUSTOMPLAYER_CTM_IDF_VARIANTC = 5070
CSGO_CUSTOMPLAYER_CTM_IDF_VARIANTD = 5071
CSGO_CUSTOMPLAYER_CTM_IDF_VARIANTE = 5072
CSGO_CUSTOMPLAYER_CTM_IDF_VARIANTF = 5073
CSGO_CUSTOMPLAYER_CTM_SWAT = 5074
CSGO_CUSTOMPLAYER_CTM_SWAT_VARIANTA = 5075
CSGO_CUSTOMPLAYER_CTM_SWAT_VARIANTB = 5076
CSGO_CUSTOMPLAYER_CTM_SWAT_VARIANTC = 5077
CSGO_CUSTOMPLAYER_CTM_SWAT_VARIANTD = 5078
CSGO_CUSTOMPLAYER_CTM_SAS_VARIANTA = 5079
CSGO_CUSTOMPLAYER_CTM_SAS_VARIANTB = 5080
CSGO_CUSTOMPLAYER_CTM_SAS_VARIANTC = 5081
CSGO_CUSTOMPLAYER_CTM_SAS_VARIANTD = 5082
CSGO_CUSTOMPLAYER_CTM_ST6 = 5083
CSGO_CUSTOMPLAYER_CTM_ST6_VARIANTA = 5084
CSGO_CUSTOMPLAYER_CTM_ST6_VARIANTB = 5085
CSGO_CUSTOMPLAYER_CTM_ST6_VARIANTC = 5086
CSGO_CUSTOMPLAYER_CTM_ST6_VARIANTD = 5087
CSGO_CUSTOMPLAYER_TM_BALKAN_VARIANTE = 5088
CSGO_CUSTOMPLAYER_TM_BALKAN_VARIANTA = 5089
CSGO_CUSTOMPLAYER_TM_BALKAN_VARIANTB = 5090
CSGO_CUSTOMPLAYER_TM_BALKAN_VARIANTC = 5091
CSGO_CUSTOMPLAYER_TM_BALKAN_VARIANTD = 5092
CSGO_CUSTOMPLAYER_TM_JUMPSUIT_VARIANTA = 5093
CSGO_CUSTOMPLAYER_TM_JUMPSUIT_VARIANTB = 5094
CSGO_CUSTOMPLAYER_TM_JUMPSUIT_VARIANTC = 5095
CSGO_CUSTOMPLAYER_TM_PHOENIX_HEAVY = 5096
CSGO_CUSTOMPLAYER_CTM_HEAVY = 5097
CSGO_CUSTOMPLAYER_TM_LEET_VARIANTA = 5100
CSGO_CUSTOMPLAYER_TM_LEET_VARIANTB = 5101
CSGO_CUSTOMPLAYER_TM_LEET_VARIANTC = 5102
CSGO_CUSTOMPLAYER_TM_LEET_VARIANTD = 5103
CSGO_CUSTOMPLAYER_TM_LEET_VARIANTE = 5104
GROUND_REBEL__ELITE_CREW = 5105
OSIRIS_ELITE_CREW = 5106
PROF_SHAHMAT_ELITE_CREW = 5107
THE_ELITE_MR_MUHLIK_ELITE_CREW = 5108
CSGO_CUSTOMPLAYER_TM_PHOENIX = 5200
CSGO_CUSTOMPLAYER_TM_PHOENIX_VARIANTA = 5201
CSGO_CUSTOMPLAYER_TM_PHOENIX_VARIANTB = 5202
CSGO_CUSTOMPLAYER_TM_PHOENIX_VARIANTC = 5203
CSGO_CUSTOMPLAYER_TM_PHOENIX_VARIANTD = 5204
SOLDIER_PHOENIX = 5205
ENFORCER_PHOENIX = 5206
SLINGSHOT_PHOENIX = 5207
STREET_SOLDIER_PHOENIX = 5208
CSGO_CUSTOMPLAYER_CTM_FBI = 5300
CSGO_CUSTOMPLAYER_CTM_FBI_VARIANTA = 5301
CSGO_CUSTOMPLAYER_CTM_FBI_VARIANTC = 5302
CSGO_CUSTOMPLAYER_CTM_FBI_VARIANTD = 5303
CSGO_CUSTOMPLAYER_CTM_FBI_VARIANTE = 5304
OPERATOR_FBI_SWAT = 5305
MARKUS_DELROW_FBI_HRT = 5306
MICHAEL_SYFERS__FBI_SNIPER = 5307
SPECIAL_AGENT_AVA_FBI = 5308
THIRD_COMMANDO_COMPANY_KSK = 5400
SEAL_TEAM_6_SOLDIER_NSWC_SEAL = 5401
BUCKSHOT_NSWC_SEAL = 5402
TWO_TIMES_MCCOY_USAF_TACP = 5403
LT_COMMANDER_RICKSAW_NSWC_SEAL = 5404
DRAGOMIR_SABRE = 5500
MAXIMUS_SABRE = 5501
REZAN_THE_READY_SABRE = 5502
BLACKWOLF_SABRE = 5503
THE_DOCTOR_ROMANOV_SABRE = 5504
DRAGOMIR_SABRE_FOOTSOLDIER = 5505
CSGO_CUSTOMPLAYER_CTM_SAS = 5600
B_SQUADRON_OFFICER_SAS = 5601
DUST_II_PIN = 6001
GUARDIAN_ELITE_PIN = 6002
MIRAGE_PIN = 6003
INFERNO_PIN = 6004
ITALY_PIN = 6005
VICTORY_PIN = 6006
MILITIA_PIN = 6007
NUKE_PIN = 6008
TRAIN_PIN = 6009
GUARDIAN_PIN = 6010
TACTICS_PIN = 6011
GUARDIAN_2_PIN = 6012
BRAVO_PIN = 6013
BAGGAGE_PIN = 6014
PHOENIX_PIN = 6015
OFFICE_PIN = 6016
COBBLESTONE_PIN = 6017
OVERPASS_PIN = 6018
BLOODHOUND_PIN = 6019
CACHE_PIN = 6020
VALERIA_PHOENIX_PIN = 6021
CHROMA_PIN = 6022
GUARDIAN_3_PIN = 6023
CANALS_PIN = 6024
WELCOME_TO_THE_CLUTCH_PIN = 6025
DEATH_SENTENCE_PIN = 6026
INFERNO_2_PIN = 6027
WILDFIRE_PIN = 6028
EASY_PEASY_PIN = 6029
ACES_HIGH_PIN = 6030
HYDRA_PIN = 6031
HOWL_PIN = 6032
BRIGADIER_GENERAL_PIN = 6033
ALYX_PIN = 6034
ITEM_6035 = 6035
ITEM_6036 = 6036
ITEM_6037 = 6037
ITEM_6038 = 6038
ITEM_6039 = 6039
ITEM_6040 = 6040
ITEM_6041 = 6041
ITEM_6042 = 6042
ITEM_6043 = 6043
ITEM_6044 = 6044
ITEM_6045 = 6045
ITEM_6046 = 6046
ITEM_6047 = 6047
ITEM_6048 = 6048
ITEM_6049 = 6049
ITEM_6050 = 6050
ITEM_6051 = 6051
ITEM_6052 = 6052
ITEM_6053 = 6053
ITEM_6054 = 6054
ITEM_6055 = 6055
ITEM_6056 = 6056
ITEM_6057 = 6057
ITEM_6058 = 6058
ITEM_6059 = 6059
ITEM_6060 = 6060
ITEM_6061 = 6061
ITEM_6062 = 6062
ITEM_6063 = 6063
ITEM_6064 = 6064
ITEM_6065 = 6065
ITEM_6066 = 6066
ITEM_6067 = 6067
ITEM_6068 = 6068
ITEM_6069 = 6069
ITEM_6070 = 6070
ITEM_6071 = 6071
ITEM_6072 = 6072
ITEM_6073 = 6073
ITEM_6074 = 6074
ITEM_6075 = 6075
ITEM_6076 = 6076
ITEM_6077 = 6077
ITEM_6078 = 6078
ITEM_6079 = 6079
ITEM_6080 = 6080
ITEM_6081 = 6081
ITEM_6082 = 6082
ITEM_6083 = 6083
ITEM_6084 = 6084
ITEM_6085 = 6085
ITEM_6086 = 6086
ITEM_6087 = 6087
ITEM_6088 = 6088
DUST_II_PIN_2 = 6101
GUARDIAN_ELITE_PIN_2 = 6102
MIRAGE_PIN_2 = 6103
INFERNO_PIN_2 = 6104
ITALY_PIN_2 = 6105
VICTORY_PIN_2 = 6106
MILITIA_PIN_2 = 6107
NUKE_PIN_2 = 6108
TRAIN_PIN_2 = 6109
GUARDIAN_PIN_2 = 6110
TACTICS_PIN_2 = 6111
GUARDIAN_2_PIN_2 = 6112
BRAVO_PIN_2 = 6113
BAGGAGE_PIN_2 = 6114
PHOENIX_PIN_2 = 6115
OFFICE_PIN_2 = 6116
COBBLESTONE_PIN_2 = 6117
OVERPASS_PIN_2 = 6118
BLOODHOUND_PIN_2 = 6119
CACHE_PIN_2 = 6120
VALERIA_PHOENIX_PIN_2 = 6121
CHROMA_PIN_2 = 6122
GUARDIAN_3_PIN_2 = 6123
CANALS_PIN_2 = 6124
WELCOME_TO_THE_CLUTCH_PIN_2 = 6125
DEATH_SENTENCE_PIN_2 = 6126
INFERNO_2_PIN_2 = 6127
WILDFIRE_PIN_2 = 6128
EASY_PEASY_PIN_2 = 6129
ACES_HIGH_PIN_2 = 6130
HYDRA_PIN_2 = 6131
HOWL_PIN_2 = 6132
BRIGADIER_GENERAL_PIN_2 = 6133
ALYX_PIN_2 = 6134
DISTINGUISHED_AGENTS_2 = 6404
EXCEPTIONAL_AGENTS_2 = 6405
SUPERIOR_AGENTS_2 = 6406
MASTER_AGENTS_2 = 6407
OPERATION_WILDFIRE_CASE_KEY = 7000
STICKER_BOSSY_BURGER = 20000
STICKER_CAT_CALL = 20001
STICKER_CHICKEN_STRIKE = 20002
STICKER_CT_IN_BANANA = 20003
STICKER_DONT_WORRY_IM_PRO = 20004
STICKER_FIGHT_LIKE_A_GIRL = 20005
STICKER_FLASHBANG = 20006
STICKER_KAWAII_KILLER_CT = 20007
STICKER_NELU_THE_BEAR = 20008
STICKER_ONE_SHOT_ONE_KILL = 20009
STICKER_SHOOTING_STAR_RETURN = 20010
STICKER_WAR_PENGUIN = 20012
STICKER_WINDY_WALKING_CLUB = 20013
STICKER_BLITZKRIEG = 20014
STICKER_PIGEON_MASTER = 20015
STICKER_TERRORIZED = 20016
STICKER_TILL_DEATH_DO_US_PART = 20017
STICKER_STAY_FROSTY = 20018
STICKER_T_ON_CAT = 20019
MUSIC_KIT_DANIEL_SADOWSKI_CRIMSON_ASSAULT = 20020
MUSIC_KIT_NOISIA_SHARPENED = 20021
MUSIC_KIT_ROBERT_ALLAIRE_INSURGENCY = 20022
MUSIC_KIT_SEAN_MURRAY_AD8 = 20023
MUSIC_KIT_FEED_ME_HIGH_NOON = 20024
MUSIC_KIT_DREN_DEATHS_HEAD_DEMOLITION = 20025
MUSIC_KIT_AUSTIN_WINTORY_DESERT_FIRE = 20026
MUSIC_KIT_SASHA_LNOE = 20027
MUSIC_KIT_SKOG_METAL = 20028
STICKER_DOOMED = 20029
STICKER_QUEEN_OF_PAIN = 20030
STICKER_TRICK_OR_THREAT = 20031
STICKER_TRICK_OR_TREAT = 20032
STICKER_WITCH = 20033
STICKER_ZOMBIE_LOVER = 20034
STICKER_BLOOD_BOILER = 20035
STICKER_DINKED = 20036
STICKER_DRUG_WAR_VETERAN = 20037
STICKER_HO_HO_HO = 20038
STICKER_MASSIVE_PEAR = 20039
STICKER_MY_LITTLE_FRIEND = 20040
STICKER_PANDAMONIUM = 20041
STICKER_PIECE_OF_CAKE = 20042
STICKER_SAS_CHICKEN = 20043
STICKER_THUG_LIFE = 20044
STICKER_T_REKT = 20045
STICKER_WAROWL = 20046
STICKER_WORK_FOR_AMMO = 20047
STICKER_PHOENIX_FOIL = 20048
STICKER_BOMB_SQUAD_FOIL = 20049
MUSIC_KIT_MIDNIGHT_RIDERS_ALL_I_WANT_FOR_CHRISTMAS = 20050
MUSIC_KIT_DANIEL_SADOWSKI_TOTAL_DOMINATION = 20051
MUSIC_KIT_VARIOUS_ARTISTS_HOTLINE_MIAMI = 20052
MUSIC_KIT_MATT_LANGE_ISORHYTHM = 20053
MUSIC_KIT_MATEO_MESSINA_FOR_NO_MANKIND = 20054
MUSIC_KIT_DAMJAN_MRAVUNAC_THE_TALOS_PRINCIPLE = 20055
STICKER_FLICKSHOT = 20056
STICKER_HEADSHOT_GUARANTEE = 20057
STICKER_ECO_RUSH = 20058
STICKER_JUST_TROLLING = 20059
STICKER_FIRESTARTER_HOLO = 20061
STICKER_LUCKY_CAT_FOIL = 20062
STICKER_ROBO = 20063
STICKER_WITCHCRAFT = 20064
STICKER_WANNA_FIGHT = 20065
STICKER_HOSTAGE_RESCUE = 20066
STICKER_HAMSTER_HAWK = 20067
STICKER_HEADLESS_CHICKEN = 20068
MUSIC_KIT_PROXY_BATTLEPACK = 20069
MUSIC_KIT_KITHEORY_MOLOTOV = 20070
MUSIC_KIT_TROELS_FOLMANN_UBER_BLASTO_PHONE = 20071
MUSIC_KIT_KELLY_BAILEY_HAZARDOUS_ENVIRONMENTS = 20072
MUSIC_KIT_SKOG_II_HEADSHOT = 20073
ENFU_STICKER_CAPSULE_2 = 20074
STICKER_AWP_COUNTRY = 20075
STICKER_CHI_BOMB = 20076
STICKER_DORU_THE_FOX = 20077
STICKER_KNIFE_CLUB = 20078
STICKER_CS_ON_THE_MIND = 20079
STICKER_NINJA_DEFUSE = 20080
STICKER_PROS_DONT_FAKE = 20081
STICKER_KAWAII_KILLER_TERRORIST = 20082
STICKER_BAAA_CKSTABBER = 20083
STICKER_DELICIOUS_TEARS = 20084
MUSIC_KIT_DANIEL_SADOWSKI_THE_8_BIT_KIT = 20085
MUSIC_KIT_AWOLNATION_I_AM = 20086
MUSIC_KIT_MORD_FUSTANG_DIAMONDS = 20087
MUSIC_KIT_MICHAEL_BROSS_INVASION = 20088
MUSIC_KIT_IAN_HULTQUIST_LIONS_MOUTH = 20089
MUSIC_KIT_NEW_BEAT_FUND_SPONGE_FINGERZ = 20090
MUSIC_KIT_BEARTOOTH_DISGUSTING = 20091
MUSIC_KIT_LENNIE_MOORE_JAVA_HAVANA_FUNKALOO = 20092
MUSIC_KIT_DARUDE_MOMENTS_CSGO = 20093
STATTRAK_MUSIC_KIT_PROXY_BATTLEPACK = 20094
STATTRAK_MUSIC_KIT_KITHEORY_MOLOTOV = 20095
STATTRAK_MUSIC_KIT_TROELS_FOLMANN_UBER_BLASTO_PHONE = 20096
STATTRAK_MUSIC_KIT_KELLY_BAILEY_HAZARDOUS_ENVIRONMENTS = 20097
STATTRAK_MUSIC_KIT_SKOG_II_HEADSHOT = 20098
STATTRAK_MUSIC_KIT_DANIEL_SADOWSKI_THE_8_BIT_KIT = 20099
STATTRAK_MUSIC_KIT_AWOLNATION_I_AM = 20100
STATTRAK_MUSIC_KIT_MORD_FUSTANG_DIAMONDS = 20101
STATTRAK_MUSIC_KIT_MICHAEL_BROSS_INVASION = 20102
STATTRAK_MUSIC_KIT_IAN_HULTQUIST_LIONS_MOUTH = 20103
STATTRAK_MUSIC_KIT_NEW_BEAT_FUND_SPONGE_FINGERZ = 20104
STATTRAK_MUSIC_KIT_BEARTOOTH_DISGUSTING = 20105
STATTRAK_MUSIC_KIT_LENNIE_MOORE_JAVA_HAVANA_FUNKALOO = 20106
STATTRAK_MUSIC_KIT_DARUDE_MOMENTS_CSGO = 20107
STATTRAK_MUSIC_KIT_DANIEL_SADOWSKI_CRIMSON_ASSAULT = 20108
STATTRAK_MUSIC_KIT_NOISIA_SHARPENED = 20109
STATTRAK_MUSIC_KIT_ROBERT_ALLAIRE_INSURGENCY = 20110
STATTRAK_MUSIC_KIT_SEAN_MURRAY_AD8 = 20111
STATTRAK_MUSIC_KIT_FEED_ME_HIGH_NOON = 20112
STATTRAK_MUSIC_KIT_DREN_DEATHS_HEAD_DEMOLITION = 20113
STATTRAK_MUSIC_KIT_AUSTIN_WINTORY_DESERT_FIRE = 20114
STATTRAK_MUSIC_KIT_SASHA_LNOE = 20115
STATTRAK_MUSIC_KIT_SKOG_METAL = 20116
STATTRAK_MUSIC_KIT_MIDNIGHT_RIDERS_ALL_I_WANT_FOR_CHRISTMAS = 20117
STATTRAK_MUSIC_KIT_DANIEL_SADOWSKI_TOTAL_DOMINATION = 20118
STATTRAK_MUSIC_KIT_VARIOUS_ARTISTS_HOTLINE_MIAMI = 20119
STATTRAK_MUSIC_KIT_MATT_LANGE_ISORHYTHM = 20120
STATTRAK_MUSIC_KIT_MATEO_MESSINA_FOR_NO_MANKIND = 20121
STATTRAK_MUSIC_KIT_DAMJAN_MRAVUNAC_THE_TALOS_PRINCIPLE = 20122
PINUPS_STICKER_CAPSULE = 20123
SLID3_STICKER_CAPSULE = 20124
TEAM_ROLES_STICKER_CAPSULE = 20125
COLLECTIBLE_PINS_CAPSULE_SERIES_1_2 = 20126
SUGARFACE_STICKER_CAPSULE = 20127
BESTIARY_STICKER_CAPSULE = 20128
CSGO_GRAFFITI_BOX_2 = 20129
COMMUNITY_GRAFFITI_BOX_1_2 = 20130
COLLECTIBLE_PINS_CAPSULE_SERIES_2_2 = 20131
STATTRAK_RADICALS_MUSIC_KIT_BOX = 20133
PERFECT_WORLD_STICKER_CAPSULE_1_2 = 20134
PERFECT_WORLD_STICKER_CAPSULE_2_2 = 20135
PERFECT_WORLD_GRAFFITI_BOX_2 = 20136
COMMUNITY_CAPSULE_2018_2 = 20137
COLLECTIBLE_PINS_CAPSULE_SERIES_3_2 = 20138
SKILL_GROUPS_CAPSULE_2 = 20139
MUSIC_KIT_THE_VERKKARS_EZ4ENCE = 20140
STATTRAK_MUSIC_KIT_THE_VERKKARS_EZ4ENCE = 20141
FERAL_PREDATORS_STICKER_CAPSULE = 20142
CHICKEN_CAPSULE_2 = 20143
X_RAY_P250_PACKAGE_2 = 20144
CS20_STICKER_CAPSULE_2 = 20145
HALO_STICKER_CAPSULE = 20146
MUSIC_KIT_SCARLXRD_KING_SCAR = 20147
STATTRAK_MUSIC_KIT_SCARLXRD_KING_SCAR = 20148
CSGO_PATCH_PACK_2 = 20149
HALF_LIFE_ALYX_COLLECTIBLE_PINS_CAPSULE_2 = 20152
HALF_LIFE_ALYX_STICKER_CAPSULE_2 = 20153
HALF_LIFE_ALYX_PATCH_PACK_2 = 20154
MASTERMINDS_MUSIC_KIT_BOX_2 = 20169
STATTRAK_MASTERMINDS_MUSIC_KIT_BOX_2 = 20170
WARHAMMER_40000_STICKER_CAPSULE_2 = 20171
MUSIC_KIT_AMON_TOBIN_ALL_FOR_DUST = 20172
STATTRAK_MUSIC_KIT_AMON_TOBIN_ALL_FOR_DUST = 20173
DAILY_PAYMENT_FOR_MAP_DE_CACHE_STARTING_2018_10_10 = 30001
DAILY_PAYMENT_FOR_MAP_CS_AGENCY_STARTING_2018_10_10 = 30002
DAILY_PAYMENT_FOR_MAP_DE_AUSTRIA_STARTING_2018_10_10 = 30003
DAILY_PAYMENT_FOR_MAP_DE_SUBZERO_STARTING_2018_10_10 = 30004
DAILY_PAYMENT_FOR_MAP_DE_BIOME_STARTING_2018_10_10 = 30005
DAILY_PAYMENT_FOR_MAP_DE_ZOO_STARTING_2019_01_24 = 30006
DAILY_PAYMENT_FOR_MAP_DE_ABBEY_STARTING_2019_01_24 = 30007
DAILY_PAYMENT_FOR_MAP_DE_RUBY_STARTING_2019_04_26 = 30008
DAILY_PAYMENT_FOR_MAP_CS_WORKOUT_STARTING_2019_04_26 = 30009
DAILY_PAYMENT_FOR_MAP_DE_BREACH_STARTING_2019_08_01 = 30010
DAILY_PAYMENT_FOR_MAP_DE_SEASIDE_STARTING_2019_08_01 = 30011
DAILY_PAYMENT_FOR_MAP_DE_CACHE_STARTING_2019_10_19 = 30012
DAILY_PAYMENT_FOR_MAP_DE_STUDIO_STARTING_2019_11_19 = 30013
DAILY_PAYMENT_FOR_MAP_DZ_JUNGLETY_STARTING_2019_11_19 = 30014
DAILY_PAYMENT_FOR_MAP_DE_ANUBIS_STARTING_2020_04_01 = 30015
DAILY_PAYMENT_FOR_MAP_DE_CHLORINE_STARTING_2020_04_01 = 30016
DAILY_PAYMENT_FOR_MAP_DE_MUTINY_STARTING_2020_07_24 = 30017
DAILY_PAYMENT_FOR_MAP_DE_SWAMP_STARTING_2020_07_24 = 30018
DAILY_PAYMENT_FOR_MAP_DZ_FROSTBITE_STARTING_2020_12_04 = 30019
DAILY_PAYMENT_FOR_MAP_DE_ENGAGE_STARTING_2020_12_04 = 30020
DAILY_PAYMENT_FOR_MAP_CS_APOLLO_STARTING_2020_12_04 = 30021
DAILY_PAYMENT_FOR_MAP_DE_GUARD_STARTING_2020_12_04 = 30022
DAILY_PAYMENT_FOR_MAP_DE_ELYSION_STARTING_2020_12_04 = 30023
|
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import *
class CreateUserForm(ModelForm):
class Meta:
model = User
fields = '__all__'
class OrderGame(ModelForm):
class Meta:
model = Game
        fields = ['email','fromtime','untiltime','date','grupeName','field','nunofplayers','description','fixed','private','password','phonenumber']
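# Usage sketch (not part of the original file): a hypothetical view named
# `order_game_view` showing the standard ModelForm validate-and-save flow for
# OrderGame; the template name is an assumption for illustration only.
from django.shortcuts import render


def order_game_view(request):
    form = OrderGame(request.POST or None)
    if form.is_valid():
        form.save()  # creates a Game instance from the validated fields
    return render(request, 'order_game.html', {'form': form})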
|
import math
n = float(input('Enter a value: '))
print('The value entered was {} and its integer part is {}'.format(n, math.trunc(n)))
'''
from math import trunc
n = float(input('Enter a value: '))
print('The value entered was {} and its integer part is {}'.format(n, trunc(n)))
'''
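# A further equivalent approach (sketch, not in the original exercise): for a
# float n, int(n) also truncates toward zero, so no import is needed:
#
# n = float(input('Enter a value: '))
# print('The value entered was {} and its integer part is {}'.format(n, int(n)))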
|
from oarepo_oai_pmh_harvester.decorators import rule
from oarepo_oai_pmh_harvester.transformer import OAITransformer
from nr_oai_pmh_harvester.rules.nusl.field24500 import get_title_dict
@rule("nusl", "marcxml", "/24630", phase="pre")
def call_title_alternate_2(el, **kwargs):
return title_alternate_2(el, **kwargs) # pragma: no cover
def title_alternate_2(el, **kwargs):
res = []
record = kwargs["record"]
if isinstance(el, (list, tuple)):
for _ in el: # pragma: no cover
get_volume_issue(el, record, res)
if isinstance(el, dict):
get_volume_issue(el, record, res)
if res:
return {"titleAlternate": res}
return OAITransformer.PROCESSED # pragma: no cover
def get_volume_issue(el, record, res):
res.append(get_title_dict(el, record, first_lang_field="n"))
res.append(get_title_dict(el, record, first_lang_field="p"))
|
keys = {
"product_url": "http://airlabs.xyz/",
"name": "Caspar Chlebowski",
"email": "[email protected]",
"message": "this is a test"
}
|
"""
Test `dodecaphony.fragment` module.
Author: Nikolay Lysenko
"""
from collections import Counter
from typing import Any
import pytest
from dodecaphony.fragment import (
Event,
Fragment,
FragmentParams,
SUPPORTED_DURATIONS,
calculate_durations_of_measures,
calculate_number_of_undefined_events,
create_initial_sonic_content,
create_initial_temporal_content,
distribute_pitch_classes,
find_mutable_sonic_content_indices,
find_mutable_temporal_content_indices,
find_sonorities,
initialize_fragment,
override_calculated_attributes,
set_pitches_of_lower_lines,
set_pitches_of_upper_line,
split_time_span,
validate,
)
@pytest.mark.parametrize(
"fragment, expected",
[
(
# `fragment`
Fragment(
temporal_content=[
[1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0],
[2.0, 4.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
],
sonic_content=[
['B', 'A', 'G', 'C#', 'D#', 'C', 'D', 'A#', 'F#', 'E', 'G#', 'F', 'pause'],
['A#', 'A', 'F#', 'C', 'D', 'B', 'C#', 'G#', 'F', 'D#', 'G', 'E'],
],
meter_numerator=4,
meter_denominator=4,
n_beats=16,
line_ids=[1, 2],
upper_line_highest_position=55,
upper_line_lowest_position=41,
n_melodic_lines_by_group=[1, 1],
n_tone_row_instances_by_group=[1, 1],
mutable_temporal_content_indices=[0, 1],
mutable_sonic_content_indices=[0, 1],
),
# `expected`
[
[[1.0, 1.0, 1.0, 1.0], [2.0, 2.0], [1.0, 1.0, 1.0, 1.0], [2.0, 1.0, 1.0]],
[[2.0, 4.0], [2.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
]
),
]
)
def test_calculate_durations_of_measures(
fragment: Fragment, expected: list[list[list[float]]]
) -> None:
"""Test `calculate_durations_of_measures` function."""
fragment = override_calculated_attributes(fragment)
result = calculate_durations_of_measures(fragment)
assert result == expected
@pytest.mark.parametrize(
"group_index, temporal_content, sonic_content, line_indices, n_tone_row_instances, "
"pauses_fraction, expected",
[
(
# `group_index`
0,
# `temporal_content`
[[], [1.0 for _ in range(12)]],
# `sonic_content`
{
0: {
'pitch_classes': [
'B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F',
'B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F',
'B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F',
]
}
},
# `line_indices`
[0, 1],
# `n_tone_row_instances`
3,
# `pauses_fraction`
0.0,
# `expected`
24
),
]
)
def test_calculate_number_of_undefined_events(
group_index: int, temporal_content: list[list[float]],
sonic_content: dict[int, dict[str, Any]], line_indices: list[int],
n_tone_row_instances: int, pauses_fraction: float, expected: float
) -> None:
"""Test `calculate_number_of_undefined_events` function."""
result = calculate_number_of_undefined_events(
group_index, temporal_content, sonic_content, line_indices, n_tone_row_instances,
pauses_fraction
)
assert result == expected
@pytest.mark.parametrize(
"params, temporal_content, expected_n_pauses_by_group",
[
(
# `params`
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[
{'n_melodic_lines': 1, 'n_tone_row_instances': 1},
],
meter_numerator=4,
meter_denominator=4,
n_measures=100,
line_ids=[1],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.1,
temporal_content={},
sonic_content={}
),
# `temporal_content`
[[1.0, 1.0, 1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]],
# `expected_n_pauses_by_group`
[1]
),
(
# `params`
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[
{'n_melodic_lines': 1, 'n_tone_row_instances': 1},
],
meter_numerator=4,
meter_denominator=4,
n_measures=2,
line_ids=[1],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.1,
temporal_content={},
sonic_content={
0: {
'pitch_classes': [
'pause', 'B', 'A#', 'G', 'C#', 'D#', 'C',
'D', 'A', 'F#', 'E', 'G#', 'F', 'pause'
]
}
}
),
# `temporal_content`
[[1.0, 1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]],
# `expected_n_pauses_by_group`
[2]
),
]
)
def test_create_initial_sonic_content(
params: FragmentParams, temporal_content: list[list[float]],
expected_n_pauses_by_group: list[int]
) -> None:
"""Test `create_initial_sonic_content` function."""
sonic_content = create_initial_sonic_content(params, temporal_content)
assert len(sonic_content) == len(params.groups)
zipped = zip(sonic_content, expected_n_pauses_by_group)
for i, (line_content, expected_n_pauses) in enumerate(zipped):
counter = Counter(line_content)
for pitch_class in params.tone_row:
assert counter[pitch_class] == params.groups[i]['n_tone_row_instances']
assert counter['pause'] == expected_n_pauses
@pytest.mark.parametrize(
"params, expected_n_events_by_line",
[
(
# `params`
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[
{'n_melodic_lines': 1, 'n_tone_row_instances': 1},
{'n_melodic_lines': 3, 'n_tone_row_instances': 6},
],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2, 3, 4],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.1,
temporal_content={},
sonic_content={}
),
# `expected_n_events_by_line`
[13, 27, 27, 26]
),
(
# `params`
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[
{'n_melodic_lines': 1, 'n_tone_row_instances': 1},
{'n_melodic_lines': 3, 'n_tone_row_instances': 6},
],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2, 3, 4],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.1,
temporal_content={1: {'durations': [4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0]}},
sonic_content={}
),
# `expected_n_events_by_line`
[13, 8, 36, 36]
),
(
# `params`
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[
{'n_melodic_lines': 1, 'n_tone_row_instances': 1},
{'n_melodic_lines': 3, 'n_tone_row_instances': 6},
],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2, 3, 4],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.1,
temporal_content={},
sonic_content={
0: {
'pitch_classes': [
'B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'
]
}
}
),
# `expected_n_events_by_line`
[12, 27, 27, 26]
),
]
)
def test_create_initial_temporal_content(
params: FragmentParams, expected_n_events_by_line: list[int]
) -> None:
"""Test `create_initial_temporal_content` function."""
temporal_content = create_initial_temporal_content(params)
assert len(temporal_content) == len(params.line_ids)
n_events_by_line = [len(x) for x in temporal_content]
assert n_events_by_line == expected_n_events_by_line
@pytest.mark.parametrize(
"fragment, expected",
[
(
# `fragment`
Fragment(
temporal_content=[
[4.0],
[3.0, 1.0],
[2.0, 2.0],
],
sonic_content=[
['C'],
['D', 'E', 'F', 'G'],
],
meter_numerator=4,
meter_denominator=4,
n_beats=4,
line_ids=[1, 2, 3],
upper_line_highest_position=88,
upper_line_lowest_position=1,
n_melodic_lines_by_group=[1, 2],
n_tone_row_instances_by_group=[0, 0],
mutable_temporal_content_indices=[0, 1, 2],
mutable_sonic_content_indices=[0, 1],
),
# `expected`
[
[
Event(line_index=0, start_time=0.0, duration=4.0, pitch_class='C'),
],
[
Event(line_index=1, start_time=0.0, duration=3.0, pitch_class='D'),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='G'),
],
[
Event(line_index=2, start_time=0.0, duration=2.0, pitch_class='E'),
Event(line_index=2, start_time=2.0, duration=2.0, pitch_class='F'),
]
]
),
]
)
def test_distribute_pitch_classes(fragment: Fragment, expected: list[list[Event]]) -> None:
"""Test `distribute_pitch_classes` function."""
result = distribute_pitch_classes(fragment)
assert result == expected
@pytest.mark.parametrize(
"params, expected",
[
(
# `params`
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[
{'n_melodic_lines': 1, 'n_tone_row_instances': 1},
{'n_melodic_lines': 3, 'n_tone_row_instances': 6},
],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2, 3, 4],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.1,
temporal_content={},
sonic_content={
0: {
'pitch_classes': [
'B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'
],
'immutable': True
}
}
),
# `expected`
[1]
),
]
)
def test_find_mutable_sonic_content_indices(params: FragmentParams, expected: list[int]) -> None:
"""Test `find_mutable_sonic_content_indices` function."""
result = find_mutable_sonic_content_indices(params)
assert result == expected
@pytest.mark.parametrize(
"params, expected",
[
(
# `params`
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[
{'n_melodic_lines': 1, 'n_tone_row_instances': 1},
{'n_melodic_lines': 3, 'n_tone_row_instances': 6},
],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2, 3, 4],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.1,
temporal_content={
1: {
'durations': [4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0],
'immutable': True
}
}
),
# `expected`
[0, 2, 3]
),
]
)
def test_find_mutable_temporal_content_indices(
params: FragmentParams, expected: list[int]
) -> None:
"""Test `find_mutable_temporal_content_indices` function."""
result = find_mutable_temporal_content_indices(params)
assert result == expected
@pytest.mark.parametrize(
"melodic_lines, expected",
[
(
# `melodic_lines`
[
[
Event(line_index=0, start_time=0.0, duration=3.0),
Event(line_index=0, start_time=3.0, duration=1.0),
],
[
Event(line_index=1, start_time=0.0, duration=2.0),
Event(line_index=1, start_time=2.0, duration=2.0),
],
[
Event(line_index=2, start_time=0.0, duration=2.0),
Event(line_index=2, start_time=2.0, duration=2.0),
],
],
# `expected`
[
[
Event(line_index=0, start_time=0.0, duration=3.0),
Event(line_index=1, start_time=0.0, duration=2.0),
Event(line_index=2, start_time=0.0, duration=2.0),
],
[
Event(line_index=0, start_time=0.0, duration=3.0),
Event(line_index=1, start_time=2.0, duration=2.0),
Event(line_index=2, start_time=2.0, duration=2.0),
],
[
Event(line_index=0, start_time=3.0, duration=1.0),
Event(line_index=1, start_time=2.0, duration=2.0),
Event(line_index=2, start_time=2.0, duration=2.0),
],
]
),
]
)
def test_find_sonorities(melodic_lines: list[list[Event]], expected: list[list[Event]]) -> None:
"""Test `find_sonorities` function."""
result = find_sonorities(melodic_lines)
assert result == expected
@pytest.mark.parametrize(
"params",
[
(
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[
{'n_melodic_lines': 1, 'n_tone_row_instances': 1},
{'n_melodic_lines': 3, 'n_tone_row_instances': 6},
],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2, 3, 4],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.1
)
),
]
)
def test_initialize_fragment(params: FragmentParams) -> None:
"""Test `initialize_fragment` function."""
fragment = initialize_fragment(params)
for melodic_line in fragment.melodic_lines:
for event in melodic_line:
assert event.position_in_semitones is not None or event.pitch_class == 'pause'
@pytest.mark.parametrize(
"fragment, max_interval, default_shift, expected_melodic_lines, expected_sonorities",
[
(
# `fragment`
Fragment(
temporal_content=[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
],
sonic_content=[
['C', 'A', 'D', 'F'],
['D', 'B', 'G', 'A'],
],
meter_numerator=4,
meter_denominator=4,
n_beats=4,
line_ids=[1, 2],
upper_line_highest_position=55,
upper_line_lowest_position=41,
n_melodic_lines_by_group=[1, 1],
n_tone_row_instances_by_group=[0, 0],
mutable_temporal_content_indices=[0, 1],
mutable_sonic_content_indices=[0, 1],
),
# `max_interval`
16,
# `default_shift`
7,
# `expected_melodic_lines`
[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='C', position_in_semitones=51),
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='A', position_in_semitones=48),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
],
[
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D', position_in_semitones=41),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='B', position_in_semitones=38),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G', position_in_semitones=46),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A', position_in_semitones=36),
],
],
# `expected_sonorities`
[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='C', position_in_semitones=51),
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D', position_in_semitones=41),
],
[
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='A', position_in_semitones=48),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='B', position_in_semitones=38),
],
[
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G', position_in_semitones=46),
],
[
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A', position_in_semitones=36),
],
]
),
(
# `fragment`
Fragment(
temporal_content=[
[2.0, 1.0, 1.0],
[2.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
],
sonic_content=[
['C', 'D', 'F'],
['C', 'D', 'F'],
['G', 'B', 'G', 'A'],
],
meter_numerator=4,
meter_denominator=4,
n_beats=4,
line_ids=[1, 2, 3],
upper_line_highest_position=55,
upper_line_lowest_position=41,
n_melodic_lines_by_group=[1, 1, 1],
n_tone_row_instances_by_group=[0, 0, 0],
mutable_temporal_content_indices=[0, 1, 2],
mutable_sonic_content_indices=[0, 1, 2],
),
# `max_interval`
16,
# `default_shift`
7,
# `expected_melodic_lines`
[
[
Event(line_index=0, start_time=0.0, duration=2.0, pitch_class='C', position_in_semitones=51),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
],
[
Event(line_index=1, start_time=0.0, duration=2.0, pitch_class='C', position_in_semitones=39),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=41),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=32),
],
[
Event(line_index=2, start_time=0.0, duration=1.0, pitch_class='G', position_in_semitones=34),
Event(line_index=2, start_time=1.0, duration=1.0, pitch_class='B', position_in_semitones=38),
Event(line_index=2, start_time=2.0, duration=1.0, pitch_class='G', position_in_semitones=34),
Event(line_index=2, start_time=3.0, duration=1.0, pitch_class='A', position_in_semitones=24),
],
],
# `expected_sonorities`
[
[
Event(line_index=0, start_time=0.0, duration=2.0, pitch_class='C', position_in_semitones=51),
Event(line_index=1, start_time=0.0, duration=2.0, pitch_class='C', position_in_semitones=39),
Event(line_index=2, start_time=0.0, duration=1.0, pitch_class='G', position_in_semitones=34),
],
[
Event(line_index=0, start_time=0.0, duration=2.0, pitch_class='C', position_in_semitones=51),
Event(line_index=1, start_time=0.0, duration=2.0, pitch_class='C', position_in_semitones=39),
Event(line_index=2, start_time=1.0, duration=1.0, pitch_class='B', position_in_semitones=38),
],
[
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=41),
Event(line_index=2, start_time=2.0, duration=1.0, pitch_class='G', position_in_semitones=34),
],
[
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=32),
Event(line_index=2, start_time=3.0, duration=1.0, pitch_class='A', position_in_semitones=24),
],
]
),
(
# `fragment`
Fragment(
temporal_content=[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
],
sonic_content=[
['C', 'A', 'D', 'F'],
['D', 'pause', 'G', 'A'],
['D', 'B', 'G', 'A'],
],
meter_numerator=4,
meter_denominator=4,
n_beats=4,
line_ids=[1, 2, 3],
upper_line_highest_position=55,
upper_line_lowest_position=41,
n_melodic_lines_by_group=[1, 1, 1],
n_tone_row_instances_by_group=[0, 0, 0],
mutable_temporal_content_indices=[0, 1, 2],
mutable_sonic_content_indices=[0, 1, 2],
),
# `max_interval`
16,
# `default_shift`
24,
# `expected_melodic_lines`
[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='C', position_in_semitones=51),
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='A', position_in_semitones=48),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
],
[
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D', position_in_semitones=41),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='pause', position_in_semitones=None),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G', position_in_semitones=46),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A', position_in_semitones=36),
],
[
Event(line_index=2, start_time=0.0, duration=1.0, pitch_class='D', position_in_semitones=29),
Event(line_index=2, start_time=1.0, duration=1.0, pitch_class='B', position_in_semitones=14),
Event(line_index=2, start_time=2.0, duration=1.0, pitch_class='G', position_in_semitones=22),
Event(line_index=2, start_time=3.0, duration=1.0, pitch_class='A', position_in_semitones=24),
],
],
# `expected_sonorities`
[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='C', position_in_semitones=51),
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D', position_in_semitones=41),
Event(line_index=2, start_time=0.0, duration=1.0, pitch_class='D', position_in_semitones=29),
],
[
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='A', position_in_semitones=48),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='pause', position_in_semitones=None),
Event(line_index=2, start_time=1.0, duration=1.0, pitch_class='B', position_in_semitones=14),
],
[
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G', position_in_semitones=46),
Event(line_index=2, start_time=2.0, duration=1.0, pitch_class='G', position_in_semitones=22),
],
[
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A', position_in_semitones=36),
Event(line_index=2, start_time=3.0, duration=1.0, pitch_class='A', position_in_semitones=24),
],
]
),
]
)
def test_set_pitches_of_lower_lines(
fragment: Fragment,
max_interval: int,
default_shift: int,
expected_melodic_lines: list[list[Event]],
expected_sonorities: list[list[Event]]
) -> None:
"""Test `set_pitches_of_lower_lines` function."""
    # The three lines below are used instead of setting all attributes at initialization,
    # because `sonorities` and `melodic_lines` must reference the same events.
fragment.melodic_lines = distribute_pitch_classes(fragment)
fragment.sonorities = find_sonorities(fragment.melodic_lines)
fragment = set_pitches_of_upper_line(fragment)
fragment = set_pitches_of_lower_lines(fragment, max_interval, default_shift)
assert fragment.melodic_lines == expected_melodic_lines
assert fragment.sonorities == expected_sonorities
@pytest.mark.parametrize(
"fragment, expected",
[
(
# `fragment`
Fragment(
temporal_content=[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
],
sonic_content=[
['C', 'A', 'D', 'F'],
['D', 'B', 'G', 'A'],
],
meter_numerator=4,
meter_denominator=4,
n_beats=4,
line_ids=[1, 2],
upper_line_highest_position=55,
upper_line_lowest_position=41,
n_melodic_lines_by_group=[1, 1],
n_tone_row_instances_by_group=[0, 0],
mutable_temporal_content_indices=[0, 1],
mutable_sonic_content_indices=[0, 1],
melodic_lines=[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='C'),
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='A'),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D'),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F'),
],
[
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D'),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='B'),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G'),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A'),
],
],
),
# `expected`
[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='C', position_in_semitones=51),
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='A', position_in_semitones=48),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
],
[
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D'),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='B'),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G'),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A'),
],
],
),
(
# `fragment`
Fragment(
temporal_content=[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
],
sonic_content=[
['pause', 'A', 'D', 'F'],
['D', 'B', 'G', 'A'],
],
meter_numerator=4,
meter_denominator=4,
n_beats=4,
line_ids=[1, 2],
upper_line_highest_position=55,
upper_line_lowest_position=41,
n_melodic_lines_by_group=[1, 1],
n_tone_row_instances_by_group=[0, 0],
mutable_temporal_content_indices=[0, 1],
mutable_sonic_content_indices=[0, 1],
melodic_lines=[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='pause'),
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='A'),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D'),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F'),
],
[
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D'),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='B'),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G'),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A'),
],
],
),
# `expected`
[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='pause', position_in_semitones=None),
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='A', position_in_semitones=48),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
],
[
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D'),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='B'),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G'),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A'),
],
],
),
(
# `fragment`
Fragment(
temporal_content=[
[1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0],
],
sonic_content=[
['C', 'pause', 'D', 'F'],
['D', 'B', 'G', 'A'],
],
meter_numerator=4,
meter_denominator=4,
n_beats=4,
line_ids=[1, 2],
upper_line_highest_position=55,
upper_line_lowest_position=41,
n_melodic_lines_by_group=[1, 1],
n_tone_row_instances_by_group=[0, 0],
mutable_temporal_content_indices=[0, 1],
mutable_sonic_content_indices=[0, 1],
melodic_lines=[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='C'),
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='pause'),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D'),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F'),
],
[
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D'),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='B'),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G'),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A'),
],
],
),
# `expected`
[
[
Event(line_index=0, start_time=0.0, duration=1.0, pitch_class='C', position_in_semitones=51),
Event(line_index=0, start_time=1.0, duration=1.0, pitch_class='pause', position_in_semitones=None),
Event(line_index=0, start_time=2.0, duration=1.0, pitch_class='D', position_in_semitones=53),
Event(line_index=0, start_time=3.0, duration=1.0, pitch_class='F', position_in_semitones=44),
],
[
Event(line_index=1, start_time=0.0, duration=1.0, pitch_class='D'),
Event(line_index=1, start_time=1.0, duration=1.0, pitch_class='B'),
Event(line_index=1, start_time=2.0, duration=1.0, pitch_class='G'),
Event(line_index=1, start_time=3.0, duration=1.0, pitch_class='A'),
],
],
),
]
)
def test_set_pitches_of_upper_line(fragment: Fragment, expected: list[list[Event]]) -> None:
"""Test `set_pitches_of_upper_line` function."""
fragment = set_pitches_of_upper_line(fragment)
assert fragment.melodic_lines == expected
@pytest.mark.parametrize(
"n_measures, n_events, meter_numerator",
[
(2, 9, 4),
(8, 51, 3),
]
)
def test_split_time_span(n_measures: int, n_events: int, meter_numerator: float) -> None:
"""Test `split_time_span` function."""
durations = split_time_span(n_measures, n_events, meter_numerator)
assert len(durations) == n_events
assert sum(durations) == n_measures * meter_numerator
for duration in durations:
assert duration in SUPPORTED_DURATIONS
@pytest.mark.parametrize(
"n_measures, n_events, meter_numerator, match",
[
(4, 3, 4, "Average duration of an event is longer than semibreve."),
(1, 20, 4, "The number of events is so high that some of them are too short.")
]
)
def test_split_time_span_with_invalid_arguments(
n_measures: int, n_events: int, meter_numerator: float, match: str
) -> None:
"""Test `split_time_span` function with invalid arguments."""
with pytest.raises(ValueError, match=match):
split_time_span(n_measures, n_events, meter_numerator)
@pytest.mark.parametrize(
"params, match",
[
(
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[{'n_melodic_lines': 1, 'n_tone_row_instances': 2}],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.0
),
"Number of lines in `groups` is not equal to that in `line_ids`."
),
(
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[{'n_melodic_lines': 2, 'n_tone_row_instances': 2}],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 1],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.0
),
"IDs of melodic lines must be unique."
),
(
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[{'n_melodic_lines': 2, 'n_tone_row_instances': 2}],
meter_numerator=5,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.0
),
"Meter numerator = 5 is not supported."
),
(
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[{'n_melodic_lines': 2, 'n_tone_row_instances': 2}],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.0,
temporal_content={
0: {'durations': [1.0 for _ in range(40)]},
1: {'durations': [1.0]},
}
),
"A line has duration that is not equal to that of the fragment."
),
(
FragmentParams(
tone_row=['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F'],
groups=[{'n_melodic_lines': 2, 'n_tone_row_instances': 2}],
meter_numerator=4,
meter_denominator=4,
n_measures=8,
line_ids=[1, 2],
upper_line_highest_note='E6',
upper_line_lowest_note='E4',
pauses_fraction=0.0,
sonic_content={
0: {
'pitch_classes': [
'B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F',
'B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F',
'B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F',
]
},
}
),
"A group has wrong number of tone row instances."
),
]
)
def test_validate(params: FragmentParams, match: str) -> None:
"""Test `validate` function."""
with pytest.raises(ValueError, match=match):
validate(params)
@pytest.mark.parametrize(
"first_temporal_content, second_temporal_content, first_sonic_content, second_sonic_content, "
"expected",
[
(
[[1.0 for _ in range(12)]],
[[1.0 for _ in range(12)]],
[['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F']],
[['B', 'A#', 'G', 'C#', 'D#', 'C', 'D', 'A', 'F#', 'E', 'G#', 'F']],
True
),
]
)
def test_equality_of_fragments(
first_temporal_content: list[list[float]], second_temporal_content: list[list[float]],
first_sonic_content: list[list[str]], second_sonic_content: list[list[str]],
expected: bool
) -> None:
"""Test `__eq__` method of `Fragment` class."""
first_fragment = Fragment(
first_temporal_content,
first_sonic_content,
meter_numerator=4,
meter_denominator=4,
n_beats=12,
line_ids=[1],
upper_line_highest_position=88,
upper_line_lowest_position=0,
n_melodic_lines_by_group=[1],
n_tone_row_instances_by_group=[1],
mutable_temporal_content_indices=[0],
mutable_sonic_content_indices=[0]
)
first_fragment = override_calculated_attributes(first_fragment)
second_fragment = Fragment(
second_temporal_content,
second_sonic_content,
meter_numerator=4,
meter_denominator=4,
n_beats=12,
line_ids=[1],
upper_line_highest_position=88,
upper_line_lowest_position=0,
n_melodic_lines_by_group=[1],
n_tone_row_instances_by_group=[1],
mutable_temporal_content_indices=[0],
mutable_sonic_content_indices=[0]
)
second_fragment = override_calculated_attributes(second_fragment)
result = first_fragment == second_fragment
assert result == expected
|
import glob
import os
import cv2
import numpy as np
import pickle
WIDTH = 224
HEIGHT = 224
test_data_set = ['00']
train_data_set = ['01','02','03','04']
inp_dir = '../Prepare_dataset_resnet/output/'
out_pickle_file = 'medico_v2_ensophagitis_normal_z_line_dataset.pickle'
imgs_train = []
imgs_test = []
X_test = []
y_test = []
X_train = []
y_train = []
# Collect image paths and the matching integer labels for each training folder.
for folder in train_data_set:
    imgs_train += glob.glob(inp_dir + folder + '/*')
    with open(inp_dir + 'labels_' + folder + '.txt') as fo:
        for line in fo:
            y_train.append(int(line))
# Collect image paths and the matching integer labels for each test folder.
for folder in test_data_set:
    imgs_test += glob.glob(inp_dir + folder + '/*')
    with open(inp_dir + 'labels_' + folder + '.txt') as fo:
        for line in fo:
            y_test.append(int(line))
for imgfile in imgs_test:
img = cv2.imread(imgfile)
img = cv2.resize(img,(WIDTH,HEIGHT))
X_test.append(img)
for imgfile in imgs_train:
img = cv2.imread(imgfile)
img = cv2.resize(img,(WIDTH,HEIGHT))
X_train.append(img)
X_test = np.array(X_test)
y_test = np.array(y_test).reshape((len(y_test),1))
X_train = np.array(X_train)
y_train = np.array(y_train).reshape((len(y_train),1))
print('X_test_shape: ',X_test.shape)
print('y_test_shape: ',y_test.shape)
print('X_train_shape: ',X_train.shape)
print('y_train_shape: ',y_train.shape)
of = open(out_pickle_file,"wb")
pickle.dump(((X_train,y_train),(X_test,y_test)),of)
of.close() |
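# Sanity-check sketch (not part of the original script): reload the pickle just
# written and confirm the nested tuple layout matches the dump above.
with open(out_pickle_file, 'rb') as f:
    (X_train_loaded, y_train_loaded), (X_test_loaded, y_test_loaded) = pickle.load(f)
print('Reloaded shapes:', X_train_loaded.shape, y_train_loaded.shape,
      X_test_loaded.shape, y_test_loaded.shape)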
# Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from . import alignment_key
class PileupElement(object):
'''
A PileupElement represents the segment of an alignment that aligns to a
particular base in the reference.
Attributes
----------
locus : Varcode.Locus
The reference locus. Must be length 1, i.e. a single base.
offset_start : int
0-based start offset into the alignment sequence, inclusive
offset_end : int
0-based end offset into the alignment sequence, exclusive
alignment : pysam.AlignedSegment
pysam alignment instance
alignment_key : tuple
value computed from the alignment instance that uniquely specifies its
properties. Used for comparisons since pysam.AlignedSegment instances
do not support a useful notion of equality (they compare using object
identity). See `read_evidence.alignment_key` for the implementation of
this key.
'''
def __init__(self, locus, offset_start, offset_end, alignment):
'''
Construct a PileupElement object.
'''
assert offset_end >= offset_start, \
"offset_start=%d > offset_end=%d" % (offset_start, offset_end)
self.locus = locus
self.offset_start = offset_start
self.offset_end = offset_end
self.alignment = alignment
self.alignment_key = alignment_key(self.alignment)
def fields(self):
'''
Fields that should be considered for our notion of object equality.
'''
return (
self.locus, self.offset_start, self.offset_end, self.alignment_key)
def __eq__(self, other):
return hasattr(other, "fields") and self.fields() == other.fields()
def __hash__(self):
return hash(self.fields())
@property
def bases(self):
'''
The sequenced bases in the alignment that align to this locus in the
genome, as a string.
Empty string in the case of a deletion. String of length > 1 if there
is an insertion here.
'''
sequence = self.alignment.query_sequence
assert self.offset_end <= len(sequence), \
"End offset=%d > sequence length=%d. CIGAR=%s. SEQUENCE=%s" % (
self.offset_end,
len(sequence),
self.alignment.cigarstring,
sequence)
return sequence[self.offset_start:self.offset_end]
@property
def base_qualities(self):
'''
The phred-scaled base quality scores corresponding to `self.bases`, as
a list.
'''
return self.alignment.query_qualities[
self.offset_start:self.offset_end]
@property
def min_base_quality(self):
'''
        The minimum of the base qualities. In the case of a deletion, where
        there are no bases in this PileupElement, the minimum is taken over
        the sequenced bases immediately before and after the deletion.
'''
try:
return min(self.base_qualities)
except ValueError:
# We are mid-deletion. We return the minimum of the adjacent bases.
assert self.offset_start == self.offset_end
adjacent_qualities = [
self.alignment.query_qualities[offset]
for offset in [self.offset_start - 1, self.offset_start]
if 0 <= offset < len(self.alignment.query_qualities)
]
return min(adjacent_qualities)
@staticmethod
def from_pysam_alignment(locus, pileup_read):
'''
Factory function to create a new PileupElement from a pysam
`PileupRead`.
Parameters
----------
locus : varcode.Locus
Reference locus for which to construct a PileupElement. Must
include exactly one base.
pileup_read : pysam.calignmentfile.PileupRead
pysam PileupRead instance. Its alignment must overlap the locus.
Returns
----------
PileupElement
'''
assert not pileup_read.is_refskip, (
"Can't create a PileupElement in a refskip (typically an intronic "
"gap in an RNA alignment)")
# Pysam has an `aligned_pairs` method that gives a list of
# (offset, locus) pairs indicating the correspondence between bases in
# the alignment and reference loci. Here we use that to compute
# offset_start and offset_end.
#
# This is slightly tricky in the case of insertions and deletions.
# Here are examples of the desired logic.
#
# Target locus = 1000
#
# (1) Simple case: matching bases.
#
# OFFSET LOCUS
# 0 999
# 1 1000
# 2 1001
#
# DESIRED RESULT: offset_start=1, offset_end=2.
#
#
# (2) A 1 base insertion at offset 2.
#
# OFFSET LOCUS
# 0 999
# 1 1000
# 2 None
# 3 1001
#
# DESIRED RESULT: offset_start = 1, offset_end=3.
#
#
# (3) A 2 base deletion at loci 1000 and 1001.
#
# OFFSET LOCUS
# 0 999
# None 1000
# None 1001
# 1 1002
#
# DESIRED RESULT: offset_start = 1, offset_end=1.
#
offset_start = None
offset_end = len(pileup_read.alignment.query_sequence)
# TODO: doing this with get_blocks() may be faster.
for (offset, position) in pileup_read.alignment.aligned_pairs:
if offset is not None and position is not None:
if position == locus.position:
offset_start = offset
elif position > locus.position:
offset_end = offset
break
if offset_start is None:
offset_start = offset_end
assert pileup_read.is_del == (offset_end - offset_start == 0), \
"Deletion=%s but | [%d,%d) |=%d for locus %d in: \n%s" % (
pileup_read.is_del,
offset_start,
offset_end,
offset_end - offset_start,
locus.position,
pileup_read.alignment.aligned_pairs)
assert offset_end >= offset_start
result = PileupElement(
locus, offset_start, offset_end, pileup_read.alignment)
return result
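# Worked illustration (not part of the library): the aligned-pairs scan in
# `from_pysam_alignment` reduces to the pure-Python computation below over a
# list of (offset, reference_position) tuples. The commented call reproduces
# case (2) above, a 1-base insertion after locus 1000, for a hypothetical
# 4-base read.
def _illustrate_offsets(aligned_pairs, locus_position, query_length):
    offset_start = None
    offset_end = query_length
    for (offset, position) in aligned_pairs:
        if offset is not None and position is not None:
            if position == locus_position:
                offset_start = offset
            elif position > locus_position:
                offset_end = offset
                break
    if offset_start is None:
        offset_start = offset_end
    return offset_start, offset_end
# _illustrate_offsets([(0, 999), (1, 1000), (2, None), (3, 1001)], 1000, 4)
# returns (1, 3), matching the desired result for the insertion example.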
|
import warnings
from ..water_viscosity_korson_1969 import water_viscosity
def test_water_viscosity():
    warnings.filterwarnings("error")
    # Reference values from Table II (p. 38):
assert abs(water_viscosity(273.15 + 0) - 1.7916) < 5e-4
assert abs(water_viscosity(273.15 + 5) - 1.5192) < 5e-4
assert abs(water_viscosity(273.15 + 10) - 1.3069) < 5e-4
assert abs(water_viscosity(273.15 + 15) - 1.1382) < 5e-4
assert abs(water_viscosity(273.15 + 20) - 1.0020) < 5e-4
assert abs(water_viscosity(273.15 + 25) - 0.8903) < 5e-4
assert abs(water_viscosity(273.15 + 30) - 0.7975) < 5e-4
assert abs(water_viscosity(273.15 + 35) - 0.7195) < 5e-4
assert abs(water_viscosity(273.15 + 40) - 0.6532) < 5e-4
assert abs(water_viscosity(273.15 + 45) - 0.5963) < 5e-4
assert abs(water_viscosity(273.15 + 50) - 0.5471) < 5e-4
assert abs(water_viscosity(273.15 + 55) - 0.5042) < 5e-4
assert abs(water_viscosity(273.15 + 60) - 0.4666) < 5e-4
assert abs(water_viscosity(273.15 + 65) - 0.4334) < 5e-4
assert abs(water_viscosity(273.15 + 70) - 0.4039) < 5e-4
assert abs(water_viscosity(273.15 + 75) - 0.3775) < 5e-4
assert abs(water_viscosity(273.15 + 80) - 0.3538) < 5e-4
assert abs(water_viscosity(273.15 + 85) - 0.3323) < 5e-4
assert abs(water_viscosity(273.15 + 90) - 0.3128) < 5e-4
assert abs(water_viscosity(273.15 + 95) - 0.2949) < 6e-4
assert abs(water_viscosity(273.15 + 100) - 0.2783) < 2e-3
warnings.resetwarnings()
|
from typing import List, Dict, Any, Optional
from mage.graph_coloring_module.components.individual import Individual
from mage.graph_coloring_module.graph import Graph
from mage.graph_coloring_module.components.correlation_population import (
CorrelationPopulation,
)
from mage.graph_coloring_module.utils.generate_individuals import generate_individuals
from mage.graph_coloring_module.components.population import Population
class ChainPopulationFactory:
@staticmethod
def create(
graph: Graph, parameters: Dict[str, Any] = None
) -> Optional[List[Population]]:
individuals = generate_individuals(graph, parameters)
return [ChainPopulation(graph, individuals)]
class ChainPopulation(CorrelationPopulation):
"""A class that represents a chain population. In this
population, the last individual is followed by the first
individual, and the predecessor of the first individual
is the last individual."""
def __init__(self, graph: Graph, individuals: List[Individual]):
super().__init__(graph, individuals)
self._set_correlations()
def _get_prev_correlation_index(self, index: int) -> int:
"""Returns the index of the correlation with the previous
individual in the chain of individuals."""
return index - 1 if index - 1 >= 0 else self.size - 1
def _get_next_correlation_index(self, index: int) -> int:
"""Returns the index of the correlation with the next
individual in the chain of individuals."""
return index
def get_prev_individual(self, index: int) -> Individual:
"""Returns the individual that precedes the individual on the given index."""
if index < 0 or index >= self.size:
raise IndexError()
prev_ind = index - 1 if index - 1 >= 0 else self.size - 1
return self.individuals[prev_ind]
def get_next_individual(self, index: int) -> Individual:
"""Returns the individual that follows the individual on the given index."""
if index < 0 or index >= self.size:
raise IndexError()
next_ind = index + 1 if index + 1 < self.size else 0
return self.individuals[next_ind]
def _set_correlations(self) -> None:
for i in range(self.size):
j = i + 1 if i + 1 < self.size else 0
c = self._calculate_correlation(self.individuals[i], self.individuals[j])
self._correlation.append(c)
self._cumulative_correlation += c
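# Illustration (not part of the module): correlation i is stored between
# individual i and individual (i + 1) % size, so the index helpers above reduce
# to simple wrap-around arithmetic. For a hypothetical chain of size 4:
#
#   size = 4
#   [(i - 1) % size for i in range(size)]  # previous correlation -> [3, 0, 1, 2]
#   [i for i in range(size)]               # next correlation     -> [0, 1, 2, 3]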
|
import asyncio
import discord
from redbot.core import checks, commands
from redbot.core.utils.chat_formatting import box
from redbot.core.utils.predicates import MessagePredicate
from .core import Core
class Captcher(Core):
    @commands.group(name="setcaptcher", aliases=["captcherset"])
@commands.guild_only()
@checks.admin_or_permissions(manage_roles=True)
async def config(self, ctx: commands.GuildContext):
"""
Configure settings for Captcher.
"""
pass
@config.command()
async def settings(self, ctx: commands.Context):
"""Show settings for your guild."""
params = {
"autorole": "Automatic role to give",
"temprole": "Temporary role to give through verification",
"verifchannel": "Channel to send captcha",
"logschannel": "Channel to logs actions",
"active": "Captcher activated",
}
settings = await self.data.guild(ctx.guild).all()
message = ""
for setting in settings.items():
parameter = params[setting[0]]
rawvalue = setting[1]
value = rawvalue
if setting[0] in ("verifchannel", "logschannel"):
value = f"<#{rawvalue}>" # Channel mention.
if setting[0] in ("autorole", "temprole"):
value = f"<@&{rawvalue}>" # Role mention, but don't worry, bot won't ping.
if rawvalue is None:
value = "Not set."
message += "{param}: {val}\n".format(param=parameter, val=value)
await ctx.send(message, allowed_mentions=discord.AllowedMentions(roles=False))
@config.command()
async def autorole(self, ctx: commands.Context, *, role_to_give: discord.Role = None):
"""
Give a role when the user successfully completed the captcha.
        If a role is already set and you don't provide a role, the current role
        will be removed.
"""
if role_to_give is None and await self.data.guild(ctx.guild).autorole():
await self.data.guild(ctx.guild).autorole.clear()
await ctx.send("Role configuration removed.")
return
if role_to_give:
if ctx.author.top_role < role_to_give:
await ctx.send(
(
"This role is higher than your highest role in the role "
"hierarchy, choose another role, ask someone else to use this "
"command, or get a higher role."
)
)
return
if ctx.me.top_role < role_to_give:
await ctx.send(
(
"This role is higher than my highest role in the hierarchy, "
"give me an another role, put my role higher or put the role "
"lower."
)
)
return
await self.data.guild(ctx.guild).autorole.set(role_to_give.id)
message = "{role.name} will be given when members pass the captcha.".format(
role=role_to_give
)
else:
await ctx.send_help()
message = box("There's no role in configuration.")
await ctx.send(message)
@config.command()
async def temprole(self, ctx: commands.Context, *, temporary_role: discord.Role = None):
"""
Role to give when someone join, it will be automatically removed after
passing captcha.
        If a role is already set and you don't provide a role, the current role
        will be removed.
"""
if temporary_role is None and await self.data.guild(ctx.guild).temprole():
await self.data.guild(ctx.guild).temprole.clear()
await ctx.send("Temporary role configuration removed.")
return
if not temporary_role:
await ctx.send_help()
await ctx.send(box("There's no temporary role in configuration."))
return
if temporary_role:
if ctx.author.top_role < temporary_role:
await ctx.send(
(
"This role is higher than your highest role in the role "
"hierarchy, choose another role, ask someone else to use this "
"command, or get a higher role."
)
)
return
if ctx.me.top_role < temporary_role:
await ctx.send(
(
"This role is higher than my highest role in the hierarchy, "
"give me an another role, put my role higher or put the role "
"lower."
)
)
return
await self.data.guild(ctx.guild).temprole.set(temporary_role.id)
await ctx.send(
(
"{role.name} will be given when members start the captcha.".format(
role=temporary_role
)
)
)
@config.command(alias=["verificationchannel", "verifchan"])
async def verifchannel(self, ctx: commands.Context, *, channel: discord.TextChannel = None):
"""
Set where the captcha must be sent.
"""
        if channel is None and await self.data.guild(ctx.guild).verifchannel():
            await self.data.guild(ctx.guild).verifchannel.clear()
            await ctx.send("Verification channel configuration removed.")
            return
if not channel:
await ctx.send_help()
await ctx.send(box("There's no verification channel configured."))
return
needed_permissions = [
"manage_messages",
"read_messages",
"send_messages",
"manage_roles",
"attach_files",
]
perms = self._permissions_checker(needed_permissions, channel)
if isinstance(perms, str):
message = perms
else:
await self.data.guild(ctx.guild).verifchannel.set(channel.id)
message = "Channel has been configured."
await ctx.send(message)
@config.command(alias=["logchan", "logschan", "logchannel"])
async def logschannel(self, ctx: commands.Context, *, channel: discord.TextChannel = None):
"""
        Set the logs channel; strongly recommended so you can see who passed or
        failed verification.
"""
if channel is None and await self.data.guild(ctx.guild).logschannel():
await self.data.guild(ctx.guild).logschannel.clear()
await ctx.send("Logging channel configuration removed.")
return
if not channel:
await ctx.send_help()
await ctx.send(box("There's no logs channel configured"))
return
needed_permissions = [
"read_messages",
"send_messages",
]
checker = self._permissions_checker(needed_permissions, channel)
if isinstance(checker, str):
await ctx.send(checker)
return # Missing permission
await self.data.guild(ctx.guild).logschannel.set(channel.id)
await ctx.send("{channel.name} will be used for captcha logs.".format(channel=channel))
@config.command()
async def activate(self, ctx: commands.Context, true_or_false: bool = None):
"""
Set if Captcher is activated.
"""
data = await self.data.guild(ctx.guild).all()
if true_or_false is not None:
channel_id = data["verifchannel"]
fetched_channel = self.bot.get_channel(channel_id)
if fetched_channel:
needed_permissions = [
"manage_messages",
"read_messages",
"send_messages",
"manage_roles",
"attach_files",
]
result = self._permissions_checker(needed_permissions, fetched_channel)
if not isinstance(result, str):
if data["temprole"] or data["autorole"]:
await self.data.guild(ctx.guild).active.set(true_or_false)
message = "Captcher is now {term}activate.".format(
term="" if true_or_false else "de"
)
else:
message = (
"Cannot complete request: No temporary or automatic role "
"are configured."
)
else:
message = result
else:
message = "Cannot complete request: No channel are configured."
if channel_id:
                    await self.data.guild(ctx.guild).verifchannel.clear()
else:
await ctx.send_help()
message = box(
"Captcher is {term}activated.".format(term="" if data["active"] else "de")
)
await ctx.send(message)
@config.command()
@commands.bot_has_permissions(administrator=True)
async def autoconfig(self, ctx: commands.Context):
"""Automatically set Captcher."""
await ctx.send(
"This command will:\n"
"- Create a new role called: Unverified\n"
"- Create a new channel called: #verification\n"
"- Create a new channel called: #verification-logs\n"
"\nBot will overwrite all channels to:\n"
"- Do not allow Unverified to read in others channels.\n"
"- Unverified will be able to read & send message in #verification.\n"
"\nDo you wish to continue?"
)
try:
predicator = MessagePredicate.yes_or_no(ctx)
await self.bot.wait_for("message", timeout=30, check=predicator)
except asyncio.TimeoutError:
await ctx.send("Command cancelled, caused by timeout.")
return
if predicator.result:
if not ctx.channel.permissions_for(ctx.guild.me).administrator:
await ctx.send("I require the Administrator permission first.")
return  # In case someone removes the permission right after invoking the command.
await self.data.guild(ctx.guild).clear()
possible_result = await self._overwrite_server(ctx)
if possible_result:
await ctx.send(possible_result)
return
else:
await ctx.send("Uhm, why does the captain' had this idea...")
return
r = await self._ask_for_role_add(ctx)
if r:
await ctx.send("Configuration is done. Activate Captcher? (y/n)")
try:
predicator = MessagePredicate.yes_or_no(ctx)
await self.bot.wait_for("message", timeout=30, check=predicator)
except asyncio.TimeoutError:
await ctx.send("Question cancelled, caused by timeout.")
return
if predicator.result:
await self.data.guild(ctx.guild).active.set(True)
await ctx.send("Done.")
@commands.command()
@checks.admin_or_permissions(administrator=True)
async def challengeuser(
self, ctx: commands.Context, user: discord.Member, *, reason: str = None
):
"""Make an user pass the captcha again."""
if user.bot: # Do not challenge bot.
await ctx.send("Bots are my friend, I cannot let you do that to them.")
return
if user is ctx.author:
await ctx.send("Really... REALLY? ARE YOU TRYING TO CHALLENGE YOURSELF?")
return
if user.top_role >= ctx.author.top_role:
await ctx.send(
"This user has a role who is higher or equal to your higher role, I "
"cannot let you do that."
)
return
data = await self.data.guild(ctx.guild).all()
# Get channel
verifchannel = data["verifchannel"]
if not verifchannel:
await ctx.send("There is no verification channel registered.")
return
channel = self.bot.get_channel(verifchannel)
if not channel:
await ctx.send("I cannot find the verification channel, please add one again.")
return
# Permissions checker (In case someone changed something meanwhile)
needed_permissions = [
"manage_messages",
"read_messages",
"send_messages",
"manage_roles",
"attach_files",
]
checker = self._permissions_checker(needed_permissions, channel)
if isinstance(checker, str):
await ctx.send(checker)
return # Missing perm(s)
await ctx.send(
"This will remove all roles from the challenged user; they will receive "
"their roles back after passing the captcha, or be kicked if they fail. "
"Would you like to continue? (Y/N)"
)
pred = MessagePredicate.yes_or_no(ctx)
await self.bot.wait_for("message", check=pred)
if not pred.result:
await ctx.send("We're sleeping, for now...")
return
# Start challenge
if not reason:
reason = (
"Hello [user], a server administrator challenged you for a second time in "
"this server, please complete the following captcha. If you fail or take "
"too much time to answer (5 minutes), you will be automatically kicked "
"from this server.\nNote: The captcha doesn't include space.".replace(
"[user]", user.mention
)
)
roles = self._roles_keeper(user)
await self._roles_remover(user, roles)
if data["temprole"]:
role = ctx.guild.get_role(data["temprole"])
await user.add_roles(role, reason="Temporary role given by captcha.")
async with ctx.typing():
captched, bot_message, user_message = await self.challenger(
user, channel, f"Challenged manually by {ctx.author}", reason
)
final = await channel.send(
"You {term} the captcha.".format(term="completed" if captched else "failed")
)
has_been_kicked = False
if captched:
await self._add_roles(user, roles)
await self._report_log(user, "completed", f"Completed captcha.")
else:
await self._report_log(user, "kick", "Failed captcha.")
result = await self._mute_or_unmute_user(channel, user, False)
if not result: # Immediate kick
await self._kicker(user, "Failed the captcha. (Immediate kick)")
has_been_kicked = True
await asyncio.sleep(5)
if not captched and not has_been_kicked:
await self._kicker(user, "Failed the captcha.")
await bot_message.delete()
await user_message.delete()
await final.delete()
del self.in_challenge[user.id]
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: pytg
# language: python
# name: pytg
# ---
# %% [markdown]
# # Solve the TG equation for test data
# %%
import findiff as fd
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pytg.TG as TG
# load data
dat = np.loadtxt("Nash_data.txt", comments="%")
v = dat[:, 0]
rho = 1000.0 * dat[:, 1]
z = dat[:, 2]
g = 9.81
rho0 = np.mean(rho)
b = -g * (rho / rho0 - 1)
# %%
fig, axs = plt.subplots(1, 3, sharey=True)
axs[0].plot(v, z)
axs[1].plot(rho, z)
axs[0].set_ylabel("$z$ (m)")
axs[0].set_xlabel("$v$ (m s$^{-1}$)")
axs[1].set_xlabel(r"$\rho$ (kg m$^{-3}$)")
axs[2].plot(b, z)
axs[2].set_xlabel("$b$ (m s$^{-2}$)")
# %% [markdown]
# Try the module.
# %%
dat = np.loadtxt("Nash_data.txt", comments="%")
u = dat[:, 0]
rho = 1000.0 * dat[:, 1]
z = dat[:, 2]
g = 9.81
rho0 = np.mean(rho)
b = -g * (rho / rho0 - 1)
Kv = 1.0e-3
Kb = Kv / 7
# Wavenumber
k = 1e-4
l = 0.0
# %%
om, wvec, bvec, uvec, pvec = TG.vTG(
z,
u,
u * 0,
b,
k,
l,
Kv,
Kb,
BCv_upper="rigid",
BCv_lower="rigid",
BCb_upper="constant",
BCb_lower="constant",
)
cp = -om.imag / k
fig, ax = plt.subplots(1, 1)
ax.plot(cp, ".")
ax.set_xlabel("Mode")
fig, axs = plt.subplots(1, 4, sharey=True)
axs[0].plot(wvec[:, -1].real, z)
axs[1].plot(bvec[:, -1].real, z)
axs[2].plot(uvec[:, -1].real, z)
axs[3].plot(pvec[:, -1].real, z)
axs[0].plot(wvec[:, 0].real, z)
axs[1].plot(bvec[:, 0].real, z)
axs[2].plot(uvec[:, 0].real, z)
axs[3].plot(pvec[:, 0].real, z)
axs[3].set_xlim(-3, 3)
# %%
om, wvec, bvec, uvec = TG.vTG_sparse(
z,
u,
u * 0,
b,
k,
l,
Kv,
Kb,
BCv_upper="rigid",
BCv_lower="rigid",
BCb_upper="constant",
BCb_lower="constant",
nmodes=10,
which="LM",
)
cp = -om.imag / k
fig, ax = plt.subplots(1, 1)
ax.plot(cp, ".")
ax.set_xlabel("Mode")
fig, axs = plt.subplots(1, 3, sharey=True)
axs[0].plot(wvec[:, -1].real, z)
axs[1].plot(bvec[:, -1].real, z)
axs[2].plot(uvec[:, -1].real, z)
axs[0].plot(wvec[:, 0].real, z)
axs[1].plot(bvec[:, 0].real, z)
axs[2].plot(uvec[:, 0].real, z)
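# %% [markdown]
# Quick sanity check of the stratification (a minimal sketch, assuming `b` and `z` as
# loaded above): the buoyancy frequency squared is $N^2 = \partial b/\partial z$.
# %%
N2 = np.gradient(b, z)
fig, ax = plt.subplots(1, 1)
ax.plot(N2, z)
ax.set_xlabel("$N^2$ (s$^{-2}$)")
ax.set_ylabel("$z$ (m)")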
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 1 21:45:11 2018
@author: abhigyan
"""
import numpy as np
#Default Parameters:
#  Initialization --> Gaussian Random
#  Learning Rate  --> 0.1
#  Optimiser      --> Gradient Descent
#Layer class: Used by the Neural_Network class to create new layers for the Neural Network
class layer(object):
#Initializer Methods
def __init__(self, nodes, activation, learningRate,
momentumCoeff = None, alpha = None): #alpha is the slope of negative part in leaky ReLu
self.activation = activation #or parameters which are required in other activations
self.nodes = nodes #like prelu, elu and selu
self.mu = momentumCoeff
self.alpha = alpha
def initializeVariables(self, previousLayerNodes):
self.weights = np.random.normal(size = (previousLayerNodes, self.nodes),
loc = 0.0, scale = 1.0)
self.bias = np.random.normal(size = (1, self.nodes), loc = 0.0, scale = 1.0)
self.weightGradientHistory = np.zeros((previousLayerNodes, self.nodes))
self.biasGradientHistory = np.zeros((1, self.nodes))
#End
#Getter Methods
def getActivation(self):
return self.activation
def getWeightsAndBias(self):
return self.weights, self.bias
#End
#Forward Propagation
def applyActivation(self, inputs):
self.inputs = inputs
self.z = np.dot(inputs, self.weights) + self.bias
self.inputShape = inputs.shape
if(self.activation == 'sigmoid'):
self.a = np.power((1 + np.exp(-self.z)), -1)
elif(self.activation == 'relu'):
self.a = np.where(self.z > 0, self.z, 0)
elif(self.activation == 'hsigmoid'):
hSigmoidApprox = 0.2*self.z + 0.5
self.a = np.clip(hSigmoidApprox, 0, 1)
elif(self.activation == 'leakyrelu'):
self.a = np.where(self.z > 0, self.z, self.alpha * self.z)
elif(self.activation == 'tanh'):
self.a = (np.exp(self.z) - np.exp(-self.z)) * np.power(np.exp(+self.z) + np.exp(-self.z), -1)
elif(self.activation == 'elu'):
self.a = np.where(self.z > 0, self.z, self.alpha * (np.exp(self.z) - 1))
elif(self.activation == 'prelu'):
self.a = np.where(self.z > 0, self.z, self.alpha * self.z)
return self.a
#End
#Weight manipulations based on the optimiser used
def maintainGradientHistory(self):
self.weightGradientHistory = self.weightUpdate
self.biasGradientHistory = self.biasUpdate
def createWeightUpdates(self, learningRate, optimiser):
if(optimiser == 'gradient_descent'):
self.weightUpdate = learningRate * self.weightGradient
self.biasUpdate = learningRate * self.biasGradient
elif(optimiser == 'gradient_descent_momentum'):
self.weightUpdate = self.mu * self.weightGradientHistory + (1-self.mu) * learningRate * self.weightGradient
self.biasUpdate = self.mu * self.biasGradientHistory + (1-self.mu) * learningRate * self.biasGradient
elif(optimiser == 'RMS_prop'):
pass
#End
#Backpropagation Through Neural Network, Updation of weights and calculation of Downstream gradients
def backpropagation(self, gradientWRTz):
self.gradientWRTz = gradientWRTz
self.summedGradientWRTz = np.sum(gradientWRTz, axis = 0, keepdims = True)
self.weightGradient = np.dot(self.inputs.T, self.gradientWRTz) / self.inputShape[0]
self.biasGradient = self.summedGradientWRTz / self.inputShape[0]
def updateWeights(self):
self.weights = self.weights - self.weightUpdate
self.bias = self.bias - self.biasUpdate
def calculateGradientWRTpreviousZ(self, activation = None): #Calculate gradient WRT 'z' of previous layer
self.gradientWRTw = np.dot(self.gradientWRTz, self.weights.T)
if(activation == 'sigmoid'):
self.gradientWRTpreviousZ = self.gradientWRTw * self.inputs * (1 - self.inputs)
elif(activation == 'relu'):
self.gradientWRTpreviousZ = self.gradientWRTw * np.where(self.inputs > 0, 1, 0)
elif(activation == 'hsigmoid'):
self.gradientWRTpreviousZ = self.gradientWRTw * np.where(np.abs(self.inputs - 0.5) < 0.5, 0.2, 0)
elif(activation == 'leakyrelu'):
self.gradientWRTpreviousZ = self.gradientWRTw * np.where(self.inputs > 0, 1, self.alpha)
elif(activation == 'tanh'):
self.gradientWRTpreviousZ = self.gradientWRTw * (1 - np.power(self.inputs, 2))
elif(activation == 'elu'):
self.gradientWRTpreviousZ = self.gradientWRTw * np.where(self.inputs > 0, 1, self.inputs + self.alpha)
elif(activation == None):
return self.gradientWRTw
return self.gradientWRTpreviousZ
#End
"Final Layer class: Used by the Neural_Network class to create final layer for the Neural Network"
class finalLayer(object):
#Initializer Methods
def __init__(self, classes, outputFunction, errorFunction,
momentumCoeff = None, alpha = None):
self.classes = classes
self.outputFunction = outputFunction
self.errorFunction = errorFunction
self.mu = momentumCoeff
self.alpha = alpha
def initializeVariables(self, previousLayerNodes):
self.weights = np.random.normal(size = (previousLayerNodes, self.classes),
loc = 0.0, scale = 1.0)
self.bias = np.random.normal(size = (1, self.classes), loc = 0.0, scale = 1.0)
self.weightGradientHistory = np.zeros((previousLayerNodes, self.classes))
self.biasGradientHistory = np.zeros((1, self.classes))
#End
#Getter Methods
def getActivation(self):
return self.outputFunction
def getWeightsAndBias(self):
return self.weights, self.bias
def getResult(self):
return self.a
#End
#Forward Propagation
def applyOutputFunction(self, inputs):
self.inputs = inputs
self.z = np.dot(self.inputs, self.weights) + self.bias
self.inputShape = inputs.shape
if(self.outputFunction == 'sigmoid'):
self.a = np.power((1 + np.exp(-self.z)), -1)
elif(self.outputFunction == 'relu'):
self.a = np.where(self.z > 0, self.z, 0)
elif(self.outputFunction == 'leakyrelu'):
self.a = np.where(self.z > 0, self.z, self.alpha * self.z)
elif(self.outputFunction == 'softmax'):
self.a = np.exp(self.z) / np.sum(np.exp(self.z), axis = 1, keepdims = True)
return self.a
#End
#Weight manipulations based on the optimiser used
def maintainGradientHistory(self):
self.weightGradientHistory = self.weightUpdate
self.biasGradientHistory = self.biasUpdate
def createWeightUpdates(self, learningRate, optimiser):
if(optimiser == 'gradient_descent'):
self.weightUpdate = learningRate * self.weightGradient
self.biasUpdate = learningRate * self.biasGradient
elif(optimiser == 'gradient_descent_momentum'):
self.weightUpdate = self.mu * self.weightGradientHistory + (1-self.mu) * learningRate * self.weightGradient
self.biasUpdate = self.mu * self.biasGradientHistory + (1-self.mu) * learningRate * self.biasGradient
elif(optimiser == 'RMS_prop'):
pass
#End
#Calculation of Loss, Backpropagation through Neural Net, Weight Updation, Calculation of Downstream Gradients
def calculateLoss(self, targets):
self.loss = 0
self.targets = targets
if(self.errorFunction == 'cross_entropy' and self.outputFunction == 'sigmoid'):
self.loss = -np.sum(self.targets * np.log(self.a) + (1 - self.targets) * np.log(1 - self.a)) / self.inputShape[0]
elif(self.errorFunction == 'cross_entropy' and self.outputFunction == 'softmax'):
self.loss = -np.sum(self.targets * np.log(self.a)) / self.inputShape[0]
elif(self.errorFunction == 'squared_error'):
self.loss = np.sum((self.a - self.targets) ** 2) / self.inputShape[0]
return self.loss
def backpropagation(self):
self.error = self.a - self.targets
if(not(self.outputFunction == 'sigmoid' or self.outputFunction == 'softmax')):
print("Loss is only available for sigmoid and softmax activation functions")
elif(self.errorFunction == 'cross_entropy'):
self.gradientWRTz = np.copy(self.error)
self.summedGradientWRTz = np.sum(self.error, axis = 0, keepdims = True)
self.weightGradient = np.dot(self.inputs.T, self.gradientWRTz) / self.inputShape[0]
self.biasGradient = self.summedGradientWRTz / self.inputShape[0]
elif(self.errorFunction == 'squared_error'):
self.gradientWRTz = 2 * (self.a - self.targets) * self.a * (1 - self.a)
self.summedGradientWRTz = np.sum(self.gradientWRTz, axis = 0, keepdims = True)
self.weightGradient = np.dot(self.inputs.T, self.gradientWRTz) / self.inputShape[0]
self.biasGradient = self.summedGradientWRTz / self.inputShape[0]
def updateWeights(self):
self.weights = self.weights - self.weightUpdate
self.bias = self.bias - self.biasUpdate
def calculateGradientWRTpreviousZ(self, activation):
self.gradientWRTpreviousZ = None
self.gradientWRTw = np.dot(self.gradientWRTz, self.weights.T)
if(activation == 'sigmoid'):
self.gradientWRTpreviousZ = self.gradientWRTw * self.inputs * (1 - self.inputs)
if(activation == 'hsigmoid'):
self.gradientWRTpreviousZ = self.gradientWRTw * np.where(np.abs(self.inputs - 0.5) < 0.5, 0.2, 0)
elif(activation == 'relu'):
self.gradientWRTpreviousZ = self.gradientWRTw * np.where(self.inputs > 0, 1, 0)
elif(activation == 'leakyrelu'):
self.gradientWRTpreviousZ = self.gradientWRTw * np.where(self.inputs > 0, 1, self.alpha)
elif(activation == 'tanh'):
self.gradientWRTpreviousZ = self.gradientWRTw * (1 - np.power(self.inputs, 2))
elif(activation == 'elu'):
self.gradientWRTpreviousZ = self.gradientWRTw * np.where(self.inputs > 0, 1, self.inputs + self.alpha)
elif(activation == None):
return self.gradientWRTw
return self.gradientWRTpreviousZ
#End
"Main creator class which creates the entire Neural Network based on the parameters given by the user"
class Neural_Network(layer, finalLayer):
def __init__(self, layers, nodes, activations, errorFunction, alpha = 0.01,
optimizer = 'GradientDescent', momentumCoeff = 0.9, learningRate = 0.1, lrDecay = None, decayRate = 0.0 ):
self.optimizer = optimizer
self.layers = layers
self.nodes = nodes
self.learningRate = learningRate
self.NNlayers = []
self.NNlayerOutputs = []
self.errorFunction = errorFunction
self.mu = momentumCoeff
self.alpha = alpha
self.lrDecay = lrDecay
self.decayRate = decayRate
if((layers != len(nodes)) or (layers != len(activations))):
print("Invalid Neural Network Parameters")
else:
for i in range(0, layers):
if(i == layers-1):
l = finalLayer(nodes[i], activations[i], self.errorFunction, self.mu, self.alpha)
self.NNlayers.append(l)
break
l = layer(nodes[i], activations[i], self.learningRate, self.mu, self.alpha)
self.NNlayers.append(l)
#Neural Network Inititializer function
def initializeNN(self, inputs, targets, epochs):
self.inputs = inputs
self.targets = targets
self.inputShape = inputs.shape
self.epochs = epochs
for j in range(0, self.layers):
if(j == 0):
self.NNlayers[j].initializeVariables(self.inputShape[1])
output = self.NNlayers[j].applyActivation(self.inputs)
self.NNlayerOutputs.append(output)
elif(j == self.layers - 1):
self.NNlayers[j].initializeVariables(self.nodes[j-1])
output = self.NNlayers[j].applyOutputFunction(self.NNlayerOutputs[j-1])
self.NNlayerOutputs.append(output)
else:
self.NNlayers[j].initializeVariables(self.nodes[j-1])
output = self.NNlayers[j].applyActivation(self.NNlayerOutputs[j-1])
self.NNlayerOutputs.append(output)
#Function which will run the Neural Network
def run_Neural_Network(self):
for i in range(0, self.epochs):
self.gradientWRTz = []
self.NNlayerOutputs = []
#Forward Propagation
for j in range(0, self.layers):
if(j == 0):
output = self.NNlayers[j].applyActivation(self.inputs)
self.NNlayerOutputs.append(output)
elif(j == self.layers - 1):
output = self.NNlayers[j].applyOutputFunction(self.NNlayerOutputs[j-1])
self.NNlayerOutputs.append(output)
else:
output = self.NNlayers[j].applyActivation(self.NNlayerOutputs[j-1])
self.NNlayerOutputs.append(output)
#Loss Calculation
self.loss = self.NNlayers[-1].calculateLoss(self.targets)
print(self.loss)
self.accuracy_calculator()
self.F_Score_calculator()
#Backpropagation
for j in range(self.layers-1, -1, -1):
if(j == self.layers-1):
self.NNlayers[j].backpropagation()
self.gradientWRTz.append(self.NNlayers[j].calculateGradientWRTpreviousZ(self.NNlayers[j-1].getActivation()))
elif(j == 0):
self.NNlayers[j].backpropagation(self.gradientWRTz[self.layers-j-2])
else:
self.NNlayers[j].backpropagation(self.gradientWRTz[self.layers-j-2])
self.gradientWRTz.append(self.NNlayers[j].calculateGradientWRTpreviousZ(self.NNlayers[j-1].getActivation()))
#Learning Rate Decay
if(self.lrDecay == None):
self.currentLearningRate = self.learningRate
elif(self.lrDecay == 'first_order_time'):
self.currentLearningRate = self.learningRate/(1+self.decayRate*i)
elif(self.lrDecay == 'second_order_time'):
self.currentLearningRate = self.learningRate/(1+self.decayRate*(i**2))
elif(self.lrDecay == 'exponential_decay'):
self.currentLearningRate = self.learningRate*np.exp(-self.decayRate*i)
#Optimization
if(self.optimizer == 'GradientDescent'):
self.gradient_descent_optimizer()
elif(self.optimizer == 'GradientDescentWithMomentum'):
self.momentum_descent_optimizer()
#Optimiser functions
def gradient_descent_optimizer(self):
for i in range(0, self.layers):
self.NNlayers[i].createWeightUpdates(self.currentLearningRate, 'gradient_descent')
self.NNlayers[i].updateWeights()
def momentum_descent_optimizer(self):
for i in range(0, self.layers):
self.NNlayers[i].createWeightUpdates(self.currentLearningRate, 'gradient_descent_momentum')
self.NNlayers[i].updateWeights()
self.NNlayers[i].maintainGradientHistory()
#Score functions accuracy and F_score
def accuracy_calculator(self):
self.hypothesis = self.NNlayers[-1].getResult()
if(self.NNlayers[-1].getActivation() == 'sigmoid' and self.nodes[-1] == 1):
self.accuracyMatrix = np.round(self.hypothesis) == self.targets
self.accuracyMatrix = self.accuracyMatrix.astype(dtype = np.int32)
self.accuracy = np.sum(self.accuracyMatrix) / len(self.targets)
print(self.accuracy)
else:
self.accuracyMatrix = np.argmax(self.hypothesis, axis = 1) == np.argmax(self.targets, axis = 1)
self.accuracyMatrix = self.accuracyMatrix.astype(dtype = np.int32)
self.accuracy = np.sum(self.accuracyMatrix) / len(self.targets)
print(self.accuracy)
def calculatePrecision(self, predictions, target):
TP = np.sum(predictions & target)
FP = np.sum(predictions & np.abs(target - 1))
return TP/(TP+FP)
def calculateRecall(self, predictions, target):
TP = np.sum(predictions & target)
FN = np.sum(np.abs(predictions - 1) & target)
return TP/(TP+FN)
def F_Score_calculator(self, averaging ='macro'): #For more info check out https://sebastianraschka.com/faq/docs/multiclass-metric.html
self.hypothesis = self.NNlayers[-1].getResult()
predictions = np.array(np.round(self.hypothesis), dtype = np.int16)
self.targets = np.array(self.targets, dtype = np.int16)
if(self.NNlayers[-1].getActivation() == 'sigmoid' and self.nodes[-1] == 1):
precision = self.calculatePrecision(predictions, self.targets)
recall = self.calculateRecall(predictions, self.targets)
self.F_score = 2 * (precision * recall) / (precision + recall)
else:
precision = np.array([])
recall = np.array([])
for i in range(self.targets.shape[1]):
precision = np.append(precision, self.calculatePrecision(predictions[:, i], self.targets[:, i]))
recall = np.append(recall, self.calculateRecall(predictions[:, i], self.targets[:, i]))
if(averaging == 'macro'):
averagePrecision = np.average(precision)
averageRecall = np.average(recall)
self.F_score = 2 * (averagePrecision * averageRecall) / (averagePrecision + averageRecall)
print('F_score: ' + str(self.F_score))
#End
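#Minimal usage sketch (illustrative only): the tiny XOR-style dataset and the chosen
#hyper-parameters below are hypothetical, not part of the original module; it simply
#exercises the API defined above (construct Neural_Network, call initializeNN, then
#run_Neural_Network).
if __name__ == '__main__':
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float64)
    y = np.array([[0], [1], [1], [0]], dtype=np.float64)
    #Two layers: a 4-unit sigmoid hidden layer and a single sigmoid output unit,
    #trained with plain gradient descent on the cross-entropy loss.
    nn = Neural_Network(layers=2, nodes=[4, 1], activations=['sigmoid', 'sigmoid'],
                        errorFunction='cross_entropy', learningRate=0.5)
    nn.initializeNN(X, y, epochs=50)
    nn.run_Neural_Network()  #prints training loss and accuracy each epoch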
|
import os, random
def list_dir_files(t_dir):
f_list = []
if t_dir[-1] != '/':
t_dir+='/'
for dir in os.listdir(t_dir):
f_list += [t_dir+dir+'/'+i for i in os.listdir(t_dir+dir)]
return f_list
def list_1perdir_files(t_dir):
f_list = []
if t_dir[-1] != '/':
t_dir += '/'
for dir in os.listdir(t_dir):
f_list += [random.choice([t_dir + dir + '/' + i for i in os.listdir(t_dir + dir)])]
# f_list=['/media/zero/41FF48D81730BD9B/Final_Thesies/data/New_test_set/msr_paraphrase_text.pickle']
return f_list
# pickle.dump(f_list,open('/media/zero/41FF48D81730BD9B/Final_Thesies/data/wiki/pickle_list.pickle','w'))
if __name__ == '__main__':
for i in list_1perdir_files('/media/zero/41FF48D81730BD9B/Final_Thesies/data/wiki/pickles'):
print(i)
|
# coding: utf-8
# In[ ]:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome()
driver.get("https://www.google.co.in/imghp?hl=en&tab=wi")
elem = driver.find_element_by_name("q")
elem.clear()
x=input("enter the name of the wallpaper you want to download : ")
x=x+" wallpaper hd "
elem.send_keys(x)
elem.send_keys(Keys.RETURN)
assert "No results found." not in driver.page_source
# In[ ]:
from bs4 import BeautifulSoup as bs
# In[19]:
import requests
# In[20]:
url=driver.current_url
# In[21]:
webdata=requests.get(url)
# In[22]:
if webdata.status_code==200:
print("fetching done")
else:
print("not done")
# In[23]:
data=webdata.text
soup=bs(data,'lxml')
# In[24]:
anchors=soup.find_all('a')
# In[25]:
images = soup.find_all('img')
# In[26]:
flink = images[1].attrs.get('src')
# In[27]:
import random
import urllib.request
# In[28]:
address = input('Address to save image (eg: d:/): ')
name = random.randrange(1,1000)
full_name = str(address)
full_name += str(name) + '.jpg'
urllib.request.urlretrieve(flink,full_name)
print('Your image is being downloaded and saved to ' + full_name)
# In[29]:
driver.close()
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import asyncio
import os
import unittest
from azure.core.pipeline.transport import AioHttpTransport
from multidict import CIMultiDict, CIMultiDictProxy
from azure.storage.blob.aio import (
BlobServiceClient,
ContainerClient,
BlobClient,
BlobType
)
from testcase import (
StorageTestCase,
TestMode,
record,
)
#------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'blob'
FILE_PATH = 'blob_input.temp.dat'
LARGE_BLOB_SIZE = 64 * 1024
#------------------------------------------------------------------------------
class AiohttpTestTransport(AioHttpTransport):
"""Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
"""
async def send(self, request, **config):
response = await super(AiohttpTestTransport, self).send(request, **config)
if not isinstance(response.headers, CIMultiDictProxy):
response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
response.content_type = response.headers.get("content-type")
return response
class StorageAppendBlobTestAsync(StorageTestCase):
def setUp(self):
super(StorageAppendBlobTestAsync, self).setUp()
url = self._get_account_url()
credential = self._get_shared_key_credential()
self.bsc = BlobServiceClient(url, credential=credential, max_block_size=4 * 1024, transport=AiohttpTestTransport())
self.config = self.bsc._config
self.container_name = self.get_resource_name('utcontainer')
def tearDown(self):
if not self.is_playback():
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(self.bsc.delete_container(self.container_name))
except:
pass
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except:
pass
return super(StorageAppendBlobTestAsync, self).tearDown()
#--Helpers-----------------------------------------------------------------
async def _setup(self):
if not self.is_playback():
try:
await self.bsc.create_container(self.container_name)
except:
pass
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
async def _create_blob(self):
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(
self.container_name,
blob_name)
await blob.create_append_blob()
return blob
async def assertBlobEqual(self, blob, expected_data):
stream = await blob.download_blob()
actual_data = await stream.content_as_bytes()
self.assertEqual(actual_data, expected_data)
class NonSeekableFile(object):
def __init__(self, wrapped_file):
self.wrapped_file = wrapped_file
def write(self, data):
self.wrapped_file.write(data)
def read(self, count):
return self.wrapped_file.read(count)
#--Test cases for append blobs --------------------------------------------
async def _test_create_blob_async(self):
# Arrange
await self._setup()
blob_name = self._get_blob_reference()
# Act
blob = self.bsc.get_blob_client(self.container_name, blob_name)
create_resp = await blob.create_append_blob()
# Assert
blob_properties = await blob.get_blob_properties()
self.assertIsNotNone(blob_properties)
self.assertEqual(blob_properties.etag, create_resp.get('etag'))
self.assertEqual(blob_properties.last_modified, create_resp.get('last_modified'))
@record
def test_create_blob_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_create_blob_async())
async def _test_create_blob_with_lease_id_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
# Act
lease = await blob.acquire_lease()
create_resp = await blob.create_append_blob(lease=lease)
# Assert
blob_properties = await blob.get_blob_properties()
self.assertIsNotNone(blob_properties)
self.assertEqual(blob_properties.etag, create_resp.get('etag'))
self.assertEqual(blob_properties.last_modified, create_resp.get('last_modified'))
@record
def test_create_blob_with_lease_id_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_create_blob_with_lease_id_async())
async def _test_create_blob_with_metadata_async(self):
# Arrange
await self._setup()
metadata = {'hello': 'world', 'number': '42'}
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
# Act
await blob.create_append_blob(metadata=metadata)
# Assert
md = await blob.get_blob_properties()
self.assertDictEqual(md.metadata, metadata)
@record
def test_create_blob_with_metadata_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_create_blob_with_metadata_async())
async def _test_append_block_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
# Act
for i in range(5):
resp = await blob.append_block(u'block {0}'.format(i).encode('utf-8'))
self.assertEqual(int(resp['blob_append_offset']), 7 * i)
self.assertEqual(resp['blob_committed_block_count'], i + 1)
self.assertIsNotNone(resp['etag'])
self.assertIsNotNone(resp['last_modified'])
# Assert
await self.assertBlobEqual(blob, b'block 0block 1block 2block 3block 4')
@record
def test_append_block_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_block_async())
async def _test_append_block_unicode_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
# Act
resp = await blob.append_block(u'啊齄丂狛狜', encoding='utf-16')
self.assertEqual(int(resp['blob_append_offset']), 0)
self.assertEqual(resp['blob_committed_block_count'], 1)
self.assertIsNotNone(resp['etag'])
self.assertIsNotNone(resp['last_modified'])
# Assert
@record
def test_append_block_unicode_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_block_unicode_async())
async def _test_append_block_with_md5_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
# Act
resp = await blob.append_block(b'block', validate_content=True)
self.assertEqual(int(resp['blob_append_offset']), 0)
self.assertEqual(resp['blob_committed_block_count'], 1)
self.assertIsNotNone(resp['etag'])
self.assertIsNotNone(resp['last_modified'])
# Assert
@record
def test_append_block_with_md5_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_block_with_md5_async())
async def _test_create_append_blob_with_no_overwrite_async(self):
# Arrange
await self._setup()
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(
self.container_name,
blob_name)
data1 = self.get_random_bytes(LARGE_BLOB_SIZE)
data2 = self.get_random_bytes(LARGE_BLOB_SIZE + 512)
# Act
create_resp = await blob.upload_blob(
data1,
overwrite=True,
blob_type=BlobType.AppendBlob,
metadata={'BlobData': 'Data1'})
update_resp = await blob.upload_blob(
data2,
overwrite=False,
blob_type=BlobType.AppendBlob,
metadata={'BlobData': 'Data2'})
props = await blob.get_blob_properties()
# Assert
appended_data = data1 + data2
await self.assertBlobEqual(blob, appended_data)
self.assertEqual(props.etag, update_resp.get('etag'))
self.assertEqual(props.blob_type, BlobType.AppendBlob)
self.assertEqual(props.last_modified, update_resp.get('last_modified'))
self.assertEqual(props.metadata, {'BlobData': 'Data1'})
self.assertEqual(props.size, LARGE_BLOB_SIZE + LARGE_BLOB_SIZE + 512)
@record
def test_create_append_blob_with_no_overwrite_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_create_append_blob_with_no_overwrite_async())
async def _test_create_append_blob_with_overwrite_async(self):
# Arrange
await self._setup()
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(
self.container_name,
blob_name)
data1 = self.get_random_bytes(LARGE_BLOB_SIZE)
data2 = self.get_random_bytes(LARGE_BLOB_SIZE + 512)
# Act
create_resp = await blob.upload_blob(
data1,
overwrite=True,
blob_type=BlobType.AppendBlob,
metadata={'BlobData': 'Data1'})
update_resp = await blob.upload_blob(
data2,
overwrite=True,
blob_type=BlobType.AppendBlob,
metadata={'BlobData': 'Data2'})
props = await blob.get_blob_properties()
# Assert
await self.assertBlobEqual(blob, data2)
self.assertEqual(props.etag, update_resp.get('etag'))
self.assertEqual(props.last_modified, update_resp.get('last_modified'))
self.assertEqual(props.metadata, {'BlobData': 'Data2'})
self.assertEqual(props.blob_type, BlobType.AppendBlob)
self.assertEqual(props.size, LARGE_BLOB_SIZE + 512)
@record
def test_create_append_blob_with_overwrite_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_create_append_blob_with_overwrite_async())
async def _test_append_blob_from_bytes_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
# Act
data = b'abcdefghijklmnopqrstuvwxyz'
append_resp = await blob.upload_blob(data, blob_type=BlobType.AppendBlob)
blob_properties = await blob.get_blob_properties()
# Assert
await self.assertBlobEqual(blob, data)
self.assertEqual(blob_properties.etag, append_resp['etag'])
self.assertEqual(blob_properties.last_modified, append_resp['last_modified'])
@record
def test_append_blob_from_bytes_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_bytes_async())
async def _test_append_blob_from_0_bytes_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
# Act
data = b''
append_resp = await blob.upload_blob(data, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data)
# appending nothing should not make any network call
self.assertIsNone(append_resp.get('etag'))
self.assertIsNone(append_resp.get('last_modified'))
@record
def test_append_blob_from_0_bytes_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_0_bytes_async())
async def _test_append_blob_from_bytes_with_progress_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = b'abcdefghijklmnopqrstuvwxyz'
# Act
progress = []
def progress_gen(upload):
progress.append((0, len(upload)))
yield upload
upload_data = progress_gen(data)
await blob.upload_blob(upload_data, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
@record
def test_append_blob_from_bytes_with_progress_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_bytes_with_progress_async())
async def _test_append_blob_from_bytes_with_index_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
# Act
data = b'abcdefghijklmnopqrstuvwxyz'
await blob.upload_blob(data[3:], blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data[3:])
@record
def test_append_blob_from_bytes_with_index_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_bytes_with_index_async())
async def _test_append_blob_from_bytes_with_index_and_count_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
# Act
data = b'abcdefghijklmnopqrstuvwxyz'
await blob.upload_blob(data[3:], length=5, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data[3:8])
@record
def test_append_blob_from_bytes_with_index_and_count_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_bytes_with_index_and_count_async())
async def _test_append_blob_from_bytes_chunked_upload_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
append_resp = await blob.upload_blob(data, blob_type=BlobType.AppendBlob)
blob_properties = await blob.get_blob_properties()
# Assert
await self.assertBlobEqual(blob, data)
self.assertEqual(blob_properties.etag, append_resp['etag'])
self.assertEqual(blob_properties.last_modified, append_resp.get('last_modified'))
@record
def test_append_blob_from_bytes_chunked_upload_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_bytes_chunked_upload_async())
async def _test_append_blob_from_bytes_with_progress_chunked_upload_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
progress = []
def progress_gen(upload):
n = self.config.max_block_size
total = len(upload)
current = 0
while upload:
progress.append((current, total))
yield upload[:n]
current += len(upload[:n])
upload = upload[n:]
upload_data = progress_gen(data)
await blob.upload_blob(upload_data, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
@record
def test_append_blob_from_bytes_with_progress_chunked_upload_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_bytes_with_progress_chunked_upload_async())
async def _test_append_blob_from_bytes_chunked_upload_with_index_and_count_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
index = 33
blob_size = len(data) - 66
# Act
await blob.upload_blob(data[index:], length=blob_size, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data[index:index + blob_size])
@record
def test_append_blob_from_bytes_chunked_upload_with_index_and_count_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_bytes_chunked_upload_with_index_and_count_async())
async def _test_append_blob_from_path_chunked_upload_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
append_resp = await blob.upload_blob(stream, blob_type=BlobType.AppendBlob)
blob_properties = await blob.get_blob_properties()
# Assert
await self.assertBlobEqual(blob, data)
self.assertEqual(blob_properties.etag, append_resp.get('etag'))
self.assertEqual(blob_properties.last_modified, append_resp.get('last_modified'))
@record
def test_append_blob_from_path_chunked_upload_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_path_chunked_upload_async())
async def _test_append_blob_from_path_with_progress_chunked_upload_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def progress_gen(upload):
n = self.config.max_block_size
total = LARGE_BLOB_SIZE
current = 0
while upload:
chunk = upload.read(n)
if not chunk:
break
progress.append((current, total))
yield chunk
current += len(chunk)
with open(FILE_PATH, 'rb') as stream:
upload_data = progress_gen(stream)
await blob.upload_blob(upload_data, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
@record
def test_append_blob_from_path_with_progress_chunked_upload_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_path_with_progress_chunked_upload_async())
async def _test_append_blob_from_stream_chunked_upload_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
append_resp = await blob.upload_blob(stream, blob_type=BlobType.AppendBlob)
blob_properties = await blob.get_blob_properties()
# Assert
await self.assertBlobEqual(blob, data)
self.assertEqual(blob_properties.etag, append_resp.get('etag'))
self.assertEqual(blob_properties.last_modified, append_resp.get('last_modified'))
@record
def test_append_blob_from_stream_chunked_upload_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_stream_chunked_upload_async())
async def _test_append_blob_from_stream_non_seekable_chunked_upload_known_size_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
blob_size = len(data) - 66
# Act
with open(FILE_PATH, 'rb') as stream:
non_seekable_file = StorageAppendBlobTestAsync.NonSeekableFile(stream)
await blob.upload_blob(non_seekable_file, length=blob_size, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data[:blob_size])
@record
def test_append_blob_from_stream_non_seekable_chunked_upload_known_size_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_stream_non_seekable_chunked_upload_known_size_async())
async def _test_append_blob_from_stream_non_seekable_chunked_upload_unknown_size_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
non_seekable_file = StorageAppendBlobTestAsync.NonSeekableFile(stream)
await blob.upload_blob(non_seekable_file, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data)
@record
def test_append_blob_from_stream_non_seekable_chunked_upload_unknown_size_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_stream_non_seekable_chunked_upload_unknown_size_async())
async def _test_append_blob_from_stream_with_multiple_appends_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream1:
stream1.write(data)
with open(FILE_PATH, 'wb') as stream2:
stream2.write(data)
# Act
with open(FILE_PATH, 'rb') as stream1:
await blob.upload_blob(stream1, blob_type=BlobType.AppendBlob)
with open(FILE_PATH, 'rb') as stream2:
await blob.upload_blob(stream2, blob_type=BlobType.AppendBlob)
# Assert
data = data * 2
await self.assertBlobEqual(blob, data)
@record
def test_append_blob_from_stream_with_multiple_appends_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_stream_with_multiple_appends_async())
async def _test_append_blob_from_stream_chunked_upload_with_count_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
await blob.upload_blob(stream, length=blob_size, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data[:blob_size])
@record
def test_append_blob_from_stream_chunked_upload_with_count_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_stream_chunked_upload_with_count_async())
async def _test_append_blob_from_stream_chunked_upload_with_count_parallel_async(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
append_resp = await blob.upload_blob(stream, length=blob_size, blob_type=BlobType.AppendBlob)
blob_properties = await blob.get_blob_properties()
# Assert
await self.assertBlobEqual(blob, data[:blob_size])
self.assertEqual(blob_properties.etag, append_resp.get('etag'))
self.assertEqual(blob_properties.last_modified, append_resp.get('last_modified'))
@record
def test_append_blob_from_stream_chunked_upload_with_count_parallel_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_stream_chunked_upload_with_count_parallel_async())
async def _test_append_blob_from_text_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-8')
# Act
append_resp = await blob.upload_blob(text, blob_type=BlobType.AppendBlob)
blob_properties = await blob.get_blob_properties()
# Assert
await self.assertBlobEqual(blob, data)
self.assertEqual(blob_properties.etag, append_resp.get('etag'))
self.assertEqual(blob_properties.last_modified, append_resp.get('last_modified'))
@record
def test_append_blob_from_text_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_text_async())
async def _test_append_blob_from_text_with_encoding_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
# Act
await blob.upload_blob(text, encoding='utf-16', blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, data)
@record
def test_append_blob_from_text_with_encoding_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_text_with_encoding_async())
async def _test_append_blob_from_text_with_encoding_and_progress_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
# Act
progress = []
def progress_gen(upload):
progress.append((0, len(data)))
yield upload
upload_data = progress_gen(text)
await blob.upload_blob(upload_data, encoding='utf-16', blob_type=BlobType.AppendBlob)
# Assert
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
@record
def test_append_blob_from_text_with_encoding_and_progress_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_text_with_encoding_and_progress_async())
async def _test_append_blob_from_text_chunked_upload_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = self.get_random_text_data(LARGE_BLOB_SIZE)
encoded_data = data.encode('utf-8')
# Act
await blob.upload_blob(data, blob_type=BlobType.AppendBlob)
# Assert
await self.assertBlobEqual(blob, encoded_data)
@record
def test_append_blob_from_text_chunked_upload_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_from_text_chunked_upload_async())
async def _test_append_blob_with_md5_async(self):
# Arrange
await self._setup()
blob = await self._create_blob()
data = b'hello world'
# Act
await blob.append_block(data, validate_content=True)
# Assert
@record
def test_append_blob_with_md5_async(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._test_append_blob_with_md5_async())
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
import frappe
class TestModuleProfile(unittest.TestCase):
def test_make_new_module_profile(self):
if not frappe.db.get_value("Module Profile", "_Test Module Profile"):
frappe.get_doc(
{
"doctype": "Module Profile",
"module_profile_name": "_Test Module Profile",
"block_modules": [{"module": "Accounts"}],
}
).insert()
# add to user and check
if not frappe.db.get_value("User", "[email protected]"):
new_user = frappe.get_doc(
{"doctype": "User", "email": "[email protected]", "first_name": "Test User"}
).insert()
else:
new_user = frappe.get_doc("User", "[email protected]")
new_user.module_profile = "_Test Module Profile"
new_user.save()
self.assertEqual(new_user.block_modules[0].module, "Accounts")
|
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import numpy as np
class Box(object):
def __init__(
self,
width=1,
height=1,
length=1,
centerX=0,
centerY=0,
centerZ=0,
yaw=0.0,
pitch=0.0,
roll=0.0,
translationX=0,
translationY=0,
translationZ=0,
):
# In webots length is in z-axis, width is in x-axis and height is in y-axis
# Center is the rotation center for the box
# -> in webots, this should be the rear axle location relative to the center of the box
# -> center is the vector from the true center of the box to the rotation center of the box
# In webots yaw is CC around the y-axis!
# In webots pitch is CC around the z-axis!
# In webots roll is CC around the x-axis!
# NOTE: this geometry class applies a translation to get the center of rotation,
# rotates the box and then applies a global translation to move the rectangle in a global coordinate system
self.dimensions = np.array([width, height, length])
self.center = np.array([centerX, centerY, centerZ])
self.translation = np.array([translationX, translationY, translationZ])
self.yaw = yaw
self.pitch = pitch
self.roll = roll
self.unrotatedegocorners = self._getunrotatedegocorners()
self.rotation = self.getyawrollpitchrotation(self.yaw, self.pitch, self.roll)
# The transpose is the inverse rotation matrix
self.reverserotation = np.transpose(self.rotation)
self.corners = self.getcorners()
def __str__(self):
return "[({},{},{}), center=({},{},{}), rotation=({},{},{}), translation=({},{},{})]".format(
self.dimensions[0],
self.dimensions[1],
self.dimensions[2],
self.center[0],
self.center[1],
self.center[2],
self.yaw,
self.pitch,
self.roll,
self.translation[0],
self.translation[1],
self.translation[2],
)
def getyawrollpitchrotation(self, yaw, pitch, roll):
sin_p = math.sin(pitch)
cos_p = math.cos(pitch)
sin_y = math.sin(yaw)
cos_y = math.cos(yaw)
sin_r = math.sin(roll)
cos_r = math.cos(roll)
return np.array(
[
[
cos_p * cos_y,
cos_p * sin_y * sin_r - sin_p * cos_r,
cos_p * sin_y * cos_r + sin_p * sin_r,
],
[
sin_p * cos_y,
sin_p * sin_y * sin_r + cos_p * cos_r,
sin_p * sin_y * cos_r - cos_p * sin_r,
],
[-sin_y, cos_y * sin_r, cos_y * cos_r],
]
)
def _getunrotatedegocorners(self):
x_diff1, y_diff1, z_diff1 = -self.dimensions / 2.0 - self.center
x_diff2, y_diff2, z_diff2 = self.dimensions / 2.0 - self.center
x1, y1, z1 = [
min(x_diff1, x_diff2),
min(y_diff1, y_diff2),
min(z_diff1, z_diff2),
]
x2, y2, z2 = [
max(x_diff1, x_diff2),
max(y_diff1, y_diff2),
max(z_diff1, z_diff2),
]
corners = np.array(
[
[x1, y1, z1],
[x1, y1, z2],
[x1, y2, z1],
[x1, y2, z2],
[x2, y1, z1],
[x2, y1, z2],
[x2, y2, z1],
[x2, y2, z2],
]
)
return corners
def getcorners(self):
corners = self._getunrotatedegocorners()
if abs(self.yaw) > 1e-30 or abs(self.pitch) > 1e-30 or abs(self.roll) > 1e-30:
corners = np.inner(corners, self.rotation)
corners += self.translation
return corners
def getvolume(self):
return np.prod(self.dimensions)
def containspoint(self, point):
return self.containspoints(np.array([point]))
def containspoints(self, points):
# 1.) Rotate the point around the center
# 2.) Check to see if the points lie inside the co-linear rectangle
N, d = points.shape
ego_points = points - self.translation
if abs(self.yaw) > 1e-30 or abs(self.pitch) > 1e-30 or abs(self.roll) > 1e-30:
rotated_points = np.inner(ego_points, self.reverserotation)
else:
rotated_points = ego_points
low_corner = self.unrotatedegocorners[0]
high_corner = self.unrotatedegocorners[7]
# This is why we rotate the points rather than the box -> simpler to check if the box is
# co-linear with the axis of the local coordinate system
return np.all(
np.logical_and(
(high_corner >= rotated_points), (rotated_points >= low_corner)
),
axis=1,
)
# Note: not intended to be used externally
def _unrotated_containspoints(self, unrotated_points):
low_corner = self.unrotatedegocorners[0]
high_corner = self.unrotatedegocorners[7]
# This is why we rotate the points rather than the box -> simpler to check if the box is
# co-linear with the axis of the local coordinate system
return np.all(
np.logical_and(
(high_corner >= unrotated_points), (unrotated_points >= low_corner)
),
axis=1,
)
def _getnormals(self):
# Just need three normals of the unrotated box
p1, p2, p3, p4, p5, p6, p7, p8 = self.unrotatedegocorners
xn = np.cross(p3 - p1, p2 - p1)
yn = np.cross(p2 - p1, p5 - p1)
zn = np.cross(p5 - p1, p3 - p1)
return xn, yn, zn
def getlines(self):
p1, p2, p3, p4, p5, p6, p7, p8 = self.corners
start_points = np.array([p1, p1, p1, p2, p2, p3, p3, p4, p5, p5, p6, p7])
end_points = np.array([p2, p3, p5, p4, p6, p7, p4, p8, p6, p7, p8, p8])
return start_points, end_points
def intersects(self, box):
# NOTE: the order of the points in self.corners and self.unrotatedegocorners must not change!
# Calculates whether any corners of rect fall within self
start1, end1 = box.getlines()
intersect1 = self.intersectswithlines(points=start1, end_points=end1)
# Also need to see if any of the corners of self fall in rect
start2, end2 = self.getlines()
intersect2 = box.intersectswithlines(points=start2, end_points=end2)
return np.any(np.concatenate((intersect1, intersect2)))
# Calculates the line parameters at which each line intersects two parallel planes that
# share the normal `norm` and pass through pts_on_plane1 and pts_on_plane2 respectively.
# norm must be the outward normal of the plane defined by pts_on_plane1.
def _get_line_intersect_with_planes_3d(
self, points, directions, norm, pts_on_plane1, pts_on_plane2
):
r = directions
n1 = norm
n2 = -norm
d1 = -np.inner(n1, pts_on_plane1[0])
d2 = -np.inner(n2, pts_on_plane2[0])
r_n1 = np.inner(r, n1)
r_n2 = np.inner(r, n2)
n1_px = np.inner(n1, points) + d1
n2_px = np.inner(n2, points) + d2
n1_p = np.inner(n1, points)
n2_p = np.inner(n2, points)
t1 = np.zeros(len(points))
t2 = np.zeros(len(points))
# Check for parallel
z1 = np.abs(r_n1) < 1e-20
z2 = np.abs(r_n2) < 1e-20
nz1 = np.logical_not(z1)
nz2 = np.logical_not(z2)
# Check for points on plane
on1 = np.abs(n1_px) < 1e-20
on2 = np.abs(n2_px) < 1e-20
non1 = np.logical_not(on1)
non2 = np.logical_not(on2)
# All points that are not on the plane but are perpendicular -> inf
t1[np.logical_and(z1, non1)] = -np.inf
t2[np.logical_and(z2, non2)] = np.inf
# All points not perpendicular and not on the plane
nz_non1 = np.logical_and(nz1, non1)
nz_non2 = np.logical_and(nz2, non2)
t1[nz_non1] = -(d1 + n1_p[nz_non1]) / r_n1[nz_non1]
t2[nz_non2] = -(d2 + n2_p[nz_non2]) / r_n2[nz_non2]
# Re-order points if necessary
t = np.stack((t1, t2), axis=1)
tpos = np.min(t, axis=1)
tneg = np.max(t, axis=1)
# print("POS {}\nNEG {}".format(tpos, tneg))
return tpos, tneg
# NOTE: directions must be vectors from points (start) to end_points
def intersectswithlines(self, points, end_points):
# Method is described here: https://math.stackexchange.com/questions/1477930/does-a-line-intersect-a-box-in-a-3d-space
rot_points = points - self.translation
rot_end_points = end_points - self.translation
if abs(self.yaw) > 1e-30 or abs(self.pitch) > 1e-30 or abs(self.roll) > 1e-30:
rot_points = np.inner(rot_points, self.reverserotation)
rot_end_points = np.inner(rot_end_points, self.reverserotation)
rot_directions = rot_end_points - rot_points
xn, yn, zn = self._getnormals()
with np.errstate(divide="ignore"):
low_xpoints = [self.unrotatedegocorners[0]]
low_ypoints = [self.unrotatedegocorners[0]]
low_zpoints = [self.unrotatedegocorners[0]]
high_xpoints = [self.unrotatedegocorners[7]]
high_ypoints = [self.unrotatedegocorners[7]]
high_zpoints = [self.unrotatedegocorners[7]]
t_xpos, t_xneg = self._get_line_intersect_with_planes_3d(
rot_points, rot_directions, xn, high_xpoints, low_xpoints
)
t_ypos, t_yneg = self._get_line_intersect_with_planes_3d(
rot_points, rot_directions, yn, high_ypoints, low_ypoints
)
t_zpos, t_zneg = self._get_line_intersect_with_planes_3d(
rot_points, rot_directions, zn, high_zpoints, low_zpoints
)
pos_ts = np.stack((t_xpos, t_ypos, t_zpos), axis=1)
neg_ts = np.stack((t_xneg, t_yneg, t_zneg), axis=1)
# print("{} {}".format(pos_ts, neg_ts))
maxpos = np.max(pos_ts, axis=1)
minneg = np.min(neg_ts, axis=1)
condition = np.array([False] * len(points))
start_contains = self._unrotated_containspoints(rot_points)
end_contains = self._unrotated_containspoints(rot_end_points)
both = np.logical_and(start_contains, end_contains)
one = np.logical_xor(start_contains, end_contains)
none = np.logical_not(np.logical_or(both, one))
# print("MAX {}; MIN {}".format(maxpos[none], minneg[none]))
# print("POS {}; NEG {}".format(pos_ts[none], neg_ts[none]))
# Handle the case where both points are in the box
condition[both] = True
# Handle the case where one point is in the box
condition[one] = np.logical_and(
maxpos[one] <= minneg[one],
np.logical_and(maxpos[one] <= 1, minneg[one] >= 0),
)
# Handle the case where both points are outside the box
if np.any(none):
possibles = np.array([False] * len(points))
possibles[none] = np.logical_and(
maxpos[none] <= minneg[none],
np.logical_and(
maxpos[none] >= 0,
np.logical_and(
minneg[none] <= 1,
np.logical_and(maxpos[none] <= 1, minneg[none] >= 0),
),
),
)
if np.any(possibles):
none_start_points = rot_points[possibles]
none_directions = rot_directions[possibles]
none_surface1 = none_start_points + np.transpose(
np.transpose(none_directions) * maxpos[possibles]
)
none_surface2 = none_start_points + np.transpose(
np.transpose(none_directions) * minneg[possibles]
)
# Update any possibles that were potentially true
possibles[possibles] = np.logical_and(
self._unrotated_containspoints(none_surface1),
self._unrotated_containspoints(none_surface2),
)
condition[none] = possibles[none]
return condition
# Support library for converting a LIDAR point (where the points are in scan order) to an image
class SphericalCartesianConverter(object):
def __init__(self, hfov, vfov, width, height):
# Assume tilt angle is 0!
# Scan from left to right
az = np.linspace(
math.pi + hfov / 2.0, math.pi - hfov / 2.0, width, dtype=np.float32
)
# Scan from top layer to bottom layer
el = np.linspace(vfov / 2.0, -vfov / 2.0, height, dtype=np.float32)
az_grid, el_grid = np.meshgrid(az, el, sparse=True)
self.z_factor = np.cos(az_grid) * np.cos(el_grid)
self.x_factor = np.sin(az_grid) * np.cos(el_grid)
self.y_factor = np.sin(el_grid)
def calculate_point_cloud(self, depth):
# These x, y, z are webots axes!
z = depth * self.z_factor # Points behind vehicle
x = depth * self.x_factor # Points to right of vehicle
y = depth * self.y_factor # Points up
points = np.stack((x.flatten(), y.flatten(), z.flatten()), axis=-1)
return points
def filter_points(points, ground_height=0.0, max_distance=99.9):
distances = np.linalg.norm(points, axis=1)
filtered_points = points[distances < max_distance]
return filtered_points[filtered_points[:, 1] > ground_height]
def calculate_point_cloud(depth, hfov, vfov, width, height):
# Assume tilt angle is 0!
# Scan from left to right
az = np.linspace(
math.pi + hfov / 2.0, math.pi - hfov / 2.0, width, dtype=np.float32
)
# Scan from top layer to bottom layer
el = np.linspace(vfov / 2.0, -vfov / 2.0, height, dtype=np.float32)
az_grid, el_grid = np.meshgrid(az, el, sparse=True)
z_factor = np.cos(az_grid) * np.cos(el_grid)
x_factor = np.sin(az_grid) * np.cos(el_grid)
y_factor = np.sin(el_grid)
# These x, y, z are webots axes!
z = depth * z_factor # Points behind vehicle
x = depth * x_factor # Points to right of vehicle
y = depth * y_factor # Points up
points = np.stack((x.flatten(), y.flatten(), z.flatten()), axis=-1)
return points
def yaw_rotation(yaw):
cos_y = math.cos(yaw)
sin_y = math.sin(yaw)
return np.array([[cos_y, 0, sin_y], [0, 1, 0], [-sin_y, 0, cos_y]])
def yaw_rotate(yaw, vectors):
rotation = yaw_rotation(yaw)
return np.inner(vectors, rotation)
# In webots yaw is CC around the y-axis!
# In webots pitch is CC around the z-axis!
# In webots roll is CC around the x-axis!
def yawrollpitch_rotation(yaw, pitch, roll):
sin_p = math.sin(pitch)
cos_p = math.cos(pitch)
sin_y = math.sin(yaw)
cos_y = math.cos(yaw)
sin_r = math.sin(roll)
cos_r = math.cos(roll)
return np.array(
[
[
cos_p * cos_y,
cos_p * sin_y * sin_r - sin_p * cos_r,
cos_p * sin_y * cos_r + sin_p * sin_r,
],
[
sin_p * cos_y,
sin_p * sin_y * sin_r + cos_p * cos_r,
sin_p * sin_y * cos_r - cos_p * sin_r,
],
[-sin_y, cos_y * sin_r, cos_y * cos_r],
]
)
def rotate(yaw, pitch, roll, vectors):
rotation = yawrollpitch_rotation(yaw, pitch, roll)
return np.inner(vectors, rotation)
def visualize_boxes(ax, boxes, markers=None, colors=None, faces_color=None):
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
if markers is None:
markers = ["o"] * len(boxes)
    if colors is None:
        colors = ["red"] * len(boxes)
    if faces_color is None:
        faces_color = ["cyan"] * len(boxes)
for box, marker, color, face_color in zip(boxes, markers, colors, faces_color):
Z = box.corners
ax.scatter(
box.corners[:, 0],
box.corners[:, 1],
box.corners[:, 2],
marker=marker,
color=color,
)
# generate list of sides' polygons of our pyramid
verts = [
[Z[0], Z[1], Z[3], Z[2]],
[Z[4], Z[5], Z[7], Z[6]],
[Z[0], Z[1], Z[5], Z[4]],
[Z[2], Z[3], Z[7], Z[6]],
[Z[1], Z[5], Z[7], Z[3]],
[Z[4], Z[0], Z[2], Z[6]],
]
# plot sides
ax.add_collection3d(
Poly3DCollection(
verts, facecolors=face_color, linewidths=0.5, edgecolors="k", alpha=0.5
)
)
ax.set_zlim(0, 50)
ax.set_xlim(50, 150)
ax.set_ylim(50, 150)
return ax
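
# Hedged sanity-check sketch for the rotation helpers above (not part of the
# original module): it rotates a unit x-vector by 90 degrees of yaw, once with
# the yaw-only helper and once with the general yaw/pitch/roll helper. With the
# matrices defined above, both results should be [0, 0, -1] in webots axes.
def _example_rotations():
    v = np.array([[1.0, 0.0, 0.0]])
    yawed = yaw_rotate(math.pi / 2.0, v)
    general = rotate(math.pi / 2.0, 0.0, 0.0, v)
    return yawed, general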
|
# Generated by Django 2.2.8 on 2020-03-24 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0004_auto_20200324_1416'),
]
operations = [
migrations.AlterField(
model_name='article',
name='abstract',
field=models.TextField(blank=True, default='此文章没有摘要', null=True, verbose_name='文章摘要'),
),
]
|
import numpy as np
A = np.array([[1,2,3],
[1,0,3],
[1,2,3]])
B = np.array([[0,2,3],
[0,2,3],
[0,2,3]])
A = np.where(A<2, 6, A)
B = np.where(A<2, 7, B)
print(B)
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import xml.etree.ElementTree
import tempfile
import contextlib
import OpenSSL.crypto
from xml.sax.saxutils import escape
import re
''' GLOBALS/PARAMS '''
FETCH_MAX_INCIDENTS = 500
SECURITY_INCIDENT_NODE_XPATH = ".//SecurityIncident"
SECURITY_INCIDENT_SUMMARY_NODE_XPATH = ".//SecurityIncidentSummary"
''' PREREQUISITES '''
@contextlib.contextmanager
def pfx_to_pem(pfx, pfx_password):
""" Decrypts the .pfx file to be used with requests. """
with tempfile.NamedTemporaryFile(suffix=".pem") as t_pem:
f_pem = open(t_pem.name, "wb")
p12 = OpenSSL.crypto.load_pkcs12(pfx, pfx_password)
f_pem.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, p12.get_privatekey()))
f_pem.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, p12.get_certificate()))
ca = p12.get_ca_certificates()
if ca is not None:
for cert in ca:
f_pem.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
f_pem.close()
yield t_pem.name
def load_server_url():
""" Cleans and loads the server url from the configuration """
url = demisto.params()["server"]
    url = re.sub("/[/]+$", "", url)
url = re.sub("/$", "", url)
return url
def load_certificate():
""" Loads the certificate and passphrase from the configuration """
cert = demisto.params()["certificate"]
cert = base64.b64decode(cert)
passphrase = demisto.params()["passphrase"] if "passphrase" in demisto.params() else ""
return cert, passphrase
def load_severities():
possible_severities = ["Emergency", "Critical", "Warning", "Informational"]
try:
severities_list = demisto.params()["severities"].replace(" ", "").split(",")
except Exception:
raise Exception("Error parsing severities parameter.")
for s in severities_list:
if s not in possible_severities:
raise Exception("Illegal argument in severities parameter.")
return ",".join(severities_list)
''' GLOBALS/PARAMS '''
SERVER_URL = load_server_url()
CERTIFICATE, CERTIFICATE_PASSPHRASE = load_certificate()
FETCH_SEVERITIES = load_severities()
DST = 1 if time.daylight else 0
''' HELPER FUNCTIONS '''
def strip_unwanted_chars(s):
return re.sub('&\S{1,6};', '', s)
def api_call(body, headers):
""" Makes an HTTP Post to the SWS incidents API using the configured certificate """
with pfx_to_pem(CERTIFICATE, CERTIFICATE_PASSPHRASE) as cert:
res = requests.post(url=SERVER_URL + "/SWS/incidents.asmx", cert=cert, data=body, headers=headers)
if res.status_code < 200 or res.status_code >= 300:
            raise Exception(
                "Got status code {} with body {} with headers {}".format(
                    res.status_code, res.content.decode("utf-8", errors="replace"), str(res.headers)))
return xml.etree.ElementTree.fromstring(res.content)
def event_to_incident(event):
""" Converts a Symantec event to a Demisto incident """
incident = dict() # type: Dict[str, Any]
incident["name"] = "Incident: %s (%s)" % (event["IncidentNumber"], event["Classification"])
incident["occurred"] = event["TimeCreated"] + "+0%s:00" % DST
incident["rawJSON"] = json.dumps(event)
labels = [] # type: List[str]
incident["labels"] = labels
return incident
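
# Hypothetical illustration of the conversion above (not part of the original
# integration): the event field values are invented; real events come from the
# SWS incidents API via get_incidents_list_request().
def _example_event_to_incident():
    event = {
        "IncidentNumber": "12345",
        "Classification": "Malicious Code",
        "TimeCreated": "2020-01-01T10:00:00",
    }
    # Produces {"name": "Incident: 12345 (Malicious Code)", "occurred": ..., "rawJSON": ...}
    return event_to_incident(event)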
def isoformat(date):
""" Convert a datetime object to asmx ISO format """
return date.isoformat()[:-3] + "Z"
''' COMMANDS + REQUESTS FUNCTIONS '''
def test():
now = datetime.utcnow()
get_incidents_list_request(isoformat(now), None, None, 1)
demisto.results("ok")
def fetch_incidents():
t = datetime.utcnow()
now = isoformat(t)
last_run = demisto.getLastRun() and demisto.getLastRun()["time"]
if len(last_run) == 0:
t = t - timedelta(minutes=10)
last_run = isoformat(t)
incidents = []
events = get_incidents_list_request(time=last_run, src_ip=None, severities=FETCH_SEVERITIES,
max_incidents=FETCH_MAX_INCIDENTS)
for event in events:
inc = event_to_incident(event)
incidents.append(inc)
demisto.incidents(incidents)
demisto.setLastRun({"time": now})
def get_incidents_list(time):
src_ip = demisto.args()["sourceIp"] if "sourceIp" in demisto.args() else None
severities = demisto.args()["severities"] if "severities" in demisto.args() else None
max_incidents = demisto.args()["max"] if "max" in demisto.args() else None
# Request events
result = get_incidents_list_request(time, src_ip, severities, max_incidents)
# Set human readable
headers = [
"IncidentNumber",
"TimeCreated",
"Severity",
"Category",
"CountryOfOrigin",
"DaysSeenGlobally",
"SourceIPString",
"Correlation",
"HostNameList",
"IsInternalExternal",
"GlobalLookbackDays",
"LatestKeyEvent",
"CustomerSeverity",
"CountryCode",
"FirstSeenInLast30Days",
"DaysSeenInLast30Days",
"DestOrganizationName",
"SourceOrganizationName",
"FirstSeenGlobally",
"CountryName",
"UserList",
"Classification",
"UpdateTimestampGMT",
"PrevalenceGlobally"
]
hr = tableToMarkdown("Incidents", result, headers)
# Set context
context = {
"Symantec MSS.Incidents list(val.IncidentNumber && val.IncidentNumber === obj.IncidentNumber)": result
}
demisto.results({
"ContentsFormat": formats["json"],
"Type": entryTypes["note"],
"Contents": result,
"EntryContext": context,
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": hr
})
def get_incidents_list_request(time, src_ip, severities, max_incidents):
src_ip = "<SourceIP>%s</SourceIP>" % src_ip if src_ip else ""
severities = "<Severity>%s</Severity>" % severities if severities else ""
max_incidents = "<MaxIncidents>%s</MaxIncidents>" % max_incidents if max_incidents else ""
body = """<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" \
xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<IncidentGetList xmlns="https://www.monitoredsecurity.com/">
<StartTimeStampGMT>%s</StartTimeStampGMT>
%s
%s
%s
</IncidentGetList>
</soap12:Body>
</soap12:Envelope>""" % (time, src_ip, severities, max_incidents)
headers = {
"content-Type": "application/soap+xml; charset=utf-8",
"content-Length": str(len(body))
}
root = api_call(body=body, headers=headers)
incident_nodes = root.findall(SECURITY_INCIDENT_SUMMARY_NODE_XPATH)
result = []
for incident in incident_nodes:
string_incident_xml = xml.etree.ElementTree.tostring(incident)
string_incident_json = xml2json(string_incident_xml)
dict_incident = json.loads(string_incident_json)["SecurityIncidentSummary"]
result.append(dict_incident)
return result
def update_incident():
# Fill in required fields from the existing incident (for the api call)
num = demisto.args()["number"]
dict_query = query_incident(num=num, workflow_query=True)
dict_workflow_query = dict_query["WorkFlowDetail"]
# Use the supplied params, filling the missing ones from the existing workflow if possible,
# if not possible - require from user
status = demisto.args()["status"] if "status" in demisto.args() else dict_workflow_query["Status"]
if not status:
raise Exception("No current status, please supply a status parameter")
resolution = demisto.args()["resolution"] if "resolution" in demisto.args() else dict_workflow_query["Resolution"]
if not resolution:
raise Exception("No current resolution, please supply a resolution parameter")
severity = demisto.args()["severity"] if "severity" in demisto.args() else dict_query["Severity"]
if not severity:
raise Exception("No current severity, please supply a severity parameter")
# Optional params
ref = demisto.args()["reference"] if "reference" in demisto.args() else None
comments = demisto.args()["comments"] if "comments" in demisto.args() else None
# Only one of them should exist
assign_to_org = demisto.args()["assignOrganization"] if "assignOrganization" in demisto.args() else None
assign_to_person = demisto.args()["assignPerson"] if "assignPerson" in demisto.args() else None
if assign_to_org and assign_to_person:
raise Exception("Unable to assign to both organization and a person, please choose only one")
if not assign_to_org and not assign_to_person:
if "AssignedOrganization" in dict_workflow_query and dict_workflow_query["AssignedOrganization"]:
assign_to_org = dict_workflow_query["AssignedOrganization"]
elif "AssignedPerson" in dict_workflow_query and dict_workflow_query["AssignedPerson"]:
assign_to_person = dict_workflow_query["AssignedPerson"]
# Make the request with the params
success = update_incident_request(num, status, resolution, ref, severity, assign_to_org, assign_to_person, comments)
# Create result
msg = "Updated successfully" if success else "Update failed"
result = [{"Update status": msg}]
hr = tableToMarkdown("", result)
demisto.results({
"ContentsFormat": formats["text"],
"Type": entryTypes["note"],
"Contents": msg,
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": hr
})
def update_incident_request(num, status, resolution, ref, severity, assign_to_org, assign_to_person, comments):
# Create optional parameter tags if needed
ref = "<Reference>%s</Reference>" % (ref) if ref else ""
assign_to_org = "<AssignedToOrganiztion>%s</AssignedToOrganiztion>" % assign_to_org if assign_to_org else ""
assign_to_person = "<AssignedToPerson>%s</AssignedToPerson>" % assign_to_person if assign_to_person else ""
comments = "<Comments>%s</Comments>" % comments if comments else ""
body = """<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" \
xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<UpdateIncidentWorkflow xmlns="https://www.monitoredsecurity.com/">
<IncidentNumber>%s</IncidentNumber>
<Status>%s</Status>
<Resolution>%s</Resolution>
%s
<Severity>%s</Severity>
%s
%s
%s
</UpdateIncidentWorkflow>
</soap12:Body>
</soap12:Envelope>""" % (num, status, resolution, ref, severity, assign_to_org, assign_to_person,
escape(comments))
headers = {
"content-Type": "application/soap+xml; charset=utf-8",
"content-Length": str(len(body))
}
res = api_call(body=body, headers=headers)
res_string_xml = xml.etree.ElementTree.tostring(res)
res_string_json = xml2json(res_string_xml)
dict_res = json.loads(res_string_json)
res = dict_res["Envelope"]["Body"]["UpdateIncidentWorkflowResponse"]["UpdateIncidentWorkflowResult"]
return res == "true"
def query_incident_cmd():
result = query_incident(demisto.args()["number"], workflow_query=True)
# Create minimal signature list
sigs = []
for sig in result["SignatureList"]["Signature"]:
sig_dict = dict() # type: Dict[str, Any]
sig_dict["SourceIPString"] = sig["SourceIPString"]
sig_dict["SignatureName"] = sig["SignatureName"]
sig_dict["VendorSignature"] = sig["VendorSignature"]
sig_dict["NumberBlocked"] = sig["NumberBlocked"]
sig_dict["NumberNotBlocked"] = sig["NumberNotBlocked"]
sigs.append(sig_dict)
# Set Human readable
flatten_relevant_fields = [{
"Incident Number": result.get("IncidentNumber", ""),
"Time Created": result.get("TimeCreated", ""),
"Status": result.get("WorkFlowDetail", {}).get("Status", ""),
"Classification": result.get("Classification", ""),
"Assigned Person": result.get("WorkFlowDetail", {}).get("AssignedPerson",
"") if result.get("WorkFlowDetail", {}) else "",
"Description": result.get("Description", ""),
"Analyst Assessment": result.get("AnalystAssessment", ""),
"Number of Analyzed Signatures": result.get("NumberOfAnalyzedSignatures", ""),
"Signaturtes": json.dumps(sigs) or "",
"Related Incidents": json.dumps(result.get("RelatedIncidents",
{}).get("IncidentNumber", "")) if result.get("RelatedIncidents",
{}) else "",
"Comment": result.get("IncidentComments", {}).get("IncidentComment",
{}).get("Comment", "") if result.get("IncidentComments",
{}) else ""
}]
headers = [
"Incident Number",
"Time Created",
"Status",
"Classification",
"Assigned Person",
"Description",
"Analyst Assessment",
"Number of Analyzed Signatures",
"Signaturtes",
"Related Incidents",
"Comment"
]
hr = tableToMarkdown("Incident query", flatten_relevant_fields, headers)
# Set context
result_ctx = {
"IncidentNumber": result.get("IncidentNumber", ""),
"NumberOfAnalyzedSignatures": result.get("NumberOfAnalyzedSignatures", ""),
"SignatureList": {
"Signature": sigs
},
"TimeCreated": result.get("TimeCreated", ""),
"Classification": result.get("Classification", ""),
"Description": result.get("Description", ""),
"AnalystAssessment": result.get("AnalystAssessment", ""),
"CountryCode": result.get("CountryCode", ""),
"CountryName": result.get("CountryName", ""),
"RelatedTickets": result.get("RelatedTickets", ""),
"WorkFlowDetail": {
"Status": result.get("WorkFlowDetail", {}).get("Status", ""),
"AssignedPerson": result.get("WorkFlowDetail", {}).get("AssignedPerson", "")
},
"RelatedIncidents": {
"IncidentNumber": result["RelatedIncidents"]["IncidentNumber"] if result.get("RelatedIncidents") else ""
}
}
if result.get('IncidentComments') and result.get('IncidentComments').get('IncidentComment'):
result_ctx["IncidentComments"] = {"IncidentComment": {
"CommentedTimeStampGMT": result["IncidentComments"]["IncidentComment"]["CommentedTimeStampGMT"],
"Comment": result["IncidentComments"]["IncidentComment"]["Comment"],
"CommentedBy": result["IncidentComments"]["IncidentComment"]["CommentedBy"]
}
}
else:
result_ctx["IncidentComments"] = {}
if result.get("IncidentAttachmentItems") and result.get('IncidentAttachmentItems').get('IncidentAttachmentItem'):
result_ctx['IncidentAttachmentItems'] = {"IncidentAttachmentItem": {
"AttachmentNumber": result["IncidentAttachmentItems"]["IncidentAttachmentItem"]["AttachmentNumber"],
"AttachmentName": result["IncidentAttachmentItems"]["IncidentAttachmentItem"]["AttachmentName"],
"UploadDateGMT": result["IncidentAttachmentItems"]["IncidentAttachmentItem"]["UploadDateGMT"],
"UploadBy": result["IncidentAttachmentItems"]["IncidentAttachmentItem"]["UploadBy"],
"Comment": result["IncidentAttachmentItems"]["IncidentAttachmentItem"]["Comment"]
}
}
else:
result_ctx['IncidentAttachmentItems'] = {}
context = {
"Symantec MSS.Incident query(val.IncidentNumber && val.IncidentNumber === obj.IncidentNumber)": result_ctx
}
demisto.results({
"ContentsFormat": formats["json"],
"Type": entryTypes["note"],
"Contents": result,
"EntryContext": context,
"ReadableContentsFormat": formats["markdown"],
"HumanReadable": hr
})
def query_incident(num, workflow_query=False):
query = query_incident_request(num) if not workflow_query else query_incident_workflow_request(num)
return query
def query_incident_request(num):
body = """<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" \
xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<IncidentQuery xmlns="https://www.monitoredsecurity.com/">
<IncidentNumber>%s</IncidentNumber>
</IncidentQuery>
</soap12:Body>
</soap12:Envelope>""" % num
headers = {
"content-Type": "application/soap+xml; charset=utf-8",
"content-Length": str(len(body))
}
query = api_call(body=body, headers=headers)
query_node = query.find(SECURITY_INCIDENT_NODE_XPATH)
string_query_xml = xml.etree.ElementTree.tostring(query_node)
string_query_json = xml2json(string_query_xml)
dict_query = json.loads(string_query_json)["SecurityIncident"]
return dict_query
def query_incident_workflow_request(num):
body = """<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" \
xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<IncidentWorkflowQuery xmlns="https://www.monitoredsecurity.com/">
<IncidentNumber>%s</IncidentNumber>
</IncidentWorkflowQuery>
</soap12:Body>
</soap12:Envelope>""" % num
headers = {
"content-Type": "application/soap+xml; charset=utf-8",
"content-Length": str(len(body))
}
query = api_call(body=body, headers=headers)
query_node = query.find(SECURITY_INCIDENT_NODE_XPATH)
string_query_xml = xml.etree.ElementTree.tostring(query_node)
string_query_json = xml2json(string_query_xml)
dict_query = json.loads(string_query_json)["SecurityIncident"]
return dict_query
''' COMMANDS MANAGER / SWITCH PANEL '''
LOG('Command being called is %s' % (demisto.command()))
try:
handle_proxy()
if demisto.command() == "fetch-incidents":
fetch_incidents()
if demisto.command() == "test-module":
test()
if demisto.command() == "symantec-mss-update-incident":
update_incident()
if demisto.command() == "symantec-mss-get-incident":
query_incident_cmd()
if demisto.command() == "symantec-mss-incidents-list":
time = demisto.args()["time"] if "time" in demisto.args() else isoformat(
datetime.utcnow() - timedelta(hours=24))
get_incidents_list(time)
# Log exceptions
except Exception as e:
return_error(str(e))
|
class WydawnictwoNadrzednePBNAdapter:
def __init__(self, original):
self.original = original
def pbn_get_json(self):
if self.original.pbn_uid_id is not None:
return {"objectId": self.original.pbn_uid_id}
ret = {}
for attr in "isbn", "issn", "title", "year":
if hasattr(self.original, attr):
v = getattr(self.original, attr)
if v is not None:
ret[attr] = v
ret["title"] = self.original.tytul_oryginalny
ret["year"] = self.original.rok
from pbn_api.adapters.wydawnictwo import WydawnictwoPBNAdapter
volume = WydawnictwoPBNAdapter(self.original).nr_tomu()
if volume:
ret["volume"] = volume
translation = WydawnictwoPBNAdapter(self.original).get_translation()
ret["translation"] = translation
return ret
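
# Hypothetical usage sketch (not part of the original module): a stand-in
# object with pbn_uid_id already set takes the early-return branch above, so no
# real database model is needed to illustrate the output shape.
def _example_pbn_adapter():
    from types import SimpleNamespace
    parent = SimpleNamespace(pbn_uid_id="00000000-fake-uid")
    # Returns {"objectId": "00000000-fake-uid"}
    return WydawnictwoNadrzednePBNAdapter(parent).pbn_get_json()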
|
try:
from .poly_nms import poly_gpu_nms
except ImportError:
poly_gpu_nms = None
def poly_nms(dets, thresh, force_cpu=False):
"""Dispatch to either CPU or GPU NMS implementations."""
if poly_gpu_nms is not None and not force_cpu:
if dets.shape[0] == 0:
return []
return poly_gpu_nms(dets, thresh, device_id=0)
    else:
        raise NotImplementedError("The CPU version of poly_nms has not been implemented yet!")
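
# Hypothetical call sketch (not part of the original module). The dets layout
# is an assumption based on common polygon-NMS conventions: one row per box,
# eight polygon vertex coordinates followed by a confidence score.
def _example_poly_nms():
    import numpy as np
    dets = np.array([
        [0, 0, 10, 0, 10, 10, 0, 10, 0.9],
        [1, 1, 11, 1, 11, 11, 1, 11, 0.8],
    ], dtype=np.float32)
    # Requires the compiled GPU extension; raises otherwise (see above)
    return poly_nms(dets, thresh=0.5)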
|
from dataclasses import dataclass
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from pytorch3d.pathtracer.warps import (
square_to_cos_hemisphere, square_to_cos_hemisphere_pdf,
NeuralWarp, MipMap,
)
from pytorch3d.pathtracer.utils import (
pos_weak_sigmoid, cartesian_to_log_polar, param_rusin,
param_rusin2, fwidth, dir_to_uv, weak_sigmoid,
)
from pytorch3d.pathtracer.neural_blocks import ( SkipConnMLP, DensityEstimator )
from itertools import chain
import pytorch3d.pathtracer as pt
from ..interaction import ( partial_frame, to_local )
# A sample of a light bounce from a BSDF
@dataclass
class BSDFSample:
pdf: torch.Tensor = 0
wo: torch.Tensor = 0
eta: torch.Tensor = 1
# Combines two mutually exclusive BSDF samples
  def combine(self, other, mask_self, mask_other):
    pdf = torch.where(mask_self, self.pdf,
      torch.where(mask_other, other.pdf, torch.zeros_like(other.pdf)))
    # wo carries a trailing xyz dimension, so broadcast the boolean masks along it
    wo = torch.where(mask_self.unsqueeze(-1), self.wo,
      torch.where(mask_other.unsqueeze(-1), other.wo, torch.zeros_like(other.wo)))
    return BSDFSample(pdf=pdf, wo=wo, eta=self.eta)
# Empty BSDF sample
@classmethod
def zeros_like(cls, like):
return cls(
pdf = torch.zeros(like.shape[:-1], device=like.device),
wo = torch.zeros_like(like),
eta = 1, # :)
)
# Composes together a weighted sample of BSDFs (like a gather almost)
@staticmethod
def compose(samples: ["BSDFSample"], k, selections):
pdfs = torch.stack([s.pdf for s in samples], dim=-1)\
.reshape(-1, k.shape[-1])
pdf = pdfs[range(pdfs.shape[0]), selections]
# Have to multiply pdf by k since it's joint likelihood of selecting item
pdf = pdf * k.reshape(-1, k.shape[-1])[range(pdf.shape[0]), selections]
pdf = pdf.reshape_as(samples[0].pdf)
wos = torch.stack([s.wo for s in samples], dim=-1)\
.reshape(-1, 3, k.shape[-1])
wo = wos[range(wos.shape[0]), :, selections].reshape_as(samples[0].wo)
return BSDFSample(
pdf = pdf,
wo = F.normalize(wo, dim=-1),
# FIXME well it's not currently used...
eta = samples[0].eta,
)
# General interface for a BSDF
class BSDF(nn.Module):
def __init__(self):
super().__init__()
def sample(self, it, sampler, active=True): raise NotImplementedError()
def eval_and_pdf(self, it, wo, active=True): raise NotImplementedError()
def joint_eval_pdf(self, it, wo, active=True):
spectrum, pdf = self.eval_and_pdf(it, wo, active)
return torch.cat([ spectrum, pdf.reshape(spectrum.shape[:-1] + (1,)) ], dim=-1)
def eval(self, it, wo, active=True): return self.eval_and_pdf(it, wo, active)[0]
def pdf(self, it, wo, active=True): return self.eval_and_pdf(it, wo, active)[1]
def identity(x): return x
def identity_div_pi(x): return x/math.pi
# Diffuse BSDF with additional preprocessing (postprocessing?) functionality.
class Diffuse(BSDF):
def __init__(self, reflectance=[0.25, 0.2, 0.7], preprocess=identity_div_pi, device="cuda"):
super().__init__()
if type(reflectance) == list:
self.reflectance = torch.tensor(reflectance, device=device, requires_grad=True)
else:
self.reflectance = reflectance
self.preproc = preprocess
def parameters(self): return [self.reflectance]
def random(self):
self.reflectance = torch.rand_like(self.reflectance, requires_grad=True)
return self
def sample(self, it, sampler, active=True):
cos_theta_i = it.wi[..., 2]
bs = BSDFSample.zeros_like(it.p)
active = (cos_theta_i > 0) & active
if not active.any(): return bs, torch.zeros_like(it.p)
bs.wo = square_to_cos_hemisphere(sampler.sample(it.shape()[:-1] + (2,), device=it.device()))
bs.wo = F.normalize(bs.wo, dim=-1)
bs.pdf = square_to_cos_hemisphere_pdf(bs.wo)
bs.eta = 1.0
bs.sampled_component = 0
# cast spectrum to same shape as interaction
spectrum = self.preproc(self.reflectance).expand(*it.shape()).clone()
#spectrum[(~active) | (bs.pdf <= 0), :] = 0
return bs, spectrum
def eval_and_pdf(self, it, wo, active=True):
cos_theta_i = it.wi[..., 2]
cos_theta_o = wo[..., 2]
#active = (cos_theta_i > 0) & (cos_theta_o > 0) & active
spectrum = self.preproc(cos_theta_o.unsqueeze(-1) * self.reflectance)
#spectrum[~active] = 0
pdf = square_to_cos_hemisphere_pdf(wo)
#pdf[~active] = 0
return spectrum, pdf
# Reflection vector
@torch.jit.script
def reflect(n, v): return 2 * (n * v).sum(keepdim=True, dim=-1) * n - v
# Reflection vector in local frame
@torch.jit.script
def local_reflect(v):
x, y, z = v.split(1, dim=-1)
return torch.cat([-x, -y, z], dim=-1)
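
# Hedged sanity check for the reflection helpers above (not part of the
# original module); assumes the local shading-frame convention used throughout
# this file, where +z is the surface normal.
def _example_reflect():
  n = torch.tensor([[0.0, 0.0, 1.0]])
  v = torch.tensor([[0.7071, 0.0, 0.7071]])
  r = reflect(n, v)           # mirror about the normal -> [-0.7071, 0, 0.7071]
  r_local = local_reflect(v)  # same direction, written purely in the local frame
  return r, r_local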
# A Phong BSDF with a few parameters and pre/post processing.
class Phong(BSDF):
def __init__(
self,
diffuse=[0.6, 0.5, 0.7],
specular=[0.8, 0.8, 0.8],
min_spec=1,
device="cuda",
):
super().__init__()
if type(diffuse) == list:
self.diffuse = torch.tensor(diffuse, device=device, requires_grad=True)
else: self.diffuse = diffuse
if type(specular) == list:
self.specular = torch.tensor(specular, device=device, requires_grad=True)
else: self.specular = specular
self.shine = torch.tensor(40., dtype=torch.float, device=device, requires_grad=True)
self.min_spec = min_spec
def parameters(self): return [self.specular, self.diffuse, self.shine]
def random(self):
self.shine = torch.rand_like(self.shine, requires_grad=True)
self.specular = torch.rand_like(self.specular, requires_grad=True)
self.diffuse = torch.rand_like(self.diffuse, requires_grad=True)
return self
def sample(self, it, sampler, active=True):
cos_theta_i = it.wi[..., 2]
bs = BSDFSample.zeros_like(it.p)
active = (cos_theta_i > 0) & active
if not active.any(): return bs, torch.zeros_like(it.p)
bs.wo = square_to_cos_hemisphere(sampler.sample(it.shape()[:-1] + (2,), device=it.device()))
bs.pdf = square_to_cos_hemisphere_pdf(bs.wo)
bs.eta = 1.0
bs.sampled_component = 0
# cast spectrum to same shape as interaction
cos_theta_o = bs.wo[..., 2]
active = (cos_theta_o > 0) & active
R = reflect(it.frame[..., 2], it.wi)
spectral = (R * bs.wo).sum(dim=-1).clamp(min=1e-20).pow(self.min_spec + self.shine.exp())
spectrum = cos_theta_i.unsqueeze(-1) * self.diffuse/math.pi + \
spectral.unsqueeze(-1) * self.specular/math.pi
spectrum[(~active) | (bs.pdf <= 0), :] = 0
return bs, spectrum
def eval_and_pdf(self, it, wo, active=True):
cos_theta_i = it.wi[..., 2]
cos_theta_o = wo[..., 2]
# active = (cos_theta_i > 0) & (cos_theta_o > 0) & active
R = reflect(it.frame[..., 2], it.wi)
spectral = (R * wo).sum(dim=-1).clamp(min=1e-20).pow(self.min_spec + self.shine.exp())
spectrum = cos_theta_i.unsqueeze(-1) * self.diffuse/math.pi + \
spectral.unsqueeze(-1) * self.specular/math.pi
# just a guess of the PDF since it's not physically based
pdf = square_to_cos_hemisphere_pdf(wo)
#spectrum[~active] = 0
#pdf[~active] = 0
return spectrum, pdf
# fresnel and fresnel_diff_refl taken from Mitsuba
# https://github.com/mitsuba-renderer/mitsuba2/blob/master/include/mitsuba/render/fresnel.h
def fresnel(cos_t, eta: float):
def fnma(x, y, z): return -x * y + z
def fma(x, y, z): return x * y + z
out_mask = (cos_t >= 0)
inv_eta = 1/eta
eta_it = torch.where(out_mask, eta, inv_eta)
eta_ti = torch.where(out_mask, inv_eta, eta)
cos_tt_sqr = fnma(fnma(cos_t, cos_t, 1), eta_ti * eta_ti, 1)
cos_t_abs = cos_t.abs()
cos_tt_abs = cos_tt_sqr.clamp(min=1e-10).sqrt()
idx_match = (eta == 1)
special_case = (cos_t_abs == 0) | idx_match
a_s = fnma(eta_it, cos_tt_abs, cos_t_abs)/\
fma(eta_it, cos_tt_abs, cos_t_abs)
a_p = fnma(eta_it, cos_t_abs, cos_tt_abs)/\
fma(eta_it, cos_t_abs, cos_tt_abs)
r = 0.5 * (a_s.square() + a_p.square())
r[special_case] = 0 if idx_match else 1
cos_tt = cos_tt_abs * -cos_t.sign()
return r, cos_tt, eta_it, eta_ti
def fresnel_diff_refl(eta):
if eta < 1:
return -1.4399 * (eta * eta) \
+ 0.7099 * eta \
+ 0.6681 \
+ 0.0636 / eta
inv_eta = 1/eta
inv_eta_2 = inv_eta * inv_eta
inv_eta_3 = inv_eta_2 * inv_eta
inv_eta_4 = inv_eta_3 * inv_eta
inv_eta_5 = inv_eta_4 * inv_eta
return 0.919317 - 3.4793 * inv_eta \
+ 6.75335 * inv_eta_2 \
- 7.80989 * inv_eta_3 \
+ 4.98554 * inv_eta_4 \
- 1.36881 * inv_eta_5
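
# Hedged sanity check (not part of the original module): at normal incidence
# with eta = 1.5 the Fresnel reflectance is ((1.5 - 1) / (1.5 + 1))^2 = 0.04,
# which the implementation above reproduces. Same call pattern as Plastic uses.
def _example_fresnel():
  r, cos_tt, eta_it, eta_ti = fresnel(torch.tensor([1.0]), 1.5)
  return r  # ~= tensor([0.04])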
# A BSDF for representing plastic as per Mitsuba.
class Plastic(BSDF):
def __init__(
self,
diffuse=[0.5, 0.5, 0.5],
specular=[1.,1.,1.],
int_ior:float=1.49, ext_ior:float=1.000277,
device="cuda",
  ):
    super().__init__()
if type(diffuse) == list:
self.diffuse = torch.tensor(diffuse, device=device, requires_grad=True)
else: self.diffuse = diffuse
if type(specular) == list:
self.specular = torch.tensor(specular, device=device, requires_grad=True)
else: self.specular = specular
assert(int_ior > 0)
assert(ext_ior > 0)
self.eta = int_ior/ext_ior
self.inv_eta_2 = 1/(self.eta * self.eta)
self.fdr_int = fresnel_diff_refl(1/self.eta)
self.fdr_ext = fresnel_diff_refl(self.eta)
def spec_sample_weight(self):
d = self.diffuse.mean()
s = self.specular.mean()
return s/(d+s)
def parameters(self): return [self.diffuse, self.specular]
def random(self):
self.specular = torch.rand_like(self.specular, requires_grad=True)
self.diffuse = torch.rand_like(self.diffuse, requires_grad=True)
return self
def eval_and_pdf(self, it, wo, active=True):
cos_theta_i = it.wi[..., 2]
cos_theta_o = wo[..., 2]
active = (cos_theta_i > 0) & (cos_theta_o > 0) & active
f_i = fresnel(cos_theta_i, self.eta)[0]
f_o = fresnel(cos_theta_o, self.eta)[0]
pdf = square_to_cos_hemisphere_pdf(wo)
spectrum = (self.diffuse.expand_as(it.p)/(1 - self.fdr_int)) \
* self.inv_eta_2 * (pdf * (1 - f_i) * (1 - f_o)).unsqueeze(-1)
# DeltaReflection
ssw = self.spec_sample_weight()
prob_specular = ssw * f_i
prob_diffuse = (1 - f_i) * (1-ssw)
prob_diffuse = prob_diffuse/(prob_specular + prob_diffuse)
pdf = pdf * prob_diffuse
#spectrum[~active] = 0
#pdf[~active] = 0
return spectrum, pdf
def sample(self, it, sampler, active=True):
bs = BSDFSample.zeros_like(it.p)
spectrum = torch.zeros_like(it.p)
cos_theta_i = it.wi[..., 2]
active = (cos_theta_i > 0) & active
f_i = fresnel(cos_theta_i, self.eta)[0]
spec_sample_weight = self.spec_sample_weight()
p_spec = f_i * spec_sample_weight
p_diff = (1 - f_i) * (1 - spec_sample_weight)
p_spec = (p_spec)/(p_spec + p_diff)
p_diff = 1 - p_spec
sample_spec = active & (sampler.sample(p_spec.shape) < p_spec)
# sample_diff = active & (~sample_spec)
bs.wo = torch.where(
sample_spec.unsqueeze(-1),
reflect(it.frame[..., 2], it.wi),
square_to_cos_hemisphere(sampler.sample(it.shape()[:-1] + (2,), device=it.device())),
)
bs.pdf = torch.where(
sample_spec,
p_spec,
p_diff * square_to_cos_hemisphere_pdf(bs.wo),
).clamp(min=1e-10)
f_o = fresnel(bs.wo[..., 2], self.eta)[0]
spectrum = torch.where(
sample_spec.unsqueeze(-1),
self.specular * (f_i/bs.pdf).unsqueeze(-1),
self.diffuse.expand_as(it.p) / (1- self.fdr_int) \
* bs.pdf.unsqueeze(-1) * self.inv_eta_2 *\
(1 - f_i.unsqueeze(-1)) * (1 - f_o.unsqueeze(-1))
)
return bs, spectrum
@torch.jit.script
def fresnel_conductor(cos_t, eta_r: float, eta_i: float):
ct2 = cos_t * cos_t
st2 = (1 - ct2).clamp(min=1e-10)
st4 = st2 * st2
tmp = eta_r * eta_r - eta_i * eta_i - st2
a_2_pb_2 = (tmp*tmp + 4 * eta_i * eta_i * eta_r * eta_r).clamp(min=1e-10).sqrt()
a = (0.5 * (a_2_pb_2 + tmp)).clamp(min=1e-10).sqrt()
t1 = a_2_pb_2 + ct2
t2 = 2 * cos_t * a
r_s = (t1 - t2)/(t1 + t2)
t3 = a_2_pb_2 * ct2 + st4
t4 = t2 * st2
r_p = r_s * (t3 - t4) / (t3 + t4)
return 0.5 * (r_s + r_p)
# A BSDF for representing an entirely reflective conductor.
# Not thoroughly tested but should generally work.
class Conductor(BSDF):
def __init__(
self,
specular=[1.,1.,1.],
eta:float=1.3,
k:float=1,
device="cuda",
activation = torch.sigmoid,
):
super().__init__()
self.eta = torch.tensor(eta, requires_grad=True, dtype=torch.float)
self.k = torch.tensor(k, requires_grad=True, dtype=torch.float)
if type(specular) == list:
self.specular = torch.tensor(specular, device=device, requires_grad=True)
else: self.specular = specular
self.act = activation
def random(self):
self.specular = torch.rand_like(self.specular, requires_grad=True)
return self
def eval_and_pdf(self, it, wo, active=True):
spectrum = torch.zeros_like(it.p)
pdf = torch.zeros(it.p.shape[:-1], dtype=torch.float, device=it.p.device)
#active = (it.wi[..., 2] > 0) & (wo[..., 2] > 0) & active
refl = local_reflect(it.wi)
thresh = (refl * wo).sum(dim=-1, keepdim=True) > 0.94
fresnel = fresnel_conductor(it.wi[..., 2], F.softplus(self.eta), 0.0).reshape_as(thresh)
spectrum = torch.where(
thresh,
fresnel * self.act(self.specular),
torch.zeros_like(spectrum),
)
pdf = torch.where(
thresh.reshape_as(pdf),
torch.ones_like(pdf),
torch.zeros_like(pdf),
)
spectrum = torch.where(
active.unsqueeze(-1),
spectrum, torch.zeros_like(spectrum),
)
return spectrum, pdf
def parameters(self): return [self.eta, self.k, self.specular]
def sample(self, it, sampler, active=True):
cos_theta_i = it.wi[..., 2]
active = (cos_theta_i > 0) & active
bs = BSDFSample.zeros_like(it.p)
spectrum = torch.zeros_like(it.p)
    bs.wo = local_reflect(it.wi)
    bs.eta = 1
    bs.pdf = torch.ones_like(cos_theta_i)
spectrum[active] = self.specular * fresnel_conductor(cos_theta_i, self.eta, self.k)[active]
return bs, spectrum
# inverts a direction along the z-axis
def invert_z(xyz) -> torch.Tensor:
x, y, z = xyz.split(1, dim=-1)
return torch.cat([x, y, -z], dim=-1)
# A 2-Sided BSDF, which by default makes both sides one BSDF.
class Bidirectional(BSDF):
def __init__(self, front, back=None):
super().__init__()
self.front = front
if back is None: back = front
self.back = back
def sample(self, it, sampler, active=True):
cos_theta_i = it.wi[..., 2]
front = (cos_theta_i > 0) & active
back = (cos_theta_i < 0) & active
front_bs, front_spectrum = self.front.sample(it, sampler, front)
# perform back-side sampling
original_wi = it.wi
it.wi = invert_z(it.wi)
back_bs, back_spectrum = self.back.sample(it, sampler, back)
back_bs.wo = invert_z(back_bs.wo)
it.wi = original_wi
spectrum = torch.where(front, front_spectrum,
torch.where(back, back_spectrum, torch.zeros_like(back_spectrum)))
    return front_bs.combine(back_bs, front, back), spectrum
def eval_and_pdf(self, it, wo, active=True):
cos_theta_i = it.wi[..., 2]
front = (cos_theta_i > 0) & active
back = (cos_theta_i < 0) & active
front_eval, front_pdf = self.front.eval_and_pdf(it, wo, front)
og_wi = it.wi
it.wi = invert_z(og_wi)
back_eval, back_pdf = self.back.eval_and_pdf(it, invert_z(wo), back)
it.wi = og_wi
spectrum = torch.where(front.unsqueeze(-1), front_eval,
torch.where(back.unsqueeze(-1), back_eval, torch.zeros_like(back_eval)))
pdf = torch.where(front, front_pdf,
torch.where(back, back_pdf, torch.zeros_like(back_pdf)))
return spectrum, pdf
# Composes a bunch of BSDFs together using some static weighting (not spatially varying)
class Compose(BSDF):
  def __init__(self, bsdfs: [BSDF], device="cuda"):
    super().__init__()
# have to keep it as a list but I wish I could dispatch to all of them simultaneously
# aghhhh pythonnnnn
self.bsdfs = bsdfs
self.weights = torch.rand(len(bsdfs), device=device) + 0.5
def sample(self, it, sampler, active=True):
raise NotImplementedError()
def eval_and_pdf(self, it, wo, active=True):
spec_pdf = self.normalized_weights() * torch.stack([
bsdf.joint_eval_pdf(it, wo, active)
for bsdf in self.bsdfs
], dim=-1)
spectrum, pdf = spec_pdf.sum(dim=-1).split([3, 1], dim=-1)
return spectrum, pdf.squeeze(-1)
def normalized_weights(self):
return F.softmax(self.weights, dim=-1)
def parameters(self):
return chain(
*[bsdf.parameters() for bsdf in self.bsdfs],
[self.weights],
)
def own_parameters(self): return [self.weights]
# A spatially-varying BSDF which is determined by some function f(xyz) -> [# BSDFS].
# By default it is a learned composition, but it can be a normal function as well.
class ComposeSpatialVarying(BSDF):
def __init__(self, bsdfs: [BSDF], spatial_varying_fn= None, device="cuda"):
super().__init__()
self.bsdfs = bsdfs
if spatial_varying_fn is None:
self.sp_var_fn = SkipConnMLP(
num_layers=16,
hidden_size=256,
freqs=128,
sigma=2<<6,
in_size=3, out=len(bsdfs),
device=device,
xavier_init=True,
).to(device)
else:
self.sp_var_fn = spatial_varying_fn
self.preprocess = identity
def sample(self, it, sampler, active=True):
bsdf_samples, spectrums = list(zip(
*[bsdf.sample(it, sampler, active) for bsdf in self.bsdfs]
))
k = self.normalized_weights(it.p, it)
selections = torch.multinomial(k.reshape(-1, len(self.bsdfs)), num_samples=1).squeeze(-1)
spectrums = torch.stack(spectrums, dim=-1).reshape(-1, 3, len(self.bsdfs))
spectrum = spectrums[range(spectrums.shape[0]), :, selections]\
.reshape_as(it.p)
bs = BSDFSample.compose(bsdf_samples, k, selections)
# how does one sample from a linear combination of BSDFs?
# This is just an approximation by sampling from one of them
return bs, spectrum
def eval_and_pdf(self, it, wo, active=True):
k = self.normalized_weights(it.p, it)
spec_pdf = torch.stack([
bsdf.joint_eval_pdf(it, wo, active) for bsdf in self.bsdfs
], dim=-1)
setattr(it, 'normalized_weights', k)
spec_pdf = torch.where(
active[..., None, None],
spec_pdf * k.unsqueeze(-2),
torch.zeros_like(spec_pdf),
)
spectrum, pdf = spec_pdf.sum(dim=-1).split([3, 1], dim=-1)
return spectrum, pdf.squeeze(-1)
def normalized_weights(self, p, it):
out_shape = p.shape[:-1] + (len(self.bsdfs),)
weights = self.sp_var_fn(self.preprocess(p))
weights = weights.reshape(out_shape)
setattr(it, 'nonnormalized_weights', weights)
#return F.softmax(weights, dim=-1)
# Softmax seems to have some issues with local minima, so below also works
return weights.sigmoid()
def parameters(self): return chain(self.own_parameters(), self.child_parameters())
def own_parameters(self): return self.sp_var_fn.parameters()
def child_parameters(self): return chain(*[bsdf.parameters() for bsdf in self.bsdfs])
# Hard classifier of BSDFs (used during experimentation)
class SelectBSDF(BSDF):
def __init__(self, selector, bsdfs, device="cuda"):
super().__init__()
self.selector = selector
self.bsdfs = bsdfs
def select(self, p):
return self.selector(p)
def parameters(self):
return chain(*[bsdf.parameters() for bsdf in self.bsdfs])
def sample(self, it, sampler, active=True):
raise NotImplementedError()
def eval_and_pdf(self, it, wo, active=True):
spec_pdf = torch.stack([
bsdf.joint_eval_pdf(it, wo, active) for bsdf in self.bsdfs
], dim=-1)
i = self.select(it.p)
flat_spec_pdf = spec_pdf.reshape(-1, 4, len(self.bsdfs))
spec_pdf = flat_spec_pdf[range(flat_spec_pdf.shape[0]), :, i]\
.reshape(spec_pdf.shape[:-1])
spectrum, pdf = spec_pdf.split([3, 1], dim=-1)
return spectrum, pdf.squeeze(-1)
# One big MLP for both spatially-varying and coloration (used while developing)
class GlobalNeuralBSDF(BSDF):
def __init__(self, device="cuda"):
super().__init__()
self.attenuation = SkipConnMLP(
in_size=3, out=1,
num_layers=3, hidden_size=64,
activation=F.relu,
device=device,
).to(device)
self.color = SkipConnMLP(
in_size=3, out=3,
num_layers=3, hidden_size=64,
activation=F.relu,
).to(device)
self.dist = NeuralWarp(device=device).to(device)
def parameters(self):
return chain(self.attenuation.parameters(), self.color.parameters(), self.dist.parameters())
def random(self): return self
def sample(self, it, sampler, active=True):
bs = BSDFSample.zeros_like(it.p)
direc, pdf = self.dist(it.p.shape[:-1])
bs.wo = F.normalize(direc, eps=1e-7, dim=-1)
bs.pdf = pdf.unsqueeze(-1)
bs.eta = 1.0
bs.sampled_component = 0
# cast spectrum to same shape as interaction
attenuation = (1+self.attenuation(param_rusin(it.n, it.wi, bs.wo)))/2
spectrum = attenuation * ((1 + self.color(it.p))/2)
w = (0.5 * fwidth(spectrum))
spectrum = spectrum * w.sin()/w.clamp(min=1e-7)
spectrum[(~active) | (bs.pdf <= 0), :] = 0
return bs, spectrum
def eval_and_pdf(self, it, wo, active=True):
attenuation = self.attenuation(param_rusin(it.n, it.wi, wo))
spectrum = attenuation * ((1 + self.color(it.p))/2)
w = (0.5 * fwidth(spectrum))
spectrum = spectrum * w.sin()/w.clamp(min=1e-7)
pdf = self.dist.pdf(dir_to_uv(wo)).unsqueeze(-1)
return spectrum, pdf
# A single component BSDF which is a just neural net f(rusin) -> RGB
class NeuralBSDF(BSDF):
def __init__(self, activation=torch.sigmoid, device="cuda"):
super().__init__()
self.mlp = SkipConnMLP(
in_size=3, out=3,
num_layers=6, hidden_size=96,
freqs=64,
device=device,
).to(device)
self.act = activation
def parameters(self): return chain(self.mlp.parameters())
def random(self): return self
def sample(self, it, sampler, active=True):
cos_theta_i = it.wi[..., 2]
bs = BSDFSample.zeros_like(it.p)
bs.wo = square_to_cos_hemisphere(sampler.sample(it.shape()[:-1] + (2,), device=it.device()))
bs.wo = F.normalize(bs.wo, dim=-1)
bs.pdf = square_to_cos_hemisphere_pdf(bs.wo)
bs.eta = 1.0
spectrum = self.act(self.mlp(param_rusin2(it.wi, bs.wo)))
return bs, spectrum
def eval_and_pdf(self, it, wo, active=True):
spectrum = self.act(self.mlp(param_rusin2(it.wi, wo)))
pdf = torch.ones(spectrum.shape[:-1], device=spectrum.device)
return spectrum, pdf
# Zeros out this MLP so that it doesn't return any colors. Can be useful when analyzing their
# outputs.
def zero(self):
class Zero(nn.Module):
def __init__(self): super().__init__()
def forward(self, x): return torch.zeros_like(x)
self.mlp = Zero()
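
# Hypothetical construction sketch (not exercised anywhere in this module):
# builds a two-lobe spatially varying BSDF from the classes above. The device
# string is an assumption; interactions come from the surrounding path tracer.
def _example_build_bsdf(device="cpu"):
  lobes = [Diffuse(device=device), Phong(device=device)]
  return ComposeSpatialVarying(lobes, device=device)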
|
from typing import Optional, Tuple
from uuid import UUID
from flask import request, current_app
from functools import wraps
from dateutil import parser
from datetime import datetime
from sqlalchemy import and_, or_
After = Tuple[Optional[datetime], Optional[str]]
def paginated(f):
@wraps(f)
def paginated_wrapper(*args, **kwargs):
"""
Parses details about a page being requested by its urls parameters
and injects them into the wrapped function's kwargs
Handles parameters of the form:
?after=1529089066.003078&after_uuid=e29fba44-6d39-4719-b600-97aadbe876a0&limit=10
        Where the ?after parameter is either a timestamp or a parseable
datetime (as determined by the dateutil module).
The ?after_uuid parameter is the uuid used to resolve any conflicts
between rows with the same created_at datetime.
The ?limit parameter specifies how many results to return on a page
that occur after the specified ?after and ?after_uuid parameters
"""
def_limit = current_app.config['DEFAULT_PAGE_LIMIT']
max_limit = current_app.config['MAX_PAGE_LIMIT']
limit = min(request.args.get('limit', def_limit, type=int), max_limit)
after_date = request.args.get('after', '')
after_uuid = request.args.get('after_uuid', None)
# Assume timestamp if ?after is a number
if after_date.replace('.', '').isdigit():
after_date = float(after_date)
after_date = datetime.fromtimestamp(after_date)
# Otherwise, try to extract a datetime with dateutil parser
else:
try:
after_date = parser.parse(after_date)
# Parser couldn't derive a datetime from the string
except ValueError:
# Fallback to begining of the epoch if we can't parse
after_date = datetime.fromtimestamp(0)
# Try to parse a valid UUID, if there is one
if after_uuid is not None:
try:
after_uuid = str(UUID(after_uuid))
except ValueError:
after_uuid = None
# Default the uuid to the zero uuid if none is specified
if after_uuid is None:
after_uuid = str(UUID(int=0))
after = (after_date, after_uuid)
return f(*args, **kwargs, after=after, limit=limit)
return paginated_wrapper
class Pagination(object):
"""
Object to help paginate through endpoints using the created_at field and
uuid fields
"""
def __init__(self, query: str, after: After, limit: int):
self.query = query
self.after = after
self.limit = limit
self.total = query.count()
# Assumes that we only provide queries for one entity
# This is safe as pagination only accesses one entity at a time
model = query._entities[0].mapper.entity
after_date, after_uuid = after
query = query.order_by(model.created_at.asc(), model.uuid.asc())
# Resolve any rows that have the same created_at time by their uuid,
# return all other rows that were created later
query = query.filter(
or_(
and_(model.created_at == after_date, model.uuid > after_uuid),
model.created_at > after_date,
)
)
query = query.limit(limit)
self.items = query.all()
@property
def prev_num(self) -> After:
""" Returns the (datetime, uuid) tuple of the first item """
if len(self.items) > 0:
return (self.items[0].created_at, str(self.items[0].uuid))
return (datetime.fromtimestamp(0), str(UUID(int=0)))
@property
def curr_num(self) -> Optional[After]:
""" Returns the after index of the current page """
return self.after
@property
def next_num(self) -> Optional[After]:
""" Returns the timestamp, uuid of the last item"""
if self.has_next:
return (self.items[-1].created_at, str(self.items[-1].uuid))
@property
def has_next(self):
""" True if there are more than `limit` results """
return len(self.items) >= self.limit
def indexd_pagination(q, after, limit):
"""
Special logic to paginate through indexd objects.
Whenever an indexd object is encountered that has been deleted in indexd,
    the file is then deleted in the dataservice, thus making it necessary to
re-fetch new files to return the desired amount of objects per page
:param q: The base query to perform
:param after: The earliest datetime to return objects from
:param limit: The maximum number of objects to return in a page
:returns: A Pagination object
"""
pager = Pagination(q, after, limit)
keep = []
refresh = True
next_after = None
# Continue updating the page until we get a page with no deleted files
while (pager.total > 0 and refresh):
refresh = False
# Move the cursor ahead to the last valid file
        next_after = (keep[-1].created_at, str(keep[-1].uuid)) if len(keep) > 0 else after
# Number of results needed to fulfill the original limit
remain = limit - len(keep)
pager = Pagination(q, next_after, remain)
for st in pager.items:
if hasattr(st, 'was_deleted') and st.was_deleted:
refresh = True
else:
keep.append(st)
# Replace original page's items with new list of valid files
pager.items = keep
pager.after = next_after if next_after else after
return pager
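
# Hypothetical usage sketch (not part of the original module): given any
# SQLAlchemy query over an entity with created_at and uuid columns, return one
# page of rows plus the cursor for the next request. The (after, limit) pair is
# exactly what the `paginated` decorator above injects from the URL parameters.
def example_page(query, after: After, limit: int = 10):
    pager = Pagination(query, after, limit)
    return pager.items, pager.next_num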
|
import asyncio
from typing import Optional
from sqlalchemy import BigInteger, cast, select
from sqlalchemy.ext.asyncio import AsyncSession
from Backend.core.errors import CustomException
from Backend.crud.base import CRUDBase
from Backend.crud.misc.persistentMessages import persistent_messages
from Backend.database.models import LfgMessage
from Backend.misc.cache import cache
from Shared.networkingSchemas.destiny.lfgSystem import AllLfgDeleteOutputModel, LfgOutputModel, UserAllLfgOutputModel
class CRUDLfgMessages(CRUDBase):
@staticmethod
async def get_channel_id(db: AsyncSession, guild_id: int) -> int:
"""Return the guild's lfg channel id"""
# check cache:
async with asyncio.Lock():
cache_key = f"{guild_id}|lfg_channel"
# populate cache
if cache_key not in cache.persistent_messages:
try:
await persistent_messages.get(db=db, guild_id=guild_id, message_name="lfg_channel")
except CustomException as e:
if e.error == "PersistentMessageNotExist":
cache.persistent_messages.update({cache_key: None})
else:
raise e
result = cache.persistent_messages[cache_key]
if not result:
raise CustomException("NoLfgChannelForGuild")
return result.channel_id if result else None
@staticmethod
async def get_voice_category_channel_id(db: AsyncSession, guild_id: int) -> Optional[int]:
"""Return the guild's lfg voice category channel id if set"""
# check cache:
async with asyncio.Lock():
cache_key = f"{guild_id}|lfg_voice_category"
# populate cache
if cache_key not in cache.persistent_messages:
try:
await persistent_messages.get(db=db, guild_id=guild_id, message_name="lfg_voice_category")
except CustomException as e:
if e.error == "PersistentMessageNotExist":
cache.persistent_messages.update({cache_key: None})
else:
raise e
result = cache.persistent_messages[cache_key]
return result.channel_id if result else None
async def insert(self, db: AsyncSession, to_create: LfgMessage):
"""Inserts the lfg info and gives it a new id"""
await self._insert(db=db, to_create=to_create)
async def get(self, db: AsyncSession, lfg_id: int, guild_id: int) -> LfgMessage:
"""Get the lfg info for the guild"""
result = await self._get_with_key(db=db, primary_key=lfg_id)
# check exists and guild
if (not result) or (result.guild_id != guild_id):
raise CustomException("NoLfgEventWithIdForGuild")
return result
async def get_all(self, db: AsyncSession, guild_id: int) -> list[LfgMessage]:
"""Get the lfg info for the guild"""
return await self._get_multi(db=db, guild_id=guild_id)
async def get_user(self, db: AsyncSession, discord_id: int) -> UserAllLfgOutputModel:
"""Get the lfg infos for the user"""
result = UserAllLfgOutputModel()
joined = await self._get_user_events(db=db, discord_id=discord_id, joined=True)
result.joined = [LfgOutputModel.from_orm(obj) for obj in joined]
backup = await self._get_user_events(db=db, discord_id=discord_id, backup=True)
result.backup = [LfgOutputModel.from_orm(obj) for obj in backup]
return result
async def delete(self, db: AsyncSession, lfg_id: int, guild_id: int, discord_id: int):
"""Delete the lfg info belonging to the lfg id and guild"""
obj = await self.get(db=db, lfg_id=lfg_id, guild_id=guild_id)
# check author
await self._check_author(obj=obj, discord_id=discord_id)
await self._delete(db=db, obj=obj)
async def delete_all(self, db: AsyncSession, guild_id: int) -> AllLfgDeleteOutputModel:
"""Delete all lfg events for the guild"""
objs: list[LfgMessage] = await self._delete_multi(db=db, guild_id=guild_id)
return AllLfgDeleteOutputModel(event_ids=[obj.id for obj in objs])
async def update(self, db: AsyncSession, lfg_id: int, guild_id: int, discord_id: int, **update_data) -> LfgMessage:
"""Update the lfg info belonging to the lfg id and guild"""
obj = await self.get(db=db, lfg_id=lfg_id, guild_id=guild_id)
# check author
await self._check_author(obj=obj, discord_id=discord_id)
# remove none values
update_data = {k: v for k, v in update_data.items() if v is not None}
await self._update(db=db, to_update=obj, **update_data)
return obj
@staticmethod
async def _check_author(obj: LfgMessage, discord_id: int):
"""Checks if the discord_id is the creator"""
if discord_id != 1:
if obj.author_id != discord_id:
raise CustomException("NoLfgEventPermissions")
async def _get_user_events(
self, db: AsyncSession, discord_id: int, joined: bool = False, backup: bool = False
) -> list[LfgMessage]:
"""Get the lfg infos for the user"""
query = select(LfgMessage)
if joined:
query = query.filter(LfgMessage.joined_members.any(cast(discord_id, BigInteger())))
if backup:
query = query.filter(LfgMessage.backup_members.any(cast(discord_id, BigInteger())))
result = await self._execute_query(db, query)
return result.scalars().fetchall()
lfg = CRUDLfgMessages(LfgMessage)
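
# Hypothetical usage sketch (not part of the CRUD layer itself): fetch all LFG
# events for one guild inside an existing async session. The guild id below is
# made up.
async def _example_get_all_lfg(db: AsyncSession) -> list[LfgMessage]:
    return await lfg.get_all(db=db, guild_id=123456789)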
|
import sqlalchemy_utils
from babel import Locale
from wtforms import Form
from tests import MultiDict
from wtforms_alchemy import CountryField
sqlalchemy_utils.i18n.get_locale = lambda: Locale('en')
class TestCountryField(object):
field_class = CountryField
def init_form(self, **kwargs):
class TestForm(Form):
test_field = self.field_class(**kwargs)
self.form_class = TestForm
return self.form_class
def setup_method(self, method):
self.valid_countries = [
'US',
'SA',
'FI'
]
self.invalid_countries = [
'unknown',
]
def test_valid_countries(self):
form_class = self.init_form()
for country in self.valid_countries:
form = form_class(MultiDict(test_field=country))
form.validate()
assert len(form.errors) == 0
def test_invalid_countries(self):
form_class = self.init_form()
for country in self.invalid_countries:
form = form_class(MultiDict(test_field=country))
form.validate()
assert len(form.errors['test_field']) == 2
|
import magic
from django import template
from django.core import serializers
from django.template.defaultfilters import safe
from ..models.validators.file import VALID_FILE_TYPES
register = template.Library()
@register.filter
def json_dump(values):
# Return values as JSON data
data = list(values)
data = serializers.serialize('json', data)
return safe(data)
@register.filter
def file_mime(file):
# Get the mime type of a file (E.g. image/jpeg, text/plain)
mime = magic.from_file(file, mime=True)
return mime
@register.filter
def file_type(mime):
# TODO: fix implementation of filter
try:
category, t = mime.split('/')
if category == 'application':
if t == 'vnd.apple.keynote':
return 'powerpoint'
if t in ['vnd.apple.pages', 'msword']:
return 'word'
if t in ['vnd.apple.numbers', 'csv']:
return 'excel'
if t in ['zip']:
return 'folder'
return t
if category in ['image', 'audio', 'video', 'text']:
if category == 'text':
if t in ['plain', 'rtf', 'richtext']:
return 'alt'
else:
return 'code'
return category
except ValueError:
return 'code'
@register.filter
def file_size(file, unit):
"""
Convert the size from bytes to other units like KB, MB or GB
Adapted from:
https://thispointer.com/python-get-file-size-in-kb-mb-or-gb-human-readable-format/
"""
base = 1024
if unit == 'KB':
size = file.size/base
elif unit == 'MB':
size = file.size/(base**2)
elif unit == 'GB':
size = file.size/(base**3)
else:
size = file.size
return f'{round(size, 2)} {unit}'
@register.filter
def total_file_sizes(files, unit):
total = 0
base = 1024
for file in files:
if unit == 'KB':
size = file.file.size / base
elif unit == 'MB':
size = file.file.size / (base ** 2)
elif unit == 'GB':
size = file.file.size / (base ** 3)
else:
size = file.file.size
total += size
return f'{round(total, 2)} {unit}'
@register.filter
def file_name(file):
return file.name.split('/')[-1]
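
# Hedged sanity check for the size filters above (not part of the original
# module): uses a minimal stand-in object with a `size` attribute rather than a
# real Django FieldFile.
def _example_file_size():
    class _FakeFile:
        size = 3 * 1024 ** 2  # exactly 3 MB expressed in bytes
    return file_size(_FakeFile(), 'MB')  # -> '3.0 MB'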
|
from fits_align.ident import make_transforms
from fits_align.align import affineremap
from fits2image.conversions import fits_to_jpg
import click
from glob import glob
import os
from astropy.io import fits
import numpy as np
from astroscrappy import detect_cosmics
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
def reproject_files(ref_image, images_to_align, tmpdir):
identifications = make_transforms(ref_image, images_to_align[1:3])
aligned_images = []
for id in identifications:
if id.ok:
aligned_img = affineremap(id.ukn.filepath, id.trans, outdir=tmpdir)
aligned_images.append(aligned_img)
img_list = [ref_image]+aligned_images
if len(img_list) != 3:
return images_to_align
return img_list
def sort_files_for_colour(file_list):
colour_template = {'rp':'1','V':'2','B':'3'}
colours = {v:k for k,v in colour_template.items()}
for f in file_list:
data, hdrs = fits.getdata(f, header=True)
filtr = hdrs['filter']
order = colour_template.get(filtr, None)
if not order and filtr == 'R':
order = '1'
elif not order:
logger.error('{} is not a recognised colour filter'.format(filtr))
return False
colours[order] = f
file_list = [colours[str(i)] for i in range(1,4)]
assert len(file_list) == 3
return file_list
def write_clean_data(filelist):
'''
Overwrite FITS files with cleaned and scaled data
- Data is read into uncompressed FITS file to remove dependency on FPack
'''
img_list =[]
for i, file_in in enumerate(filelist):
data, hdrs = fits.getdata(file_in, header=True)
filtr = hdrs['filter']
path = os.path.split(file_in)[0]
new_filename = os.path.join(path,"{}.fits".format(filtr))
data = clean_data(data)
hdu = fits.PrimaryHDU(data, header=hdrs)
hdu.writeto(new_filename)
img_list.append(new_filename)
return img_list
def remove_cr(data):
'''
Removes high value pixels which are presumed to be cosmic ray hits.
'''
m, imdata = detect_cosmics(data, readnoise=20., gain=1.4, sigclip=5., sigfrac=.5, objlim=6.)
return imdata
def clean_data(data):
'''
    - Replace bogus (i.e. negative) pixels with the median sky value
    - Remove cosmic rays
'''
# Level out the colour balance in the frames
logger.debug('--- Begin CR removal ---')
median = np.median(data)
data[data<0.]=median
# Run astroScrappy to remove pesky cosmic rays
data = remove_cr(data)
logger.debug('Median=%s' % median)
logger.debug('Max after median=%s' % data.max())
return data
@click.command()
@click.option('--in_dir', '-i', help='Input folder')
@click.option("--name", "-n", help="Name of the output file")
def main(in_dir, name):
path_match = "*.fits.fz"
img_list = sorted(glob(os.path.join(in_dir, path_match)))
img_list = reproject_files(img_list[0], img_list, in_dir)
img_list = write_clean_data(img_list)
img_list = sort_files_for_colour(img_list)
fits_to_jpg(img_list, os.path.join(in_dir,name), width=1000, height=1000, color=True)
return
if __name__ == '__main__':
main()
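# Rough command-line sketch (the script and folder names here are hypothetical);
# the input folder is expected to contain three compressed frames matching
# "*.fits.fz", one each in the rp/R, V and B filters:
#
#   python colour_stack.py -i /path/to/frames -n target.jpg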
|
from setuptools import setup, find_packages
packages = find_packages(include=('Arcapi', 'Arcapi.*'))
setup(
name='Arc_api',
version='1.0',
author='littlebutt',
author_email='[email protected]',
license='MIT License',
url="https://github.com/littlebutt/Arcapi",
description='An API for Arc Prober.',
packages=packages,
install_requires=['websockets>=8.1', 'websocket-client>=0.57.0','brotli>=1.0.9'],
python_requires='>=3.7',
platforms='any'
)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-21 22:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Keyword',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True, verbose_name='name')),
('description', models.TextField(blank=True, null=True, verbose_name='description')),
('script', models.TextField(verbose_name='script')),
('values', models.TextField(blank=True, verbose_name='values')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created_at')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified')),
],
options={
'verbose_name': 'keyword',
'verbose_name_plural': 'keywords',
'db_table': 'keywords',
},
),
]
|
# Copyright: 2005-2011 Brian Harring <[email protected]>
# License: GPL2/BSD
"""
Base package class; instances should derive from this.

Right now it doesn't provide much; that needs to change down the line.
"""
__all__ = ("base", "wrapper", "dynamic_getattr_dict")
from snakeoil import klass
from snakeoil.compatibility import cmp, IGNORED_EXCEPTIONS
from pkgcore import exceptions as base_errors
from pkgcore.operations import format
from pkgcore.package import errors
class base(object, metaclass=klass.immutable_instance):
built = False
configurable = False
_operations = format.operations
__slots__ = ("__weakref__",)
_get_attr = {}
@property
def versioned_atom(self):
raise NotImplementedError(self, "versioned_atom")
@property
def unversioned_atom(self):
raise NotImplementedError(self, "unversioned_atom")
def operations(self, domain, **kwds):
return self._operations(domain, self, **kwds)
@property
def is_supported(self):
return True
class wrapper(base):
__slots__ = ("_raw_pkg", "_domain")
klass.inject_richcmp_methods_from_cmp(locals())
def operations(self, domain, **kwds):
return self._raw_pkg._operations(domain, self, **kwds)
def __init__(self, raw_pkg):
object.__setattr__(self, "_raw_pkg", raw_pkg)
def __cmp__(self, other):
if isinstance(other, wrapper):
return cmp(self._raw_pkg, other._raw_pkg)
return cmp(self._raw_pkg, other)
def __eq__(self, other):
if isinstance(other, wrapper):
return cmp(self._raw_pkg, other._raw_pkg) == 0
return cmp(self._raw_pkg, other) == 0
def __ne__(self, other):
return not self == other
__getattr__ = klass.GetAttrProxy("_raw_pkg")
__dir__ = klass.DirProxy("_raw_pkg")
built = klass.alias_attr("_raw_pkg.built")
versioned_atom = klass.alias_attr("_raw_pkg.versioned_atom")
unversioned_atom = klass.alias_attr("_raw_pkg.unversioned_atom")
is_supported = klass.alias_attr('_raw_pkg.is_supported')
def __hash__(self):
return hash(self._raw_pkg)
def dynamic_getattr_dict(self, attr):
functor = self._get_attr.get(attr)
if functor is None:
if attr == '__dict__':
return self._get_attr
raise AttributeError(self, attr)
try:
val = functor(self)
object.__setattr__(self, attr, val)
return val
except IGNORED_EXCEPTIONS:
raise
except errors.MetadataException as e:
if e.attr == attr:
raise
raise errors.MetadataException(self, attr, e.error, e.verbose) from e
except (errors.PackageError, UnicodeDecodeError) as e:
raise errors.MetadataException(self, attr, str(e)) from e
except PermissionError as e:
raise base_errors.PermissionDenied(self.path, write=False) from e
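# dynamic_getattr_dict above appears intended to be bound as a class-level
# __getattr__ on package classes whose _get_attr maps attribute names to
# loader functions; each computed value is cached on the instance via
# object.__setattr__, so later lookups bypass __getattr__ entirely.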
|
from __future__ import division, print_function
import sys
import numpy as np
import pickle
import tensorflow as tf
from tqdm import tqdm
from dnn_reco import detector
class DataTransformer:
"""Transforms data
Attributes
----------
trafo_model : dictionary
A dictionary containing the transformation settings and parameters.
"""
def __init__(self, data_handler,
treat_doms_equally=True, normalize_dom_data=True,
normalize_label_data=True, normalize_misc_data=True,
log_dom_bins=False, log_label_bins=False, log_misc_bins=False,
norm_constant=1e-6, float_precision='float64'):
"""Initializes a DataTransformer object and saves the trafo settings.
Parameters
----------
data_handler : :obj: of class DataHandler
An instance of the DataHandler class. The object is used to obtain
meta data.
treat_doms_equally : bool
            All DOMs are treated equally, e.g. the mean and variance are
            calculated over all DOMs and not individually.
normalize_dom_data : bool, optional
If true, dom data will be normalized to have a mean of 0 and a
variance of 1.
normalize_label_data : bool, optional
If true, labels will be normalized to have a mean of 0 and a
variance of 1.
normalize_misc_data : bool, optional
If true, misc data will be normalized to have a mean of 0 and a
variance of 1.
log_dom_bins : bool, list of bool
The natural logarithm is applied to the DOM bins prior
to normalization.
If a list is given, the length of the list must match the number of
bins 'num_bins'. The logarithm is applied to bin i if the ith entry
of the log_dom_bins list is True.
log_label_bins : bool, list of bool, dict
The natural logarithm is applied to the label bins prior
to normalization.
If a list is given, the length of the list must match the number of
labels label_shape[-1]. The logarithm is applied to bin i if the
ith entry of the log_label_bins list is True.
If a dictionary is provided, a list of length label_shape[-1] will
be initialized with False and only the values of the labels as
specified by the keys in the dictionary will be updated.
log_misc_bins : bool, list of bool, dict
The natural logarithm is applied to the misc data bins prior
to normalization.
If a list is given, the length of the list must match the number of
            misc variables misc_shape[-1]. The logarithm is applied to bin i
if the ith entry of the log_misc_bins list is True.
            If a dictionary is provided, a list of length misc_shape[-1] will
            be initialized with False and only the values of the misc
            variables as specified by the keys in the dictionary will be
            updated.
norm_constant : float
A small constant that is added to the denominator during
normalization to ensure finite values.
float_precision : str, optional
Float precision to use for trafo methods.
Examples: 'float32', 'float64'
Raises
------
ValueError
            If the length of a log_bins list does not match the number of
            bins, labels, or misc variables of the corresponding data.
"""
self._setup_complete = False
self._np_float_dtype = getattr(np, float_precision)
self._tf_float_dtype = getattr(tf, float_precision)
# If log_bins is a bool, logarithm is to be applied to all bins.
# In this case, create a list of bool for each data bin.
if isinstance(log_dom_bins, bool):
log_dom_bins = [log_dom_bins for i in range(data_handler.num_bins)]
if isinstance(log_label_bins, bool):
log_label_bins = [log_label_bins
for i in range(data_handler.label_shape[-1])]
elif isinstance(log_label_bins, dict):
log_dict = dict(log_label_bins)
log_label_bins = np.zeros(data_handler.label_shape[-1], dtype=bool)
for key, value in log_dict.items():
log_label_bins[data_handler.get_label_index(key)] = bool(value)
if isinstance(log_misc_bins, bool) and data_handler.misc_shape:
log_misc_bins = [log_misc_bins
for i in range(data_handler.misc_shape[-1])]
elif isinstance(log_misc_bins, dict) and data_handler.misc_shape:
log_dict = dict(log_misc_bins)
log_misc_bins = np.zeros(data_handler.misc_shape[-1], dtype=bool)
for key, value in log_dict.items():
log_misc_bins[data_handler.get_misc_index(key)] = bool(value)
# Some sanity checks
if len(log_dom_bins) != data_handler.num_bins:
raise ValueError('{!r} != {!r}. Wrong log_bins: {!r}'.format(
len(log_dom_bins),
data_handler.num_bins,
log_dom_bins))
if len(log_label_bins) != data_handler.label_shape[-1]:
raise ValueError('{!r} != {!r}. Wrong log_bins: {!r}'.format(
len(log_label_bins),
data_handler.label_shape[-1],
log_label_bins))
if data_handler.misc_shape is not None:
if len(log_misc_bins) != data_handler.misc_shape[-1]:
raise ValueError('{!r} != {!r}. Wrong log_bins: {!r}'.format(
len(log_misc_bins),
data_handler.misc_shape[-1],
log_misc_bins))
# create trafo_model_dict
self.trafo_model = {
'num_bins': data_handler.num_bins,
'label_shape': data_handler.label_shape,
'misc_shape': data_handler.misc_shape,
'misc_names': data_handler.misc_names,
'label_names': data_handler.label_names,
'treat_doms_equally': treat_doms_equally,
'normalize_dom_data': normalize_dom_data,
'normalize_label_data': normalize_label_data,
'normalize_misc_data': normalize_misc_data,
'log_dom_bins': log_dom_bins,
'log_label_bins': log_label_bins,
'log_misc_bins': log_misc_bins,
'norm_constant': norm_constant,
}
self._ic78_shape = [10, 10, 60, self.trafo_model['num_bins']]
self._deepcore_shape = [8, 60, self.trafo_model['num_bins']]
def _update_online_variance_vars(self, data_batch, n, mean, M2):
"""Update online variance variables.
This can be used to iteratively calculate the mean and variance of
a dataset.
Parameters
----------
data_batch : numpy ndarray
A batch of data for which to update the variance variables of the
dataset.
n : int
Counter for number of data elements.
mean : numpy ndarray
Mean of dataset.
M2 : numpy ndarray
Variance * size of dataset
Returns
-------
int, np.ndarray, np.ndarray
n, mean, M2
Returns the updated online variance variables
"""
for x in data_batch:
n += 1
delta = x - mean
mean += delta/n
delta2 = x - mean
M2 += delta*delta2
return n, mean, M2
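    # The loop above is the standard Welford online update: after all samples
    # have been fed in, `mean` is the running mean and M2 / n is the (biased)
    # variance, which is why create_trafo_model_iteratively below takes
    # sqrt(M2 / n) as the standard deviation.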
def _perform_update_step(self, log_bins, data_batch, n, mean, M2):
"""Update online variance variables.
This can be used to iteratively calculate the mean and variance of
a dataset.
Parameters
----------
log_bins : list of bool
            Defines whether the natural logarithm is applied to bins along
last axis. Must have same length as data_batch.shape[-1].
data_batch : numpy ndarray
A batch of data for which to update the variance variables of the
dataset.
n : int
Counter for number of data elements.
mean : numpy ndarray
Mean of dataset.
M2 : numpy ndarray
Variance * size of dataset
Returns
-------
int, np.ndarray, np.ndarray
n, mean, M2
Returns the updated online variance variables
"""
data_batch = np.array(data_batch, dtype=self._np_float_dtype)
# perform logarithm on bins
for bin_i, log_bin in enumerate(log_bins):
if log_bin:
data_batch[..., bin_i] = np.log(1.0 + data_batch[..., bin_i])
        # calculate online variance and mean for DOM responses
return self._update_online_variance_vars(data_batch=data_batch, n=n,
mean=mean, M2=M2)
def create_trafo_model_iteratively(self, data_iterator, num_batches):
"""Iteratively create a transformation model.
Parameters
----------
data_iterator : generator object
A python generator object which generates batches of
dom_responses and cascade_parameters.
num_batches : int
How many batches to use to create the transformation model.
"""
        # create empty online variance variables
ic78_n = 0.
ic78_mean = np.zeros(self._ic78_shape)
ic78_M2 = np.zeros(self._ic78_shape)
deepcore_n = 0.
deepcore_mean = np.zeros(self._deepcore_shape)
deepcore_M2 = np.zeros(self._deepcore_shape)
label_n = 0.
label_mean = np.zeros(self.trafo_model['label_shape'])
label_M2 = np.zeros(self.trafo_model['label_shape'])
if self.trafo_model['misc_shape'] is not None:
misc_n = 0.
misc_mean = np.zeros(self.trafo_model['misc_shape'])
misc_M2 = np.zeros(self.trafo_model['misc_shape'])
for i in tqdm(range(num_batches), total=num_batches):
x_ic78, x_deepcore, label, misc_data = next(data_iterator)
ic78_n, ic78_mean, ic78_M2 = self._perform_update_step(
log_bins=self.trafo_model['log_dom_bins'],
data_batch=x_ic78,
n=ic78_n,
mean=ic78_mean,
M2=ic78_M2)
deepcore_n, deepcore_mean, deepcore_M2 = self._perform_update_step(
log_bins=self.trafo_model['log_dom_bins'],
data_batch=x_deepcore,
n=deepcore_n,
mean=deepcore_mean,
M2=deepcore_M2)
label_n, label_mean, label_M2 = self._perform_update_step(
log_bins=self.trafo_model['log_label_bins'],
data_batch=label,
n=label_n,
mean=label_mean,
M2=label_M2)
if self.trafo_model['misc_shape'] is not None:
misc_n, misc_mean, misc_M2 = self._perform_update_step(
log_bins=self.trafo_model['log_misc_bins'],
data_batch=misc_data,
n=misc_n,
mean=misc_mean,
M2=misc_M2)
# Calculate standard deviation
ic78_std = np.sqrt(ic78_M2 / ic78_n)
deepcore_std = np.sqrt(deepcore_M2 / deepcore_n)
label_std = np.sqrt(label_M2 / label_n)
if self.trafo_model['misc_shape'] is not None:
misc_std = np.sqrt(misc_M2 / misc_n)
# combine DOM data over all DOMs if desired
if self.trafo_model['treat_doms_equally']:
            # initialize with zeros
self.trafo_model['ic78_mean'] = np.zeros(self._ic78_shape)
self.trafo_model['ic78_std'] = np.zeros(self._ic78_shape)
# now calculate normalization for real DOMs
self.trafo_model['ic78_mean'][detector.ic78_real_DOMs_mask] = \
np.mean(ic78_mean[detector.ic78_real_DOMs_mask], axis=0)
self.trafo_model['ic78_std'][detector.ic78_real_DOMs_mask] = \
np.mean(ic78_std[detector.ic78_real_DOMs_mask], axis=0)
# DeepCore
self.trafo_model['deepcore_mean'] = np.mean(deepcore_mean,
axis=(0, 1),
keepdims=True)
self.trafo_model['deepcore_std'] = np.mean(deepcore_std,
axis=(0, 1),
keepdims=True)
else:
self.trafo_model['ic78_mean'] = ic78_mean
self.trafo_model['ic78_std'] = ic78_std
self.trafo_model['deepcore_mean'] = deepcore_mean
self.trafo_model['deepcore_std'] = deepcore_std
self.trafo_model['label_mean'] = label_mean
self.trafo_model['label_std'] = label_std
if self.trafo_model['misc_shape'] is not None:
self.trafo_model['misc_mean'] = misc_mean
self.trafo_model['misc_std'] = misc_std
# set constant parameters to have a std dev of 1 instead of zero
std_names = ['ic78_std', 'deepcore_std', 'label_std']
if self.trafo_model['misc_shape'] is not None:
std_names.append('misc_std')
for key in std_names:
mask = self.trafo_model[key] == 0
self.trafo_model[key][mask] = 1.
self._setup_complete = True
def load_trafo_model(self, model_path):
"""Load a transformation model from file.
Parameters
----------
model_path : str
Path to trafo model file.
Raises
------
ValueError
If settings in loaded transformation model do not match specified
settings.
If not all specified settings are defined in the loaded
transformation model.
"""
# load trafo model from file
with open(model_path, 'rb') as handle:
if sys.version_info.major >= 3:
trafo_model = pickle.load(handle, encoding='latin1')
else:
trafo_model = pickle.load(handle)
# make sure that settings match
for key in self.trafo_model:
if key not in trafo_model:
raise ValueError('Key {!r} does not exist in {!r}'.format(
key, model_path))
mismatch = self.trafo_model[key] != trafo_model[key]
error_msg = 'Setting {!r} does not match!'.format(key)
if isinstance(mismatch, bool):
if mismatch:
raise ValueError(error_msg)
elif mismatch.any():
raise ValueError(error_msg)
# update trafo model
self.trafo_model = trafo_model
self._setup_complete = True
def save_trafo_model(self, model_path):
"""Saves transformation model to file.
Parameters
----------
model_path : str
Path to trafo model file.
"""
with open(model_path, 'wb') as handle:
pickle.dump(self.trafo_model, handle, protocol=2)
def _check_settings(self, data, data_type):
"""Check settings and return necessary parameters for trafo and inverse
trafo method.
Parameters
----------
data : numpy.ndarray or tf.Tensor
The data that will be transformed.
data_type : str
Specifies what kind of data this is. This must be one of:
'ic78', 'deepcore', 'label', 'misc'
Returns
-------
type(data)
The transformed data
Raises
------
ValueError
If DataTransformer object has not created or loaded a trafo model.
            If provided data_type is unknown.
"""
dtype = data.dtype
data_type = data_type.lower()
if not self._setup_complete:
            raise ValueError('DataTransformer needs to create or load a '
                             'trafo model prior to transform call.')
if data_type not in ['ic78', 'deepcore', 'label', 'misc']:
raise ValueError('data_type {!r} is unknown!'.format(data_type))
# check if shape of data matches expected shape
if data_type == 'ic78':
shape = [10, 10, 60, self.trafo_model['num_bins']]
elif data_type == 'deepcore':
shape = [8, 60, self.trafo_model['num_bins']]
else:
shape = self.trafo_model['{}_shape'.format(data_type)]
if list(data.shape[1:]) != shape:
raise ValueError('Shape of data {!r} does'.format(data.shape[1:]) +
' not match expected shape {!r}'.format(shape))
if data_type in ['ic78', 'deepcore']:
log_name = 'log_dom_bins'
normalize_name = 'normalize_dom_data'
else:
log_name = 'log_{}_bins'.format(data_type)
normalize_name = 'normalize_{}_data'.format(data_type)
is_tf = tf.is_tensor(data)
if is_tf:
if dtype != self._tf_float_dtype:
data = tf.cast(data, dtype=self._tf_float_dtype)
else:
data = np.array(data, dtype=self._np_float_dtype)
# choose numpy or tensorflow log function
if is_tf:
log_func = tf.math.log
exp_func = tf.exp
else:
log_func = np.log
exp_func = np.exp
return data, log_name, normalize_name, log_func, exp_func, is_tf, dtype
def transform(self, data, data_type, bias_correction=True):
"""Applies transformation to the specified data.
Parameters
----------
data : numpy.ndarray or tf.Tensor
The data that will be transformed.
data_type : str
Specifies what kind of data this is. This must be one of:
'ic78', 'deepcore', 'label', 'misc'
bias_correction : bool, optional
            If true, the transformation will correct the bias, e.g. subtract
            the data mean so that the transformed data is centered around
            zero. Usually this behaviour is desired. However, when
            transforming uncertainties, this might not be useful.
            If false, it is assumed that uncertainties are being transformed,
            hence, the logarithm will not be applied.
Returns
-------
type(data)
The transformed data.
No Longer Raises
----------------
ValueError
If DataTransformer object has not created or loaded a trafo model.
            If provided data_type is unknown.
"""
data, log_name, normalize_name, log_func, exp_func, is_tf, dtype = \
self._check_settings(data, data_type)
# perform logarithm on bins
if bias_correction:
if np.all(self.trafo_model[log_name]):
# logarithm is applied to all bins: one operation
data = log_func(1.0 + data)
else:
# logarithm is only applied to some bins
if is_tf:
data_list = tf.unstack(data, axis=-1)
for bin_i, do_log in enumerate(self.trafo_model[log_name]):
if do_log:
data_list[bin_i] = log_func(1.0 + data_list[bin_i])
data = tf.stack(data_list, axis=-1)
else:
for bin_i, do_log in enumerate(self.trafo_model[log_name]):
if do_log:
data[..., bin_i] = log_func(1.0 + data[..., bin_i])
# normalize data
if self.trafo_model[normalize_name]:
if bias_correction:
data -= self.trafo_model['{}_mean'.format(data_type.lower())]
data /= (self.trafo_model['norm_constant'] +
self.trafo_model['{}_std'.format(data_type.lower())])
# cast back to original dtype
if is_tf:
if dtype != self._tf_float_dtype:
data = tf.cast(data, dtype=dtype)
else:
data = data.astype(dtype)
return data
def inverse_transform(self, data, data_type, bias_correction=True):
"""Applies inverse transformation to the specified data.
Parameters
----------
data : numpy.ndarray or tf.Tensor
The data that will be transformed.
data_type : str
Specifies what kind of data this is. This must be one of:
'ic78', 'deepcore', 'label', 'misc'
bias_correction : bool, optional
            If true, the transformation will correct the bias, e.g. add the
            data mean back so that the de-normalized data is centered
            correctly. Usually this behaviour is desired. However, when
            transforming uncertainties, this might not be useful.
            If false, it is assumed that uncertainties are being transformed,
            hence, the exponential will not be applied.
Returns
-------
type(data)
            Returns the inverse transformed DOM responses and
cascade_parameters.
No Longer Raises
----------------
ValueError
If DataTransformer object has not created or loaded a trafo model.
            If provided data_type is unknown.
"""
data, log_name, normalize_name, log_func, exp_func, is_tf, dtype = \
self._check_settings(data, data_type)
# de-normalize data
if self.trafo_model[normalize_name]:
data *= (self.trafo_model['norm_constant'] +
self.trafo_model['{}_std'.format(data_type.lower())])
if bias_correction:
data += self.trafo_model['{}_mean'.format(data_type.lower())]
# undo logarithm on bins
if bias_correction:
if np.all(self.trafo_model[log_name]):
# logarithm is applied to all bins: one operation
data = exp_func(data) - 1.0
else:
# logarithm is only applied to some bins
if is_tf:
data_list = tf.unstack(data, axis=-1)
for bin_i, do_log in enumerate(self.trafo_model[log_name]):
if do_log:
data_list[bin_i] = \
tf.clip_by_value(data_list[bin_i], -60., 60.)
data_list[bin_i] = exp_func(data_list[bin_i]) - 1.0
data = tf.stack(data_list, axis=-1)
else:
for bin_i, do_log in enumerate(self.trafo_model[log_name]):
if do_log:
data[..., bin_i] = exp_func(data[..., bin_i]) - 1.0
# cast back to original dtype
if is_tf:
if dtype != self._tf_float_dtype:
data = tf.cast(data, dtype=dtype)
else:
data = data.astype(dtype)
return data
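# Minimal self-check sketch (not part of the module) of the online update used
# in _update_online_variance_vars, assuming plain NumPy arrays:
#
#   rng = np.random.RandomState(0)
#   batch = rng.rand(100, 3)
#   n, mean, M2 = 0, np.zeros(3), np.zeros(3)
#   for x in batch:
#       n += 1
#       delta = x - mean
#       mean += delta / n
#       M2 += delta * (x - mean)
#   assert np.allclose(mean, batch.mean(axis=0))
#   assert np.allclose(np.sqrt(M2 / n), batch.std(axis=0))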
|
import pandas
import unittest
from unittest.mock import Mock, patch
from mstrio.report import Report
class TestReport(unittest.TestCase):
def setUp(self):
self.report_id = '1DC7D33611E9AE27F6B00080EFE52FBC'
self.report_name = 'UnitTest'
self.connection = "test_connection"
self.__definition = {'id': self.report_id,
'name': self.report_name,
'result': {'definition': {'availableObjects': {'metrics': [{'name': 'Age',
'id': '089FB58611E9CA4D39700080EF15B5B9',
'type': 'Metric'},
{'name': 'Row Count - table1',
'id': '089DE7BA11E9CA4D085B0080EFC515B9',
'type': 'Metric'}],
'attributes': [{'name': 'Name',
'id': '089FC10C11E9CA4D39700080EF15B5B9',
'type': 'Attribute',
'forms': [{'id': '45C11FA478E745FEA08D781CEA190FE5',
'name': 'ID',
'dataType': 'Char',
'baseFormCategory': 'ID',
'baseFormType': 'Text'}]}]}}}}
self.__instance = {'id': self.report_id,
'name': self.report_name,
'status': 1,
'instanceId': '49C2D26C11E9CB21237E0080EF1546B6',
'result': {
'definition': {
'metrics': [{'name': 'Age',
'id': '089FB58611E9CA4D39700080EF15B5B9',
'type': 'Metric',
'min': 18,
'max': 21,
'dataType': 'Integer',
'numberFormatting': {'category': 9, 'formatString': 'General'}}],
'attributes': [{'name': 'Name',
'id': '089FC10C11E9CA4D39700080EF15B5B9',
'type': 'Attribute',
'forms': [{'id': '45C11FA478E745FEA08D781CEA190FE5',
'name': 'ID',
'dataType': 'Char',
'baseFormCategory': 'ID',
'baseFormType': 'Text'}]}],
'thresholds': [],
'sorting': []},
'data': {'paging': {'total': 4,
'current': 2,
'offset': 0,
'limit': 2,
'prev': None,
'next': None},
'root': {'isPartial': True,
'children': [{'depth': 0,
'element': {'attributeIndex': 0,
'formValues': {'ID': 'jack'},
'name': 'jack',
'id': 'hjack;089FC10C11E9CA4D39700080EF15B5B9'},
'metrics': {'Age': {'rv': 18, 'fv': '18', 'mi': 0}}},
{'depth': 0,
'element': {'attributeIndex': 0,
'formValues': {'ID': 'krish'},
'name': 'krish',
'id': 'hkrish;089FC10C11E9CA4D39700080EF15B5B9'},
'metrics': {'Age': {'rv': 19, 'fv': '19', 'mi': 0}}}]}}}}
self.__instance_id = {'id': self.report_id,
'name': self.report_name,
'status': 1,
'instanceId': '49C2D26C11E9CB21237E0080EF1546B6',
'result': {
'definition': {
'metrics': [{'name': 'Age',
'id': '089FB58611E9CA4D39700080EF15B5B9',
'type': 'Metric',
'min': 18,
'max': 21,
'dataType': 'Integer',
'numberFormatting': {'category': 9, 'formatString': 'General'}}],
'attributes': [{'name': 'Name',
'id': '089FC10C11E9CA4D39700080EF15B5B9',
'type': 'Attribute',
'forms': [{'id': '45C11FA478E745FEA08D781CEA190FE5',
'name': 'ID',
'dataType': 'Char',
'baseFormCategory': 'ID',
'baseFormType': 'Text'}]}],
'thresholds': [],
'sorting': []},
'data': {'paging': {'total': 4,
'current': 2,
'offset': 2,
'limit': 2,
'prev': None,
'next': None},
'root': {'isPartial': True,
'children': [{'depth': 0,
'element': {'attributeIndex': 0,
'formValues': {'ID': 'nick'},
'name': 'nick',
'id': 'hnick;089FC10C11E9CA4D39700080EF15B5B9'},
'metrics': {'Age': {'rv': 21, 'fv': '21', 'mi': 0}}},
{'depth': 0,
'element': {'attributeIndex': 0,
'formValues': {'ID': 'Tom'},
'name': 'Tom',
'id': 'hTom;089FC10C11E9CA4D39700080EF15B5B9'},
'metrics': {'Age': {'rv': 20, 'fv': '20', 'mi': 0}}}]}}}}
self.__attr_elements = [{'id': '089FC10C11E9CA4D39700080EF15B5B9:jack', 'formValues': ['jack']},
{'id': '089FC10C11E9CA4D39700080EF15B5B9:krish', 'formValues': ['krish']},
{'id': '089FC10C11E9CA4D39700080EF15B5B9:nick', 'formValues': ['nick']},
{'id': '089FC10C11E9CA4D39700080EF15B5B9:Tom', 'formValues': ['Tom']}]
self.__headers = {'x-mstr-total-count': '4'}
self.__selected_attr = ['089FC10C11E9CA4D39700080EF15B5B9']
self.__selected_metrs = ['089FB58611E9CA4D39700080EF15B5B9']
self.__selected_elem = ['089FC10C11E9CA4D39700080EF15B5B9:Tom', '089FC10C11E9CA4D39700080EF15B5B9:jack']
self.__dataframe = pandas.DataFrame({'Name':['jack', 'krish','nick','Tom'], 'Age':[18,19,21,20]})
@patch('mstrio.api.reports.report_instance')
@patch('mstrio.api.reports.report_single_attribute_elements')
@patch('mstrio.api.reports.report')
def test_init_report(self, mock_definition, mock_attr_element, mock_instance):
"""Test that definition of the report is assigned properly when report is initialized."""
from mstrio.api.reports import report_instance
mock_definition.return_value = Mock(ok=True)
mock_definition.return_value.json.return_value = self.__definition
mock_attr_element.return_value = Mock(headers=self.__headers)
mock_attr_element.return_value.json.return_value = self.__attr_elements
mock_instance.return_value = Mock(ok=True)
mock_instance.return_value.json.return_value = self.__instance
report = Report(connection=self.connection, report_id=self.report_id)
self.assertTrue(mock_instance.called)
self.assertFalse(mock_attr_element.called)
self.assertEqual(report._connection, self.connection)
self.assertEqual(report._report_id, self.report_id)
self.assertEqual(report.name, self.report_name)
self.assertEqual(report.attributes, [{'name': 'Name', 'id': '089FC10C11E9CA4D39700080EF15B5B9'}])
self.assertEqual(report.metrics, [{'name': metric['name'],
'id': metric['id']} \
for metric in self._TestReport__instance['result']['definition']['metrics']])
self.assertIsNone(report.selected_attributes)
self.assertIsNone(report.selected_metrics)
self.assertIsNone(report.selected_attr_elements)
self.assertIsNone(report._dataframe)
with self.assertWarns(Warning):
report.dataframe
@patch('mstrio.api.reports.report_instance')
@patch('mstrio.api.reports.report_single_attribute_elements')
@patch('mstrio.api.reports.report')
def test_apply_filters(self, mock_definition, mock_attr_element, mock_instance):
"""Test that selected objects are assigned properly when filter is applied."""
mock_definition.return_value = Mock(ok=True)
mock_definition.return_value.json.return_value = self.__definition
mock_attr_element.return_value = Mock(ok=True, headers=self.__headers)
mock_attr_element.return_value.json.return_value = self.__attr_elements
mock_instance.return_value = Mock(ok=True)
mock_instance.return_value.json.return_value = self.__instance
report = Report(connection=self.connection, report_id=self.report_id)
report.apply_filters(self.__selected_attr, self.__selected_metrs, self.__selected_elem)
self.assertEqual(report.selected_attributes, self.__selected_attr)
self.assertEqual(report.selected_metrics, self.__selected_metrs)
self.assertEqual(report.selected_attr_elements, self.__selected_elem)
report.clear_filters()
report.apply_filters(attributes=[], metrics=[])
self.assertIsNone(report.selected_attributes)
self.assertIsNone(report.selected_metrics)
@patch('mstrio.api.reports.report_instance')
@patch('mstrio.api.reports.report_single_attribute_elements')
@patch('mstrio.api.reports.report')
def test_clear_filters(self, mock_definition, mock_attr_element, mock_instance):
"""Test that selected objects are assigned with empty lists when filter is cleared."""
mock_definition.return_value = Mock(ok=True)
mock_definition.return_value.json.return_value = self.__definition
mock_attr_element.return_value = Mock(ok=True, headers=self.__headers)
mock_attr_element.return_value.json.return_value = self.__attr_elements
mock_instance.return_value = Mock(ok=True)
mock_instance.return_value.json.return_value = self.__instance
report = Report(connection=self.connection, report_id=self.report_id)
report.apply_filters(self.__selected_attr, self.__selected_metrs, self.__selected_elem)
self.assertEqual(report.selected_attributes, self.__selected_attr)
self.assertEqual(report.selected_metrics, self.__selected_metrs)
self.assertEqual(report.selected_attr_elements, self.__selected_elem)
report.clear_filters()
self.assertIsNone(report.selected_attributes)
self.assertIsNone(report.selected_metrics)
self.assertIsNone(report.selected_attr_elements)
@patch('mstrio.api.reports.report_instance_id')
@patch('mstrio.api.reports.report_instance')
@patch('mstrio.api.reports.report_single_attribute_elements')
@patch('mstrio.api.reports.report')
def test_to_dataframe(self, mock_definition, mock_attr_element, mock_instance, mock_instance_id):
"""Test that data is retrieved and parsed properly when to_dataframe() is called.
Result should be saved to Report.dataframe property.
"""
mock_definition.return_value = Mock(ok=True)
mock_definition.return_value.json.return_value = self.__definition
mock_attr_element.return_value = Mock(ok=True, headers=self.__headers)
mock_attr_element.return_value.json.return_value = self.__attr_elements
mock_instance.return_value = Mock(ok=True)
mock_instance.return_value.json.return_value = self.__instance
mock_instance_id.return_value = Mock(ok=True)
mock_instance_id.return_value.json.return_value = self.__instance_id
report = Report(connection=self.connection, report_id=self.report_id)
df = report.to_dataframe(limit=2)
self.assertTrue(mock_instance.called)
self.assertTrue(mock_instance_id.called)
self.assertIsInstance(df, pandas.core.frame.DataFrame)
self.assertIsInstance(report.dataframe, pandas.core.frame.DataFrame)
self.assertTrue(df.equals(self.__dataframe))
@patch('mstrio.api.reports.report_instance')
@patch('mstrio.api.reports.report_single_attribute_elements')
@patch('mstrio.api.reports.report')
def test_apply_filters_for_incorrect_assignments(self, mock_definition, mock_attr_element, mock_instance):
"""Test that incorrectly assigned selected objects are assigned properly when filter is applied."""
mock_definition.return_value = Mock(ok=True)
mock_definition.return_value.json.return_value = self.__definition
mock_attr_element.return_value = Mock(ok=True, headers=self.__headers)
mock_attr_element.return_value.json.return_value = self.__attr_elements
mock_instance.return_value = Mock(ok=True)
mock_instance.return_value.json.return_value = self.__instance
report = Report(connection=self.connection, report_id=self.report_id)
# attributes assigned selected_metrs, metrics assigned selected_elem and attr_elements assigned selected_attr
report.apply_filters(attributes=self.__selected_metrs, metrics=self.__selected_elem,
attr_elements=self.__selected_attr)
self.assertEqual(report.selected_attributes, self.__selected_attr)
self.assertEqual(report.selected_metrics, self.__selected_metrs)
self.assertEqual(report.selected_attr_elements, self.__selected_elem)
@patch('mstrio.api.reports.report_instance')
@patch('mstrio.api.reports.report_single_attribute_elements')
@patch('mstrio.api.reports.report')
def test_apply_filters_no_list(self, mock_definition, mock_attr_element, mock_instance):
"""Test that selected objects passed as strings are assigned properly when filter is applied."""
mock_definition.return_value = Mock(ok=True)
mock_definition.return_value.json.return_value = self.__definition
mock_attr_element.return_value = Mock(ok=True, headers=self.__headers)
mock_attr_element.return_value.json.return_value = self.__attr_elements
mock_instance.return_value = Mock(ok=True)
mock_instance.return_value.json.return_value = self.__instance
report = Report(connection=self.connection, report_id=self.report_id)
report.apply_filters(attributes=self.__selected_attr[0], metrics=self.__selected_metrs[0],
attr_elements=self.__selected_elem[0])
self.assertEqual(report.selected_attributes, self.__selected_attr)
self.assertEqual(report.selected_metrics, self.__selected_metrs)
self.assertEqual(report.selected_attr_elements, self.__selected_elem[:1])
@patch('mstrio.api.reports.report_instance')
@patch('mstrio.api.reports.report_single_attribute_elements')
@patch('mstrio.api.reports.report')
def test_apply_filters_invalid_elements(self, mock_definition, mock_attr_element, mock_instance):
"""Test that invalid id passed to a filter raises ValueError."""
mock_definition.return_value = Mock(ok=True)
mock_definition.return_value.json.return_value = self.__definition
mock_attr_element.return_value = Mock(ok=True, headers=self.__headers)
mock_attr_element.return_value.json.return_value = self.__attr_elements
mock_instance.return_value = Mock(ok=True)
mock_instance.return_value.json.return_value = self.__instance
report = Report(connection=self.connection, report_id=self.report_id)
self.assertRaises(ValueError, report.apply_filters, attributes='INV123456')
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 12:57:11 2018
@author: sosene
"""
# TODO: rewrite this in a column/matrix (vectorized) representation...
import random
import numpy as np
import seaborn as sb
import pandas as pd
#from sklearn import datasets
import matplotlib.pyplot as plt
random.seed(7)
entityNumber = 100
worldSize = {'x': 100, 'z': 100}
numKinds = 3
class Entity:
# current state
kind = 0.
intensity = 0.25
px = 0.
pz = 0.
v = 0.
alpha = 0.
alive = 1
energy = 50.
# parameters
aMax = 2.
rotMax = 45.
viewRange = 100.
rangeSegments = 5
rotSegments = 11 # better to have odd number probably
halfBeta = 30
mass = 0.2
# init function
def __init__(self,rx,rz,alpha,kind,energy):
#print('Hej')
self.px = rx
self.pz = rz
self.alpha = alpha
self.kind = kind
self.energy = energy
def move(self, dt):
self.px = (self.px + self.v * np.cos(self.alpha * 2*np.pi/360) * dt) % worldSize['x']
self.pz = (self.pz + self.v * np.sin(self.alpha * 2*np.pi/360) * dt) % worldSize['z']
self.energy = self.energy - dt*self.mass*self.v**2/2
def accelerate(self, aFrac):
self.v = self.v + aFrac*self.aMax
def rotate(self, rotFrac):
self.alpha = (self.alpha + rotFrac * self.rotMax/(1+self.v)) % 360
def checkState(self):
if (self.energy < 0):
self.alive = 0
entityList = []
# generate entities
for i in range(entityNumber):
rx = random.random()*worldSize['x']
rz = random.random()*worldSize['z']
alpha = random.random()*360
kind = int(random.random()*3)
energy = random.random()*50+100
entityList.append(Entity(rx,rz,alpha,kind,energy))
# accelerate entities
for i in range(len(entityList)):
entityList[i].accelerate(random.random())
for j in range(1000):
# move entities
for i in range(len(entityList)):
entityList[i].move(1)
# rotate entities
for i in range(len(entityList)):
entityList[i].rotate(random.random()*2-1)
pxList = []
pzList = []
kindList = []
energyList = []
for i in range(len(entityList)):
pxList.append(entityList[i].px)
pzList.append(entityList[i].pz)
kindList.append(entityList[i].kind)
energyList.append(entityList[i].energy)
df = pd.DataFrame(
{
'px': pxList,
'pz': pzList,
'kind': kindList,
'energy': energyList
})
sb.set(font_scale=1.2, style="ticks") #set styling preferences
    plt.xlim(0, 100)
    plt.ylim(0, 100)
points = plt.scatter(df["px"], df["pz"],
c=df["kind"], cmap="Spectral", s=df["energy"]) #set style options
points.figure.set_size_inches(10, 10)
points.figure.savefig("oaa_1_"+str(j)+".png")
plt.clf()
t=0
tE = entityList[0]
print("tE position ",tE.px, " ", tE.pz)
print("tE angle ",tE.alpha)
perception = np.zeros((numKinds, tE.rangeSegments, tE.rotSegments))
rangeSegmentsList = [tE.viewRange*(2**n-1)/(2**tE.rangeSegments-1) for n in range(tE.rangeSegments+1)]
rotSegmentsList = [180-tE.halfBeta-angle*(360.-2*tE.halfBeta)/tE.rotSegments for angle in range(tE.rotSegments+1)]
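# With the defaults above (viewRange=100, rangeSegments=5) the range boundaries
# grow geometrically: [0, 3.23, 9.68, 22.58, 48.39, 100], so nearby entities are
# binned more finely than distant ones; rotSegmentsList spans the field of view
# from +150 deg down to -150 deg (360 minus 2*halfBeta) in rotSegments steps.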
for i in range(len(entityList)):
if (i != t):
tF = entityList[i]
dist = np.sqrt((tE.px-tF.px)**2+(tE.pz-tF.pz)**2)
print('Distance: ', dist)
if(dist < tE.viewRange):
if(tE.pz < tF.pz):
relativeAngle = ((90 - tE.alpha+np.arcsin((tE.px-tF.px)/dist)*360/(2*np.pi))%360)-180
else:
relativeAngle = ((90 - tE.alpha+180-np.arcsin((tE.px-tF.px)/dist)*360/(2*np.pi))%360)-180
print('Angle: ', relativeAngle)
rangeSegment = tE.rangeSegments
for seg in reversed(range(tE.rangeSegments)):
if (dist>= rangeSegmentsList[seg]):
rangeSegment = seg
break
print('Segment: ', rangeSegment)
if(np.abs(relativeAngle) < (180- tE.halfBeta)):
for angSeg in range(1,len(rotSegmentsList)):
if(relativeAngle > rotSegmentsList[angSeg]):
rotSegment = angSeg-1
print('rotSegment: ', angSeg)
perception[tF.kind,rangeSegment,rotSegment] += tF.intensity
break
# viewRange = 10.
# rangeSegments = 5
# rotSegments = 5
# halfBeta = 30
print(perception[0])
print(perception[1])
print(perception[2])
|
import pandas as pd
import functools
from trumania.core.util_functions import merge_2_dicts, merge_dicts, is_sequence, make_random_assign, cap_to_total
from trumania.core.util_functions import build_ids, latest_date_before, bipartite, make_random_bipartite_data
def test_merge_two_empty_dict_should_return_empty_dict():
assert {} == merge_2_dicts({}, {})
def test_merge_two_none_dict_should_return_empty_dict():
assert {} == merge_2_dicts(None, None)
def test_merging_one_dict_with_none_should_yield_dict():
d1 = {"a": 1, "b": 2}
assert d1 == merge_2_dicts(d1, None)
def test_merging_none_with_one_dict_should_yield_dict():
d2 = {"a": 1, "b": 2}
assert d2 == merge_2_dicts(None, d2)
def test_merge_empty_with_dict_should_return_itself():
d1 = {"a": 1, "b": 2}
assert d1 == merge_2_dicts(d1, {})
assert d1 == merge_2_dicts({}, d1)
def test_merge_non_overlapping_dict_should_return_all_values():
d1 = {"a": 1, "b": 2}
d2 = {"c": 3, "d": 4}
assert {"a": 1, "b": 2, "c": 3, "d": 4} == merge_2_dicts(d1, d2)
def test_merge_dict_to_itself_should_return_doubled_values():
d1 = {"a": 1, "b": 2}
assert {"a": 2, "b": 4} == merge_2_dicts(d1, d1, lambda a, b: a + b)
def test_merging_one_dictionary_should_yield_itself():
d1 = {"a": 1, "b": 2}
assert d1 == merge_dicts([d1], lambda a, b: a + b)
def test_merging_an_empty_list_of_dicts_should_yield_empty_dict():
assert {} == merge_dicts([])
def test_merging_an_empty_gen_of_dicts_should_yield_empty_dict():
    empty_gen = ({"a": 1} for _ in [])
    assert {} == merge_dicts(empty_gen)
def test_merging_many_dictionary_should_yield_expected_result():
d1 = {"a": 10, "b": 20}
d2 = {"a": 100, "c": 30}
d3 = {}
d4 = {"b": 200, "z": 1000}
d5 = {"z": -10}
merged = merge_dicts([d1, d2, d3, d4, d5], lambda a, b: a + b)
assert {"a": 110, "b": 220, "c": 30, "z": 990} == merged
def test_merging_many_dictionary_from_gen_should_yield_expected_result():
ds = [{"a": 10, "b": 20},
{"a": 100, "c": 30},
{},
{"b": 200, "z": 1000},
{"z": -10}]
dicts_gens = (d for d in ds)
merged = merge_dicts(dicts_gens, lambda a, b: a + b)
assert {"a": 110, "b": 220, "c": 30, "z": 990} == merged
def test_is_sequence():
assert is_sequence([])
assert is_sequence([1, 2, 3, 1])
assert is_sequence({1, 2, 3, 1})
assert not is_sequence(1)
assert not is_sequence("hello")
def test_make_random_assign_should_assign_each_element_only_once():
dealers = build_ids(size=10, prefix="DEALER_", max_length=2)
sims = build_ids(size=1000, prefix="SIM_", max_length=4)
assignment = make_random_assign(set1=sims, set2=dealers, seed=10)
# all sims should have been assigned
assert assignment.shape == (1000, 2)
# all SIM should have been given
assert set(assignment["set1"].unique().tolist()) == set(sims)
# all owners should be part of the dealers
assert set(assignment["chosen_from_set2"].unique().tolist()) <= set(dealers)
def test_cap_to_total_should_leave_untouched_values_below_target():
assert [10, 20, 30] == cap_to_total([10, 20, 30], target_total=100)
def test_cap_to_total_should_leave_untouched_equal_to_target():
assert [50, 40, 20] == cap_to_total([50, 40, 20], target_total=110)
def test_cap_to_total_should_lower_last_correctly():
assert [50, 40, 5] == cap_to_total([50, 40, 20], target_total=95)
def test_cap_to_total_should_zero_last_correctly():
assert [50, 40, 0] == cap_to_total([50, 40, 20], target_total=90)
def test_cap_to_total_should_zero_several_correctly():
assert [38, 0, 0] == cap_to_total([50, 40, 20], target_total=38)
def test_latest_date_before_should_return_input_if_within_range():
starting_date = pd.Timestamp("6 June 2016")
upper_bound = pd.Timestamp("8 June 2016")
time_step = pd.Timedelta("7D")
result = latest_date_before(starting_date, upper_bound, time_step)
assert result == starting_date
def test_latest_date_before_should_return_input_if_start_equals_ub():
starting_date = pd.Timestamp("8 June 2016")
upper_bound = pd.Timestamp("8 June 2016")
time_step = pd.Timedelta("7D")
result = latest_date_before(starting_date, upper_bound, time_step)
assert result == starting_date
def test_latest_date_before_should_shift_backward_one_week_input_as_required():
starting_date = pd.Timestamp("10 June 2016")
expected_date = pd.Timestamp("3 June 2016")
upper_bound = pd.Timestamp("8 June 2016")
time_step = pd.Timedelta("7D")
result = latest_date_before(starting_date, upper_bound, time_step)
assert result == expected_date
def test_latest_date_before_should_shift_backward_n_weeks_input_as_required():
starting_date = pd.Timestamp("10 June 2016")
expected_date = pd.Timestamp("25 March 2016")
upper_bound = pd.Timestamp("31 March 2016")
time_step = pd.Timedelta("7D")
result = latest_date_before(starting_date, upper_bound, time_step)
assert result == expected_date
def test_latest_date_before_should_shift_forward_n_weeks_input_as_required():
starting_date = pd.Timestamp("10 June 2016")
expected_date = pd.Timestamp("27 January 2017")
upper_bound = pd.Timestamp("29 January 2017")
time_step = pd.Timedelta("7D")
result = latest_date_before(starting_date, upper_bound, time_step)
assert result == expected_date
def test_latest_date_before_should_shift_forward_until_upper_bound():
    # here the upper bound IS the expected date => makes sure we go up to
    # this one
starting_date = pd.Timestamp("10 June 2016")
upper_bound = pd.Timestamp("24 June 2016")
time_step = pd.Timedelta("7D")
result = latest_date_before(starting_date, upper_bound, time_step)
assert result == upper_bound
def test_if_networkx_bipartite_keeps_actual_structure():
    # Currently, networkx.bipartite returns bipartite networks where the first
    # node is always in the first group, and the second node is always in the
    # second group
RB = bipartite.random_graph(5, 10, 0.9, 1234)
assert functools.reduce(lambda x, y: x & y, [e[0] < 5 for e in RB.edges()])
def test_random_bipartite_network_generation_returns_empty_list_if_first_entry_is_empty():
assert [] == make_random_bipartite_data([], [1, 2], 1., 1234)
def test_random_bipartite_network_generation_returns_empty_list_if_second_entry_is_empty():
assert [] == make_random_bipartite_data([1, 2], [], 1., 1234)
def test_random_bipartite_network_generation_returns_empty_list_if_both_entries_are_empty():
assert [] == make_random_bipartite_data([], [], 1., 1234)
def test_random_bipartite_network_generation_returns_empty_list_if_prob_is_zero():
assert [] == make_random_bipartite_data([1, 2], [5, 6], 0., 1234)
def test_random_bipartite_network_generation_returns_bipartite_network():
all_edges = [(1, 5), (1, 6), (2, 5), (2, 6)]
bp = make_random_bipartite_data([1, 2], [5, 6], 1., 1234)
assert functools.reduce(lambda x, y: x & y, [e in bp for e in all_edges])
|
"""
Binance referral code (10% fee rebate):
https://www.binancezh.pro/cn/register?ref=AIR1GC70
Binance futures referral code (10% fee rebate):
https://www.binancezh.com/cn/futures/ref/51bitquant
If you don't have a Binance account, you can use the invitation link to register one:
https://www.binancezh.com/cn/futures/ref/51bitquant
or use the invitation code: 51bitquant
RISK NOTE: Grid trading can suffer large losses in a trending (one-sided) market.
Please make sure you understand the full logic of this code before using it. USE AT YOUR OWN RISK.
"""
import time
import logging
from trader.binance_trader import BinanceTrader
from trader.binance_future_trader import BinanceFutureTrader
from utils import config
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=format, filename='grid_trader_log.txt')
logger = logging.getLogger('binance')
if __name__ == '__main__':
config.loads('./config.json')
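    # A minimal config.json sketch; only the keys read in this file are shown
    # (platform, symbol), and utils.config very likely requires further fields
    # such as API credentials:
    #
    # {
    #     "platform": "binance_spot",
    #     "symbol": "BTCUSDT"
    # }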
if config.platform == 'binance_spot':
trader = BinanceTrader()
else:
trader = BinanceFutureTrader()
orders = trader.http_client.cancel_open_orders(config.symbol)
print(f"cancel orders: {orders}")
while True:
try:
trader.grid_trader()
time.sleep(20)
except Exception as error:
print(f"catch error: {error}")
time.sleep(5)
|
"""
Pandora API Transport
This module contains the very low level transport agent for the Pandora API.
The transport is concerned with the details of a raw HTTP call to the Pandora
API along with the request and response encryption by way of an Encryptor
object. The result from a transport is a JSON object for the API or an
exception.
API consumers should use one of the API clients in the pandora.client package.
"""
import random
import time
import json
import base64
import requests
from requests.adapters import HTTPAdapter
from Crypto.Cipher import Blowfish
from .errors import PandoraException
DEFAULT_API_HOST = "tuner.pandora.com/services/json/"
# This decorator is a temporary workaround for handling SysCallErrors, see:
# https://github.com/shazow/urllib3/issues/367. Should be removed once a fix is
# applied in urllib3.
def retries(max_tries, exceptions=(Exception,)):
"""Function decorator implementing retrying logic.
exceptions: A tuple of exception classes; default (Exception,)
The decorator will call the function up to max_tries times if it raises
an exception.
By default it catches instances of the Exception class and subclasses.
This will recover after all but the most fatal errors. You may specify a
custom tuple of exception classes with the 'exceptions' argument; the
function will only be retried if it raises one of the specified
exceptions.
"""
def decorator(func):
def function(*args, **kwargs):
retries_left = max_tries
while retries_left > 0:
try:
retries_left -= 1
return func(*args, **kwargs)
except exceptions as exc:
# Don't retry for PandoraExceptions - unlikely that result
# will change for same set of input parameters.
if isinstance(exc, PandoraException):
raise
if retries_left > 0:
time.sleep(delay_exponential(
0.5, 2, max_tries - retries_left))
else:
raise
return function
return decorator
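# Hypothetical usage sketch of the decorator above (the wrapped function is
# not part of this module):
#
#   @retries(3, exceptions=(requests.RequestException,))
#   def fetch_playlist(transport, **params):
#       ...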
def delay_exponential(base, growth_factor, attempts):
"""Calculate time to sleep based on exponential function.
The format is::
base * growth_factor ^ (attempts - 1)
If ``base`` is set to 'rand' then a random number between
0 and 1 will be used as the base.
Base must be greater than 0, otherwise a ValueError will be
raised.
"""
if base == 'rand':
base = random.random()
elif base <= 0:
raise ValueError("The 'base' param must be greater than 0, "
"got: {}".format(base))
time_to_sleep = base * (growth_factor ** (attempts - 1))
return time_to_sleep
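# Worked example for the call made inside retries(): with base=0.5 and
# growth_factor=2 the sleep time is 0.5 * 2**(attempts - 1), i.e.
# 0.5s, 1.0s, 2.0s for the first, second and third failed attempt.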
class RetryingSession(requests.Session):
"""Requests Session With Retry Support
This Requests session uses an HTTPAdapter that retries on connection
failure three times. The Pandora API is fairly aggressive about closing
connections on clients and the default session doesn't retry.
"""
def __init__(self):
super(RetryingSession, self).__init__()
self.mount('https://', HTTPAdapter(max_retries=3))
self.mount('http://', HTTPAdapter(max_retries=3))
class APITransport(object):
"""Pandora API Transport
The transport is responsible for speaking the low-level protocol required
by the Pandora API. It knows about encryption, TLS and the other API
details. Once setup the transport acts like a callable.
"""
API_VERSION = "5"
REQUIRE_RESET = ("auth.partnerLogin", )
NO_ENCRYPT = ("auth.partnerLogin", )
REQUIRE_TLS = ("auth.partnerLogin", "auth.userLogin",
"station.getPlaylist", "user.createUser")
def __init__(self, cryptor, api_host=DEFAULT_API_HOST, proxy=None):
self.cryptor = cryptor
self.api_host = api_host
self._http = RetryingSession()
if proxy:
self._http.proxies = {"http": proxy, "https": proxy}
self.reset()
def reset(self):
self.partner_auth_token = None
self.user_auth_token = None
self.partner_id = None
self.user_id = None
self.start_time = None
self.server_sync_time = None
def set_partner(self, data):
self.sync_time = data["syncTime"]
self.partner_auth_token = data["partnerAuthToken"]
self.partner_id = data["partnerId"]
def set_user(self, data):
self.user_id = data["userId"]
self.user_auth_token = data["userAuthToken"]
@property
def auth_token(self):
if self.user_auth_token:
return self.user_auth_token
if self.partner_auth_token:
return self.partner_auth_token
return None
@property
def sync_time(self):
if not self.server_sync_time:
return None
return int(self.server_sync_time + (time.time() - self.start_time))
def remove_empty_values(self, data):
return dict((k, v) for k, v in data.items() if v is not None)
@sync_time.setter
def sync_time(self, sync_time):
self.server_sync_time = self.cryptor.decrypt_sync_time(sync_time)
def _start_request(self, method):
if method in self.REQUIRE_RESET:
self.reset()
if not self.start_time:
self.start_time = int(time.time())
def _make_http_request(self, url, data, params):
try:
data = data.encode("utf-8")
except AttributeError:
pass
params = self.remove_empty_values(params)
result = self._http.post(url, data=data, params=params)
result.raise_for_status()
return result.content
def test_url(self, url):
return self._http.head(url).status_code == requests.codes.OK
def _build_params(self, method):
return {
"method": method,
"auth_token": self.auth_token,
"partner_id": self.partner_id,
"user_id": self.user_id,
}
def _build_url(self, method):
return "{0}://{1}".format(
"https" if method in self.REQUIRE_TLS else "http",
self.api_host)
def _build_data(self, method, data):
data["userAuthToken"] = self.user_auth_token
if not self.user_auth_token and self.partner_auth_token:
data["partnerAuthToken"] = self.partner_auth_token
data["syncTime"] = self.sync_time
data = json.dumps(self.remove_empty_values(data))
if method not in self.NO_ENCRYPT:
data = self.cryptor.encrypt(data)
return data
def _parse_response(self, result):
result = json.loads(result.decode("utf-8"))
if result["stat"] == "ok":
return result["result"] if "result" in result else None
else:
raise PandoraException.from_code(result["code"], result["message"])
@retries(3)
def __call__(self, method, **data):
self._start_request(method)
url = self._build_url(method)
data = self._build_data(method, data)
params = self._build_params(method)
result = self._make_http_request(url, data, params)
return self._parse_response(result)
class Encryptor(object):
"""Pandora Blowfish Encryptor
The blowfish encryptor can encrypt and decrypt the relevant parts of the
API request and response. It handles the formats that the API expects.
"""
def __init__(self, in_key, out_key):
self.bf_out = Blowfish.new(out_key, Blowfish.MODE_ECB)
self.bf_in = Blowfish.new(in_key, Blowfish.MODE_ECB)
@staticmethod
def _decode_hex(data):
return base64.b16decode(data.encode("ascii").upper())
@staticmethod
def _encode_hex(data):
return base64.b16encode(data).lower()
def decrypt(self, data):
data = self.bf_out.decrypt(self._decode_hex(data))
return json.loads(self.strip_padding(data))
def decrypt_sync_time(self, data):
return int(self.bf_in.decrypt(self._decode_hex(data))[4:-2])
def add_padding(self, data):
block_size = Blowfish.block_size
pad_size = len(data) % block_size
return data + (chr(pad_size) * (block_size - pad_size))
def strip_padding(self, data):
pad_size = int(data[-1])
if not data[-pad_size:] == bytes((pad_size,)) * pad_size:
raise ValueError('Invalid padding')
return data[:-pad_size]
def encrypt(self, data):
return self._encode_hex(self.bf_out.encrypt(self.add_padding(data)))
|
import mxnet as mx
import numpy as np
from mx_constant import MyConstant
eps = 1e-5
def input_transform_net(data, batch_size, num_points, workspace, bn_mom=0.9, scope="itn_"):
data = mx.sym.expand_dims(data, axis=1) # (32,1,1024,3)
conv0 = mx.sym.Convolution(data=data, num_filter=64, kernel=(1, 3), stride=(1, 1), name=scope + "conv0",
workspace=workspace)
conv0 = mx.sym.BatchNorm(data=conv0, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn0')
conv0 = mx.sym.Activation(data=conv0, act_type='relu', name=scope + 'relu0')
conv1 = mx.sym.Convolution(data=conv0, num_filter=128, kernel=(1, 1), stride=(1, 1), name=scope + "conv1",
workspace=workspace)
conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn1')
conv1 = mx.sym.Activation(data=conv1, act_type='relu', name=scope + 'relu1')
conv2 = mx.sym.Convolution(data=conv1, num_filter=1024, kernel=(1, 1), stride=(1, 1), name=scope + "conv2",
workspace=workspace)
conv2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn2')
conv2 = mx.sym.Activation(data=conv2, act_type='relu', name=scope + 'relu2')
pool3 = mx.sym.Pooling(data=conv2, kernel=(num_points, 1), pool_type='max', name=scope + 'pool3')
pool3_reshaped = mx.sym.Reshape(data=pool3, shape=(batch_size, -1))
fc4 = mx.sym.FullyConnected(data=pool3_reshaped, num_hidden=512, name=scope + 'fc4')
fc4 = mx.sym.BatchNorm(data=fc4, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn4')
fc4 = mx.sym.Activation(data=fc4, act_type='relu', name=scope + 'relu4')
fc5 = mx.sym.FullyConnected(data=fc4, num_hidden=256, name=scope + 'fc5')
fc5 = mx.sym.BatchNorm(data=fc5, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn5')
fc5 = mx.sym.Activation(data=fc5, act_type='relu', name=scope + 'relu5')
input_transformer_weight = mx.sym.Variable(name="input_transformer_weight", shape=(9, 256), init=mx.init.Zero())
input_transformer_bias = mx.sym.Variable(name="input_transformer_bias", shape=(9), init=mx.init.Zero())
transform = mx.sym.FullyConnected(data=fc5, num_hidden=9, weight=input_transformer_weight, bias=input_transformer_bias, name=scope + 'fc6')
const_arr = [1, 0, 0, 0, 1, 0, 0, 0, 1]
a = mx.sym.Variable('itn_addi_bias', shape=(batch_size, 9), init=MyConstant(value=[const_arr]*batch_size))
a = mx.sym.BlockGrad(a) # now variable a is a constant
transform = mx.sym.elemwise_add(transform, a, name=scope + "add_eye")
transform_reshaped = mx.sym.Reshape(data=transform, shape=(batch_size, 3, 3), name=scope + "reshape_transform")
return transform_reshaped
def feature_transform_net(data, batch_size, num_points, workspace, bn_mom=0.9, scope="ftn_"):
conv0 = mx.sym.Convolution(data=data, num_filter=64, kernel=(1, 1), stride=(1, 1), name=scope + "conv0",
workspace=workspace)
conv0 = mx.sym.BatchNorm(data=conv0, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn0')
conv0 = mx.sym.Activation(data=conv0, act_type='relu', name=scope + 'relu0')
conv1 = mx.sym.Convolution(data=conv0, num_filter=128, kernel=(1, 1), stride=(1, 1), name=scope + "conv1",
workspace=workspace)
conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn1')
conv1 = mx.sym.Activation(data=conv1, act_type='relu', name=scope + 'relu1')
conv2 = mx.sym.Convolution(data=conv1, num_filter=1024, kernel=(1, 1), stride=(1, 1), name=scope + "conv2",
workspace=workspace)
conv2 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn2')
conv2 = mx.sym.Activation(data=conv2, act_type='relu', name=scope + 'relu2')
pool3 = mx.sym.Pooling(data=conv2, kernel=(num_points, 1), pool_type='max', name=scope + 'pool3')
pool3_reshaped = mx.sym.Reshape(data=pool3, shape=(batch_size, -1))
fc4 = mx.sym.FullyConnected(data=pool3_reshaped, num_hidden=512, name=scope + 'fc4')
fc4 = mx.sym.BatchNorm(data=fc4, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn4')
fc4 = mx.sym.Activation(data=fc4, act_type='relu', name=scope + 'relu4')
fc5 = mx.sym.FullyConnected(data=fc4, num_hidden=256, name=scope + 'fc5')
fc5 = mx.sym.BatchNorm(data=fc5, fix_gamma=False, eps=eps, momentum=bn_mom, name=scope + 'bn5')
fc5 = mx.sym.Activation(data=fc5, act_type='relu', name=scope + 'relu5')
feat_transformer_weight = mx.sym.Variable(name="feat_transformer_weight", shape=(64*64, 256), init=mx.init.Zero())
feat_transformer_bias = mx.sym.Variable(name="feat_transformer_bias", shape=(64*64), init=mx.init.Zero())
transform = mx.sym.FullyConnected(data=fc5, num_hidden=64 * 64, weight=feat_transformer_weight, bias=feat_transformer_bias, name=scope + 'fc6')
const_arr = np.eye(64, dtype=np.float32).flatten().tolist()
a = mx.sym.Variable('ftn_addi_bias', shape=(batch_size, 64 * 64), init=MyConstant(value=[const_arr]*batch_size))
a = mx.sym.BlockGrad(a) # now variable a is a constant
transform = mx.sym.elemwise_add(transform, a, name=scope + "add_eye")
transform_reshaped = mx.sym.Reshape(data=transform, shape=(batch_size, 64, 64), name=scope + "reshape_transform")
return transform_reshaped
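# A minimal usage sketch (not part of the original network definition): the 3x3
# transform produced by input_transform_net is normally applied to the raw point
# cloud with a batched matrix multiply before the per-point convolutions, and the
# 64x64 transform from feature_transform_net is applied to the per-point feature
# map in the same way. apply_input_transform is a hypothetical helper name; it
# assumes data has shape (batch_size, num_points, 3).
def apply_input_transform(data, transform_reshaped, scope="itn_"):
    # (batch_size, num_points, 3) x (batch_size, 3, 3) -> (batch_size, num_points, 3)
    return mx.sym.batch_dot(data, transform_reshaped, name=scope + "apply_transform")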
|
from tkinter import *
total = 0
pre_total = []
saletotal = 0
#=========================Events
def add_ev(event):
x = float(current_Price_str.get())
global total
global pre_total
total += x
total_int.set(round(total))
current_Price_str.set("")
note_str.set(str(round(x))+" Sucessfully added!")
def calculate_ev(event):
global total
global saletotal
global pre_total
y = total
gst= (y/100)*5
withGST = gst + y
saletotal += withGST
total_with_Gst_str.set(str(round(withGST)))
av_off = (y/100)*20
av_price = y - av_off
av_Price_str.set(str(av_price))
note_str.set(" Sucessfully Calculated! "+ "Today's Sale: " +str(round(saletotal)))
pre_total.insert(0,str(round(withGST)))
print(pre_total)
pass
def sub_ev(event):
x = float(current_Price_str.get())
global total
total -=x
total_int.set(round(total))
current_Price_str.set("")
note_str.set(str(round(x))+" Sucessfully Subtracted!")
pass
def reset_ev(event):
current_Price_str.set("")
total_int.set("")
total_with_Gst_str.set("")
av_Price_str.set("")
global total
total = 0
init()
note_str.set(" Sucessfully Reset! ")
pass
#=============================END of Events
def init():
total_int.set("0")
total_with_Gst_str.set("0")
av_Price_str.set("0")
note_str.set("Created by Eknath")
pass
def short_tp():
top = Toplevel()
top.title("The Short Cut")
msg = Message(top, text="Enter: Adding Item Price \n Ctrl+ - : for Reducing the Amount \n Ctrl+ Shift + +: Add \n Crl+r : Reset")
msg.pack()
button = Button(top, text="Dismiss", command=top.destroy)
button.pack()
pass
def add():
x = float(current_Price_str.get())
global total
total += x
total_int.set(round(total))
current_Price_str.set("")
note_str.set(str(round(x))+" Sucessfully added!")
pass
def sub():
x = float(current_Price_str.get())
global total
total -=x
total_int.set(round(total))
current_Price_str.set("")
note_str.set(str(round(x))+" Sucessfully Subtracted!")
pass
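# calculate() below adds 5% GST to the running total and also shows an
# "Auroville amount" equal to the total minus a 20% discount; the GST-inclusive
# grand total is accumulated into today's running sale figure.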
def calculate():
global total
global saletotal
y = total
gst= (y/100)*5
withGST = gst + y
total_with_Gst_str.set(str(round(withGST)))
av_off = (y/100)*20
av_price = y - av_off
av_Price_str.set(str(av_price))
saletotal += withGST
note_str.set(" Sucessfully Calculated! " + "Today's Sale: " +str(round(saletotal)))
pass
def reset():
current_Price_str.set("")
total_int.set("")
total_with_Gst_str.set("")
av_Price_str.set("")
global total
total = 0
init()
note_str.set("Reset Sucessfull! ")
pass
#==============test
def quitm():
r.destroy()
pass
#===============
r = Tk()
r.title("Tanto -Cashier Tool V0.001 beta-V 0.01")
r.geometry("400x250")
r.wm_iconbitmap('tantoapp.ico')
#=====================================String Variables
current_Price_str = StringVar()
total_int = StringVar()
total_with_Gst_str = StringVar()
av_Price_str = StringVar()
note_str = StringVar()
#=====================================End String Variables
#=======================FileMenu
menubar = Menu(r)
menubar.add_command(label="Short-Cut",command=short_tp)
menubar.add_command(label="Quit!",command=quitm)
r.config(menu=menubar)
#=======================
f= Frame(r,relief=FLAT,bd=0)
lblh1 = Label(r,text="Tanto Srima-Cashier Tool ",font=("Helvetica", "20","bold"),fg="forestgreen",bg="white").pack(fill=X)
#====Current Price===============
entry_currentPrice_lbl = Label(f,text="Current Item Price : ",bd=0).grid(row="1",column="0",sticky="nsew")
entry_currentPrice_entry = Entry(f,textvariable=current_Price_str,takefocus="Takefocus",font=("Helvetica", "12")).grid(row="1",column="1")
#add button for current price
current_add_btn = Button(f,text="+",command=add ,width="5",height="1").grid(row="1",column="2")
current_sub_btn = Button(f,text="-",command=sub,width="5",height="1").grid(row="1",column="3",sticky="ns")
#=====================Total Price
total_Price_lbl = Label(f,text="Total Price : ").grid(row="2",column="0",sticky="E")
entry_totalPrice_label = Label(f,textvariable=total_int,font=("Helvetica", "20")).grid(row="2",column="1")
total_add_btn = Button(f,text="Calculate",command=calculate,).grid(row="2",column="2",sticky="EWNS",columnspan="2",rowspan="2")
#========== price with GST
total_Price_with_GST_lbl = Label(f,text="Grand Total",bg="green",font=("Helvetica", "12"),width="10").grid(row="3",column="0",sticky="EWNS")
total_Price_with_GST_entry = Label(f,textvariable=total_with_Gst_str,font=("Helvetica", "20","bold")).grid(row="4",column="0",sticky="E")
total_Price_Av = Label(f,text="Aurovillie Amount",bg="orange",font=("Helvetica", "12")).grid(row="3",column="1",sticky="E")
total_Price_Av_lable = Label(f,textvariable=av_Price_str,font=("Helvetica", "20","bold"),fg="orange").grid(row="4",column="1",sticky="E")
#=============Note
warning_lbl = Label(f,textvariable=note_str,bg="lightgreen",width="54").grid(row="5",column="0",columnspan="5")
#====================reset Button
reset_btn = Button(f,text="Reset",command=reset,width="10").grid(row="4",column="2",sticky="nswe",columnspan="2")
#=============Event Button
r.bind("<Return>",add_ev)
r.bind("<Control-plus>",add_ev)
r.bind("<Control-Return>",calculate_ev)
r.bind("<Control-minus>",sub_ev)
r.bind("<Control-r>",reset_ev)
#binding event======
#packing the Frame
f.pack()
init()
r.mainloop()
|
"""
Created on 21 Jun 2019
@author: Bruno Beloff ([email protected])
"""
import time
from collections import OrderedDict
from multiprocessing import Manager
from scs_core.data.json import JSONable
from scs_core.sync.interval_timer import IntervalTimer
from scs_core.sync.synchronised_process import SynchronisedProcess
from scs_display.display.system_display import SystemDisplay
# --------------------------------------------------------------------------------------------------------------------
class SystemMonitor(SynchronisedProcess):
"""
classdocs
"""
UPDATE_INTERVAL = 5.0 # seconds
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, device_name, startup_message, shutdown_message, show_time, psu_report_class,
psu_report_filename, queue_report_filename, gps_report_filename):
display = SystemDisplay.construct(device_name, startup_message, show_time, psu_report_class,
psu_report_filename, queue_report_filename, gps_report_filename)
return cls(display, shutdown_message)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, display, shutdown_message):
"""
Constructor
"""
manager = Manager()
SynchronisedProcess.__init__(self, manager.list())
self.__shutdown_message = shutdown_message
self.__display = display
# ----------------------------------------------------------------------------------------------------------------
# SynchronisedProcess implementation...
def stop(self):
try:
self.__display.status_message = self.__shutdown_message
self.__display.clear()
time.sleep(self.UPDATE_INTERVAL)
super().stop()
except (BrokenPipeError, KeyboardInterrupt, SystemExit):
pass
def run(self):
try:
timer = IntervalTimer(self.UPDATE_INTERVAL)
while timer.true():
with self._lock:
status = SystemStatus.construct_from_jdict(OrderedDict(self._value))
if status is not None:
self.__display.status_message = status.message
self.__display.update()
except (BrokenPipeError, KeyboardInterrupt, SystemExit):
pass
# ----------------------------------------------------------------------------------------------------------------
# setter for client process...
def set_message(self, message):
status = SystemStatus(message)
if status.is_valid():
with self._lock:
status.as_list(self._value)
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "SystemMonitor:{value:%s, shutdown_message: %s, display:%s}" % \
(self._value, self.__shutdown_message, self.__display)
# --------------------------------------------------------------------------------------------------------------------
class SystemStatus(JSONable):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct_from_jdict(cls, jdict):
if not jdict:
return None
message = jdict.get('message')
return SystemStatus(message)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, message):
"""
Constructor
"""
self.__message = message # string
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
return self.message is not None
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['message'] = self.message
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def message(self):
return self.__message
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "SystemStatus:{message:%s}" % self.message
|
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.RawToDigi_cff import *
# RPC Merged Digis
from Configuration.Eras.Modifier_run3_RPC_cff import run3_RPC
run3_RPC.toModify(muonRPCDigis,
inputTagTwinMuxDigis = 'rpcTwinMuxRawToDigi',
inputTagOMTFDigis = 'omtfStage2Digis',
inputTagCPPFDigis = 'rpcCPPFRawToDigi'
)
|
import os
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'docs.db'),
}
}
SECRET_KEY = 'HASJFDYWQ98r6y2hesakjfhakjfy87eyr1hakjwfa'
CACHE_BACKEND = 'locmem://'
LOCAL_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
ADMINS = [
('Example Admin', '[email protected]'),
]
# Set this in order to bypass code that auto-fills the database with
# SCMTool data.
RUNNING_TEST = True
ENABLED_FEATURES = {
'diffviewer.dvcs': True,
}
|
from ..player import BasePlayer
class DataKeeper(BasePlayer):
'''
Player who keeps his most repeated pieces.
'''
def __init__(self, name):
super().__init__(f"DataKeeper::{name}")
def filter(self, valids=None):
valids = super().filter(valids)
datas = {}
for p1, p2 in self.pieces:
if p1 != p2:
datas[p2] = datas.get(p2, 0) + 1
datas[p1] = datas.get(p1, 0) + 1
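        # Keep the pieces whose better-connected end is the rarest in hand: only
        # candidates with the lowest max-count are offered for play, so pieces
        # carrying the player's most repeated numbers stay in hand longer.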
best, selected = float('inf'), []
for piece, head in valids:
value = max(datas[piece[0]], datas[piece[1]])
if value < best:
best = value
selected.clear()
if value == best:
selected.append((piece, head))
return selected
|
from typing import List, Set
from crosshair.statespace import MessageType
from crosshair.test_util import check_states
from crosshair.core_and_libs import standalone_statespace, NoTracing, proxy_for_type
def test_dict_index():
a = {"two": 2, "four": 4, "six": 6}
def numstr(x: str) -> int:
"""
post: _ != 4
raises: KeyError
"""
return a[x]
assert check_states(numstr) == {MessageType.POST_FAIL}
def test_dict_comprehension():
with standalone_statespace as space:
with NoTracing():
x = proxy_for_type(int, "x")
space.add(x.var >= 40)
space.add(x.var < 50)
d = {k: v for k, v in ((35, 3), (x, 4))}
with NoTracing():
assert type(d) is not dict
for k in d:
if k == 35:
continue
with NoTracing():
assert type(k) is not int
assert space.is_possible((k == 43).var)
assert space.is_possible((k == 48).var)
def test_dict_comprehension_e2e():
def f(l: List[int]) -> dict:
"""
post: 4321 not in __return__
"""
return {i: i for i in l}
assert check_states(f) == {MessageType.POST_FAIL}
def test_set_comprehension():
with standalone_statespace as space:
with NoTracing():
x = proxy_for_type(int, "x")
space.add(x.var >= 40)
space.add(x.var < 50)
result_set = {k for k in (35, x)}
with NoTracing():
assert type(result_set) is not set
for k in result_set:
if k == 35:
continue
with NoTracing():
assert type(k) is not int
assert space.is_possible((k == 43).var)
assert space.is_possible((k == 48).var)
def test_set_comprehension_e2e():
def f(s: Set[int]) -> Set:
"""
post: 4321 not in __return__
"""
return {i for i in s}
assert check_states(f) == {MessageType.POST_FAIL}
|
#!/usr/bin/python
# This software was developed in whole or in part by employees of the
# Federal Government in the course of their official duties, and with
# other Federal assistance. Pursuant to title 17 Section 105 of the
# United States Code portions of this software authored by Federal
# employees are not subject to copyright protection within the United
# States. For portions not authored by Federal employees, the Federal
# Government has been granted unlimited rights, and no claim to
# copyright is made. The Federal Government assumes no responsibility
# whatsoever for its use by other parties, and makes no guarantees,
# expressed or implied, about its quality, reliability, or any other
# characteristic.
#
# We would appreciate acknowledgement if the software is used.
"""Redact an image file using a ruleset...
Image Redaction Project.
This program redacts disk image files.
inputs:
* The disk image file
* A set of rules that describe what to redact, and how to redact it.
Rule File format:
The readaction command file consists of commands.
Each command has an "condition" and an "action"
[condition] [action]
Conditions:
FILENAME <afilename> - a file with the given name
FILEPAT <a file pattern> - any file with a given pattern
DIRNAME <a directory> - any file in the directory
MD5 <a md5> - any file with the given md5
SHA1 <a sha1> - any file with the given sha1
CONTAINS <a string> - any file that contains <a string>
Actions:
SCRUB MATCH - Scrubs the pattern where it occurs
SCRUB SECTOR - Scrubs the block where the pattern occurs
SCRUB FILE - Scrubs the file in which the pattern occurs
Actions:
FILL 0x44 - overwrite by filling with character 0x44 ('D')
ENCRYPT - encrypts the data
FUZZ - fuzz the binary, but not the strings
Examples:
Example file:
===============
MD5 3482347345345 SCRUB FILE
MATCH [email protected] SCRUB FILE
MATCH foobar SCRUB BLOCK
================================================================
Other actions in file:
KEY 12342343 (an encryption key)
"""
import xml.parsers.expat
import hashlib
import os.path
import dfxml.fiwalk as fiwalk
import re
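# A minimal example ruleset matching the grammar described in the module
# docstring (illustrative only: the image/XML filenames and the MD5 value are
# placeholders, and only actions understood by RedactConfig below are used).
# Written to disk, such a file would be passed to this program as its
# config-file argument.
EXAMPLE_RULES = """\
IMAGEFILE disk.img
XMLFILE disk.xml
COMMIT
MD5 d41d8cd98f00b204e9800998ecf8427e FILL 0x44
FILEPAT *.docx FUZZ
"""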
################################################################
def convert_fileglob_to_re(fileglob):
regex = fileglob.replace(".","[.]").replace("*",".*").replace("?",".?")
return re.compile(regex)
class redact_rule:
""" Instances of this class are objects that can decide whether or not to redact."""
def __init__(self,line):
self.line = line
self.complete = True # by default, redacts everything
def should_redact(self,fileobject):
"""Returns True if this fileobject should be redacted"""
raise ValueError("redact method of redact_rule super class should not be called")
def __str__(self):
return "action<"+self.line+">"
def runs_to_redact(self,fi):
"""Returns the byte_runs of the source which match the rule.
By default this is the entire object."""
return fi.byte_runs()
class redact_rule_md5(redact_rule):
""" redact if the MD5 matches"""
def __init__(self,line,val):
redact_rule.__init__(self,line)
self.md5val = val.lower()
def should_redact(self,fi):
return self.md5val == fi.tag('md5')
class redact_rule_sha1(redact_rule):
""" redact if the SHA1 matches"""
def __init__(self,line,val):
redact_rule.__init__(self,line)
self.sha1val = val.lower()
def should_redact(self,fi):
return self.sha1val == fi.tag('sha1')
class redact_rule_filepat(redact_rule):
def __init__(self,line,filepat):
import re
redact_rule.__init__(self,line)
# convert fileglobbing to regular expression
self.filepat_re = convert_fileglob_to_re(filepat)
print("adding rule to redact path "+self.filepat_re.pattern)
def should_redact(self,fileobject):
return self.filepat_re.search(fileobject.filename())
class redact_rule_filename(redact_rule):
def __init__(self,line,filename):
redact_rule.__init__(self,line)
self.filename = filename
print("adding rule to redact filename "+self.filename)
def should_redact(self,fileobject):
was = os.path.sep
os.path.sep = '/' # Force Unix filename conventions
ret = self.filename == os.path.basename(fileobject.filename())
os.path.sep = was
return ret
class redact_rule_dirname(redact_rule):
def __init__(self,line,dirname):
redact_rule.__init__(self,line)
self.dirname = dirname
def should_redact(self,fileobject):
was = os.path.sep
os.path.sep = '/' # Force Unix filename conventions
ret = self.dirname == os.path.dirname(fileobject.filename())
os.path.sep = was
return ret
class redact_rule_contains(redact_rule):
def __init__(self,line,text):
redact_rule.__init__(self,line)
self.text = text
def should_redact(self,fileobject):
return self.text in fileobject.contents()
class redact_rule_string(redact_rule):
def __init__(self,line,text):
redact_rule.__init__(self,line)
self.text = text
self.complete = False # doesn't redact the entire file
def should_redact(self,fileobject):
return self.text in fileobject.contents()
def runs_to_redact(self,fi):
"""Overridden to return the byte runs of just the given text"""
ret = []
tlen = len(self.text)
for run in fi.byte_runs():
(file_offset,run_len,img_offset) = run
run_content = fi.content_for_run(run)
offset = 0
# Now find all the places inside "run"
# where the text "self.text" appears
print("looking for '{}' in '{}'".format(self.text,run))
while offset>=0:
                offset = run_content.find(self.text, offset)
if offset>=0:
ret.append((file_offset+offset,tlen,img_offset+offset))
offset += 1 #
return ret
"""Not actually a redact rule, but rather a rule for global ignores"""
class ignore_rule():
def __init__(self):
self.ignore_patterns = []
def ignore(self,ignore):
"""Ignores specified files based on a regex"""
self.ignore_patterns.append(re.compile(convert_fileglob_to_re(ignore)))
return self
def should_ignore(self, fi):
for ig in self.ignore_patterns:
if ig.search(fi.filename()):
return True
return False
################################################################
class redact_action():
"""Instances of this class are objects that specify how a redaction should be done."""
def redact(self,rule,fileobject,rc):
"""Performs the redaction"""
raise ValueError("redact method of redact_action super class should not be called")
class redact_action_fill(redact_action):
""" Perform redaction by filling"""
def __init__(self,val):
self.fillvalue = val
def redact(self,rule,fi,rc):
for run in rule.runs_to_redact(fi):
print(" Current run %s " % run)
rc.imagefile.seek(run.img_offset)
runlen = run.len
print("\tFile info - \n\t\tname: %s \n\t\tclosed: %s \n\t\tposition: %d \n\t\tmode: %s" % ("\\"))
(rc.imagefile.name, rc.imagefile.closed, rc.imagefile.tell(), rc.imagefile.mode)
print(" Filling at offset {}, {} bytes with pattern {}".format(run.img_offset,runlen,hex(self.fillvalue)))
if rc.commit:
rc.imagefile.seek(run.img_offset)
rc.imagefile.write(chr(self.fillvalue) * run.len)
print(" >>COMMIT\n")
class redact_action_encrypt(redact_action):
""" Perform redaction by encrypting"""
def redact(self,rule,fileobject,rc):
for run in rule.runs_to_redact(fileobject):
print(" encrypting at offset {}, {} bytes with cipher".format(run.img_offset,run.bytes))
raise ValueError("Didn't write this yet")
class redact_action_fuzz(redact_action):
""" Perform redaction by fuzzing x86 instructions """
def redact(self,rule,fileobject,rc):
'''
The net effect of this function is that bytes 127-255 are "fuzzed" over
the range of 159-191, with each series of four bytes
(e.g. 128-131) to one byte value (e.g. 160).
'''
def fuzz(ch):
o = ord(ch)
if(o<127):
r = ch
else:
r = chr(((o>>2)+128)%256)
return r
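        # Worked example of the mapping above (illustrative): bytes below 127 are
        # left alone, while bytes 127-255 collapse in groups of four into the
        # 159-191 band, e.g. fuzz('A') -> 'A' (ord 65 < 127),
        # fuzz(chr(130)) -> chr(160) since (130 >> 2) + 128 = 160, and
        # fuzz(chr(255)) -> chr(191) since (255 >> 2) + 128 = 191.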
print("Redacting with FUZZ: ",fileobject)
for run in rule.runs_to_redact(fileobject):
try:
print(" Fuzzing at offset: %d, can fuzz up to %d bytes " % (run.img_offset,run.len))
rc.imagefile.seek(run.img_offset)
# Previously redacted only first 10 bytes, now redacts entire sequence
#first_ten_bytes = rc.imagefile.read(10)
run_bytes = rc.imagefile.read(run.len)
print("\tFile info - \n\t\tname: %s \n\t\tclosed: %s \n\t\tposition: %d \n\t\tmode: %s" % "\\")
print(rc.imagefile.name, rc.imagefile.closed, rc.imagefile.tell(), rc.imagefile.mode)
print(" Fuzzing %d bytes - should be %d" % (len(run_bytes), run.len))
newbytes = "".join([fuzz(x) for x in run_bytes])
#debug
print("new: %i old: %i" % (len(newbytes), run.len))
assert(len(newbytes)==run.len)
if rc.commit:
rc.imagefile.seek(run.img_offset)
rc.imagefile.write(newbytes)
print("\n >>COMMIT")
except AttributeError:
print("!AttributeError: no byte run?")
################################################################
class RedactConfig:
"""Class to read and parse a redaction config file"""
def __init__(self,fn):
self.cmds = []
self.commit = False
self.filename = None
self.xmlfile = None
self.ignore_rule = ignore_rule()
for line in open(fn,"r"):
if line[0] in '#;': continue # comment line
line = line.strip()
if line=="": continue
atoms = line.split(" ")
while "" in atoms: atoms.remove("") # take care of extra spaces
cmd = atoms[0].lower()
rule = None
action = None
# First look for simple commands
if cmd=='key':
self.key = atoms[1]
continue
if cmd=="commit":
self.commit = True
continue
if cmd=="imagefile":
self.imagefile = open(atoms[1],"r+b")
continue
if cmd=="xmlfile":
self.xmlfile = open(atoms[1],"r")
continue
if cmd=='ignore':
self.ignore_rule.ignore(atoms[1])
continue
# Now look for commands that are rules
if cmd=='md5':
rule = redact_rule_md5(line,atoms[1])
if cmd=='sha1':
rule = redact_rule_sha1(line,atoms[1])
if cmd=='filename':
rule = redact_rule_filename(line,atoms[1])
if cmd=='filepat':
rule = redact_rule_filepat(line,atoms[1])
if cmd=='contains':
rule = redact_rule_contains(line,atoms[1])
if cmd=='string':
rule = redact_rule_string(line,atoms[1])
if rule:
if atoms[2].lower()=='fill':
action = redact_action_fill(eval(atoms[3]))
if atoms[2].lower()=='encrypt':
action = redact_action_encrypt()
if atoms[2].lower()=='fuzz':
action = redact_action_fuzz()
if not rule or not action:
print("atoms:",atoms)
print("rule:",rule)
print("action:",action)
raise ValueError("Cannot parse: '%s'" % line)
self.cmds.append((rule,action))
def need_md5(self):
for (rule,action) in self.cmds:
if rule.__class__==redact_rule_md5: return True
return False
def need_sha1(self):
for (rule,action) in self.cmds:
if rule.__class__==redact_rule_sha1: return True
return False
def fiwalk_opts(self):
"Returns the options that fiwalk needs given the redaction requested."
opts = "-x"
if self.need_sha1(): opts = opts+"1"
if self.need_md5(): opts = opts+"m"
return opts
def process_file(self,fileinfo):
for (rule,action) in self.cmds:
if rule.should_redact(fileinfo):
print("Processing file: %s" % fileinfo.filename())
if self.ignore_rule.should_ignore(fileinfo):
print("(Ignoring %s)" % fileinfo.filename())
return
print("")
print("Redacting ",fileinfo.filename())
print("Reason:",str(rule))
print("Action:",action)
action.redact(rule,fileinfo,self)
if rule.complete:
return # only need to redact once!
def close_files(self):
if self.imagefile and self.imagefile.closed == False:
print("Closing file: %s" % self.imagefile.name)
self.imagefile.close()
if self.xmlfile and self.xmlfile.closed == False:
print("Closing file: %s" % self.xmlfile.name)
self.xmlfile.close()
if __name__=="__main__":
import sys,time
from optparse import OptionParser
from subprocess import Popen,PIPE
global options
parser = OptionParser()
parser.usage = "%prog [options] config-file"
parser.add_option("-d","--debug",help="prints debugging info",dest="debug")
(options,args) = parser.parse_args()
t0 = time.time()
# Read the redaction configuration file
rc = RedactConfig(args[0])
if not rc.imagefile:
print("Error: a filename must be specified in the redaction config file")
sys.exit(1)
fiwalk.fiwalk_using_sax(imagefile=rc.imagefile,xmlfile=rc.xmlfile,callback=rc.process_file)
t1 = time.time()
rc.close_files()
print("Time to run: %d seconds" % (t1-t0))
|
import RPi.GPIO as GPIO
import time
import requests
from datetime import datetime
GPIO.setmode(GPIO.BCM)
TRIG = 23
ECHO = 24
ALERT = 17
start = datetime.now()
print "calibration in progress"
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(ALERT,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
content = ""
api_token = 'your_api_token'
api_url_base = 'http://eskimo:8080/checkin'
headers = {'Content-Type': 'application/json'}
response = requests.get(api_url_base, headers=headers, json=content)
def settle(settleTime):
GPIO.output(TRIG, False)
print "Waiting For Sensor To Settle"
time.sleep(settleTime)
def formatDateAndTime(aDateAndTime):
format = '%Y-%m-%d_%H:%M:%S'
return (aDateAndTime.strftime(format))
def ping(shouldSettle):
#print "Sending Trig signal"
GPIO.output(TRIG, True)
time.sleep(0.00001)
GPIO.output(TRIG, False)
pulse_start = 0
pulse_end = 0
while GPIO.input(ECHO)==0:
pulse_start = time.time()
while GPIO.input(ECHO)==1:
pulse_end = time.time()
#print "Received Echo signal"
pulse_duration = pulse_end - pulse_start
#print "Pulse Duration: ", pulse_duration
distance = pulse_duration * 34300
distance = distance / 2
distance = round(distance, 2)
if shouldSettle > 0 :
settle(shouldSettle)
return distance
calibrationDistance = (ping(1) + ping(1) + ping(1))
#toggle = False
#for i in range(10):
# toggle = not toggle
# GPIO.output(ALERT, toggle)
# time.sleep(500.0/1000)
#GPIO.output(ALERT, False)
calibrationDistance = calibrationDistance / 3
print "Calibration Distance: ",calibrationDistance,"cm"
tolerance = .10
print "Acceptable tolerance: ",tolerance
threshold = calibrationDistance * tolerance
print "Threshold: ", threshold
print "Calibration Complete, taking measurements"
measurements = []
for i in range(45):
current = ping(0)
diff = calibrationDistance - current
print "Current Distance: ", current," Diff: ",diff
#if abs(diff) > threshold:
# print "ALERT ALERT ALERT"
# GPIO.output(ALERT, True)
# time.sleep(1)
# GPIO.output(ALERT, False)
now = datetime.now()
measurements.append({"timeStamp":formatDateAndTime(now), "measurement": current})
time.sleep(1)
stop = datetime.now()
print "Measurements complete"
content = {"start":formatDateAndTime(start),"stop":formatDateAndTime(stop),"items":measurements}
api_token = 'your_api_token'
api_url_base = 'http://eskimo:8080/sensor/'
headers = {'Content-Type': 'application/json'}
response = requests.post(api_url_base, headers=headers, json=content)
GPIO.cleanup() |
# @Title: 平衡二叉树 (Balanced Binary Tree)
# @Author: 18015528893
# @Date: 2021-02-08 11:33:55
# @Runtime: 68 ms
# @Memory: 19.6 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isBalanced(self, root: TreeNode) -> bool:
if root is None:
return True
def get_height(root):
if root is None:
return 0
return max(get_height(root.left), get_height(root.right)) + 1
return abs(get_height(root.left) - get_height(root.right)) <= 1 \
and self.isBalanced(root.left) and self.isBalanced(root.right)
|
import unittest
from importlib import import_module
component = import_module('run.helpers.parse')
class parse_Test(unittest.TestCase):
# Actions
def setUp(self):
self.parse = component.parse
# Tests
def test(self):
self.assertEqual(self.parse('1'), ((1,), {}))
self.assertEqual(self.parse('a=1'), ((), {'a': 1}))
self.assertEqual(self.parse('1, a=1'), ((1,), {'a': 1}))
self.assertEqual(self.parse('"1", a=1'), (('1',), {'a': 1}))
|
# -*- coding: utf-8 -*-
"""
Intro
=====
Sandhi splitter for Samskrit.
Builds up a database of sandhi rules and utilizes them for both
performing sandhi and splitting words.
Will generate splits that may not all be valid words. That is left to the
calling module to validate. See for example SanskritLexicalAnalyzer
Example usage:
from sandhi import Sandhi
sandhi = Sandhi()
joins = sandhi.join('tasmin', 'iti')
splits = sandhi.split_at('tasminniti', 5)
Draws inspiration from https://github.com/sanskrit/sanskrit
@author: Avinash Varna (github: @avinashvarna)
Usage
=====
The ``Sandhi`` class can be used to join/split words:
.. code:: python
>>> from sanskrit_parser.parser.sandhi import Sandhi
>>> sandhi = Sandhi()
>>> word1 = SanskritImmutableString('te')
>>> word2 = SanskritImmutableString('eva')
>>> joins = sandhi.join(word1, word2)
>>> for join in joins:
... print(join)
...
teeva
taeva
ta eva
tayeva
To split at a specific position, use the ``Sandhi.split_at()`` method:
.. code:: python
>>> w = SanskritImmutableString('taeva')
>>> splits = sandhi.split_at(w, 1)
>>> for split in splits:
... print(split)
...
(u'tar', u'eva')
(u'tas', u'eva')
(u'taH', u'eva')
(u'ta', u'eva')
To split at all possible locations, use the ``Sandhi.split_all()``
method:
.. code:: python
>>> splits_all = sandhi.split_all(w)
>>> for split in splits_all:
... print(split)
...
(u't', u'aeva')
(u'tar', u'eva')
(u'taev', u'a')
(u'to', u'eva')
(u'ta', u'eva')
(u'te', u'eva')
(u'taH', u'eva')
(u'tae', u'va')
(u'taeva', u'')
(u'tas', u'eva')
**Note**: As mentioned previously, both over-generation and
under-generation are possible with the ``Sandhi`` class.
Command line usage
==================
::
$ python -m sanskrit_parser.parser.sandhi --join te eva
Joining te eva
set([u'teeva', u'taeva', u'ta eva', u'tayeva'])
$ python -m sanskrit_parser.parser.sandhi --split taeva 1
Splitting taeva at 1
set([(u'tar', u'eva'), (u'tas', u'eva'), (u'taH', u'eva'), (u'ta', u'eva')])
$ python -m sanskrit_parser.parser.sandhi --split taeva --all
All possible splits for taeva
set([(u't', u'aeva'), (u'tar', u'eva'), (u'taev', u'a'), (u'to', u'eva'),
(u'ta', u'eva'), (u'te', u'eva'), (u'taH', u'eva'), (u'tae', u'va'),
(u'taeva', u''), (u'tas', u'eva')])
"""
import itertools
import pickle
import logging
import datetime
from zipfile import ZipFile
from sanskrit_parser.base.sanskrit_base import SanskritNormalizedString, SCHEMES, outputctx
from sanskrit_parser.util.data_manager import data_file_path
class Sandhi(object):
"""
Class to hold all the sandhi rules and methods for joining and splitting.
Uses SLP1 encoding for all internal operations.
"""
def __init__(self, rules_dir=None, use_default_rules=True, logger=None):
"""
Sandhi class constructor
:param rules_dir: directory to read rules from
:param use_default_rules: reads pre-built-rules from sandhi_rules dir under module directory
:param logger: instance of python logger to use
"""
self.forward = None
self.backward = None
self.logger = logger or logging.getLogger(__name__)
@staticmethod
def _load_rules_pickle(filename):
zip_path = data_file_path('sandhi_rules.zip')
with ZipFile(zip_path) as myzip:
with myzip.open(filename) as f:
return pickle.load(f)
def _load_forward(self):
if self.forward is None:
self.forward = self._load_rules_pickle('sandhi_forward.pkl')
keys = self.forward.keys()
self.lc_len_max = max(len(k[0]) for k in keys)
self.rc_len_max = max(len(k[1]) for k in keys)
def _load_backward(self):
if self.backward is None:
self.backward = self._load_rules_pickle('sandhi_backward.pkl')
keys = self.backward.keys()
self.after_len_max = max(len(k) for k in keys)
def join(self, first_in, second_in):
"""
Performs sandhi.
**Warning**: May generate forms that are not lexically valid.
:param first_in: SanskritImmutableString first word of the sandhi
        :param second_in: SanskritImmutableString second word of the sandhi
:return: list of strings of possible sandhi forms, or None if no sandhi can be performed
"""
self._load_forward()
first = first_in.canonical()
second = second_in.canonical()
self.logger.debug("Join: {}, {}".format(first, second))
if first is None or len(first) == 0:
return second
if second is None:
return first
left_chars = [first[i:] for i in range(max(0, len(first)-self.lc_len_max), len(first))]
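        # Also try a key anchored to the start of the first word; forward rules
        # whose left context begins with "^" only apply there (split_at treats a
        # leading "^" in its backward rules the same way).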
left_chars.append("^"+first)
right_chars = [second[0:i] for i in range(min(self.rc_len_max, len(second))+1)]
self.logger.debug("left_chars = %s, right_chars %s", left_chars, right_chars)
joins = set()
for key in itertools.product(left_chars, right_chars):
afters = self.forward.get(key)
if afters:
for after, annotation in afters:
self.logger.debug("Found sandhi %s = %s (%s)", key, after, annotation)
joins.add(first[:-len(key[0])] + after + second[len(key[1]):])
if len(joins) == 0:
self.logger.debug("No joins found")
return None
else:
return joins
def split_at(self, word_in, idx):
"""
Split sandhi at the given index of word.
**Warning**: Will generate splits that are not lexically valid.
:param word_in: SanskritImmutableString word to split
:param idx: position within word at which to try the split
:return: set of tuple of strings of possible split forms, or None if no split can be performed
"""
self._load_backward()
word = word_in.canonical()
self.logger.debug("Split: %s, %d", word, idx)
splits = set()
        # Figure out how many chars we can extract for the afters
stop = min(idx+self.after_len_max, len(word))
afters = [word[idx:i] for i in range(idx+1, stop+1)]
for after in afters:
self.logger.debug("Trying after %s", after)
befores = self.backward[after]
if befores:
for before, annotation in befores:
self.logger.debug("Found split %s -> %s (%s)", after, before, annotation)
# Do we have a beginning-of-line match rule
if before[0][0] == "^":
if idx != 0:
# Can't allow matches at any other position
continue
else:
# drop the ^ in the result
before = (before[0][1:], before[1])
left = word[:idx] + before[0]
right = before[1] + word[idx+len(after):]
splits.add((left, right))
if len(splits) == 0:
self.logger.debug("No split found")
return None
else:
return splits
def split_all(self, word_in, start=None, stop=None):
"""
Split word at all possible locations and return splits.
**Warning**: Will generate splits that are not lexically valid.
:param word_in: SanskritImmutableString word to split
:return: set of tuple of strings of possible split forms, or None if no split can be performed
"""
splits = set()
word = word_in.canonical()
start = start or 0
stop = stop or len(word)
for idx in range(start, stop):
split = self.split_at(word_in, idx)
if split:
splits |= split
if len(splits) == 0:
self.logger.debug("No split found")
return None
else:
return splits
if __name__ == "__main__":
from argparse import ArgumentParser
def getArgs():
"""
Argparse routine.
Returns args variable
"""
# Parser Setup
parser = ArgumentParser(description='Sandhi Utility')
# Input Encoding (autodetect by default)
parser.add_argument('--input-encoding', type=str, default=None)
parser.add_argument('--loglevel', type=str, help="logging level. Can be any level supported by logging module")
parser.add_argument('--split', action='store_true', help="Split the given word using sandhi rules")
parser.add_argument('--join', action='store_true', help="Join the given words using sandhi rules")
parser.add_argument('--all', action='store_true', help="Return splits at all possible locations")
parser.add_argument('--strict-io', action='store_true',
help="Do not modify the input/output string to match conventions", default=False)
# String to encode
parser.add_argument('word', nargs='?', type=str,
default="tasminniti",
help="First word of sandhi if join, or word to split")
parser.add_argument('word_or_pos', nargs="?", type=str,
default="eva",
help="Second word of sandhi if join, or position to split")
return parser.parse_args()
def main():
args = getArgs()
if args.input_encoding is None:
ie = None
else:
ie = SCHEMES[args.input_encoding]
# Setup logging
if args.loglevel:
numeric_level = getattr(logging, args.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.loglevel)
logging.basicConfig(filename="sandhi.log", filemode="wb", level=numeric_level)
logging.info("---------------------------------------------------")
logging.info("Started processing at %s", datetime.datetime.now())
sandhi = Sandhi()
# if neither split nor join is chosen, just demo both
if not args.split and not args.join:
print("Neither split nor join option chosen. Here's a demo of joining")
args.join = True
with outputctx(args.strict_io):
if args.split:
word_in = SanskritNormalizedString(args.word, encoding=ie, strict_io=args.strict_io)
if args.all:
print("All possible splits for {}".format(args.word))
splits = sandhi.split_all(word_in)
else:
pos = int(args.word_or_pos)
print("Splitting {0} at {1}".format(args.word, pos))
splits = sandhi.split_at(word_in, pos)
print(splits)
if args.join:
print("Joining {0} {1}".format(args.word, args.word_or_pos))
first_in = SanskritNormalizedString(args.word, encoding=ie, strict_io=args.strict_io)
second_in = SanskritNormalizedString(args.word_or_pos, encoding=ie, strict_io=args.strict_io)
joins = sandhi.join(first_in, second_in)
print(joins)
logging.info("Finished processing at %s", datetime.datetime.now())
logging.info("---------------------------------------------------")
logging.shutdown()
main()
|
# -*- encoding: utf-8 -*-
#! /usr/bin/env python
'''
File description: a minimal interactive note-taking program, LAN (Net) version
Author: penguinjing
Version: 0.0.2
Reference: https://pymotw.com/2/socket/udp.html
'''
# Global imports
import socket
import sys
from os.path import exists
# Global variables
# PATH = "/path/2/work dir"
# Function definitions
def print_usage():
    print 'no mode or wrong mode specified, please run it again.'
print 'python main.py [s|c]'
print '\t\t| |'
print '\t\t| - client mode'
print '\t\t- - server mode'
def print_help():
print "?/h/H - print help"
print "q/quit/bye - quit the Notes"
print "r/sync - synchorme history notes"
print "shutdown - shuting down the server"
def read_all_records():
log_name = 'mydiary.log'
if exists(log_name) == True:
current_file = open(log_name)
his_content = current_file.read()
current_file.close()
else:
his_content = 'no historical notes'
return his_content
def setupserver():
    # Echo server program part
    # Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
local_address = socket.gethostbyname(socket.getfqdn())
# Bind the socket to the port
server_address = (local_address, 9009)
print >>sys.stderr, 'starting up server on %s port %s' % server_address
sock.bind(server_address)
print >>sys.stderr, 'Hit Ctrl + C to interrupt'
print >>sys.stderr, '\nwaiting to receive notes:'
while True:
data, address = sock.recvfrom(4096)
#print >>sys.stderr, 'received %s bytes from %s' % (len(data), address)
if data in ['r', 'sync']:
content = read_all_records()
sock.sendto(content, address)
continue
elif data == 'shutdown':
            print >>sys.stderr, '\nshutting down the server...'
break
else:
log_name = 'mydiary.log'
current_file = open(log_name, 'a+')
print >>sys.stderr, data
current_file.write(data+'\n')
current_file.close()
print >>sys.stderr, 'closing socket'
sock.close()
def setupclient():
# Echo client program part
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
address = raw_input('Please input Notes server address:')
server_address = (address, 9009)
while True:
message = raw_input('>>>' )
if message in ['r', 'sync']:
sock.sendto(message, server_address)
data, server = sock.recvfrom(4096)
print >>sys.stderr, data
continue
elif message in ['?', 'h', 'H']:
print_help()
continue
elif message in ['q', 'quit', 'bye']:
break
elif message =='':
continue
else:
sock.sendto(message, server_address)
print >>sys.stderr, 'closing socket'
sock.close()
def main():
if len(sys.argv) == 1:
print_usage()
elif sys.argv[1] == 'c':
setupclient()
elif sys.argv[1] == 's':
setupserver()
else:
print_usage()
# Entry point
if __name__ == "__main__":
main() |
import tensorflow as tf
import numpy as np
# Data generator for domain adversarial neural network
class DataGeneratorDANN(tf.keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, source_images, source_labels, target_images, source_train = True, batch_size = 32, shuffle = True):
self.source_images = source_images
self.source_labels = source_labels
self.target_images = target_images
self.batch_size = batch_size
self.nsamples = source_images.shape[0]
self.shuffle = shuffle
self.source_train = source_train
self.on_epoch_end()
def set_source_train(self,flag):
self.source_train = flag
def __len__(self):
'Denotes the number of batches per epoch'
return np.ceil(self.nsamples/self.batch_size).astype(int)
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
batch_indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
# Generate data
return self.__data_generation(batch_indexes)
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.nsamples)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, batch_indexes):
'Generates data containing batch_size samples'
Xsource = self.source_images[batch_indexes]
Ysource = self.source_labels[batch_indexes]
Xtarget = self.target_images[batch_indexes]
if self.source_train:
return Xsource, Ysource
else:
return Xsource, Ysource, Xtarget
# Model with no domain adaptation
def model_NDA(ishape = (32,32,3)):
input_layer = tf.keras.layers.Input(ishape)
x1 = tf.keras.layers.Conv2D(32, (3,3), activation = 'relu')(input_layer)
x2 = tf.keras.layers.Conv2D(32, (3,3), activation = 'relu')(x1)
x3 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x2)
x4 = tf.keras.layers.BatchNormalization()(x3)
x5 = tf.keras.layers.Conv2D(64, (3,3), activation = 'relu')(x4)
x6 = tf.keras.layers.Conv2D(64, (3,3), activation = 'relu')(x5)
x7 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x6)
x8 = tf.keras.layers.BatchNormalization()(x7)
# Feature vector
x9 = tf.keras.layers.Flatten()(x8)
# Label classifier
out = tf.keras.layers.Dense(10, activation = "softmax")(x9)
model = tf.keras.models.Model(inputs = [input_layer], outputs = [out])
return model
#Gradient Reversal Layer
@tf.custom_gradient
def gradient_reverse(x, lamda=1.0):
y = tf.identity(x)
def grad(dy):
return lamda * -dy, None
return y, grad
class GradientReversalLayer(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, x, lamda=1.0):
return gradient_reverse(x, lamda)
# Domain adversarial neural network implementation
class DANN(tf.keras.models.Model):
def __init__(self):
super().__init__()
#Feature Extractor
self.feature_extractor_layer0 = tf.keras.layers.Conv2D(32, (3,3), activation = 'relu')
self.feature_extractor_layer1 = tf.keras.layers.Conv2D(32, (3,3), activation = 'relu')
self.feature_extractor_layer2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
self.feature_extractor_layer3 = tf.keras.layers.BatchNormalization()
self.feature_extractor_layer4 = tf.keras.layers.Conv2D(64, (3,3), activation = 'relu')
self.feature_extractor_layer5 = tf.keras.layers.Conv2D(64, (3,3), activation = 'relu')
self.feature_extractor_layer6 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))
self.feature_extractor_layer7 = tf.keras.layers.BatchNormalization()
#Label Predictor
self.label_predictor_layer0 = tf.keras.layers.Dense(10, activation= 'softmax')
#Domain Predictor
self.domain_predictor_layer0 = GradientReversalLayer()
self.domain_predictor_layer1 = tf.keras.layers.Dense(1, activation='sigmoid')
def call(self, x, train=False, source_train=True, lamda=1.0):
#Feature Extractor
x = self.feature_extractor_layer0(x)
x = self.feature_extractor_layer1(x)
x = self.feature_extractor_layer2(x)
x = self.feature_extractor_layer3(x , training=train)
x = self.feature_extractor_layer4(x)
x = self.feature_extractor_layer5(x)
x = self.feature_extractor_layer6(x)
x = self.feature_extractor_layer7(x, training=train)
features = tf.keras.layers.Flatten()(x)
        # Label prediction uses all features during source-only training, otherwise
        # only the first (source) half of the mixed batch
if source_train is True:
feature_slice = features
else:
feature_slice = tf.slice(features, [0, 0], [features.shape[0] // 2, -1])
#Label Predictor
l_logits = self.label_predictor_layer0(feature_slice)
#Domain Predictor
if source_train is True:
return l_logits
else:
dp_x = self.domain_predictor_layer0(features, lamda) #GradientReversalLayer
d_logits = self.domain_predictor_layer1(dp_x)
return l_logits, d_logits
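# A small sanity check (illustrative, not part of the original training code):
# the gradient reversal layer is an identity in the forward pass and flips the
# sign of the gradient (scaled by lamda) in the backward pass.
if __name__ == "__main__":
    grl = GradientReversalLayer()
    x = tf.constant([1.0, 2.0, 3.0])
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = tf.reduce_sum(grl(x))
    print(y.numpy())                    # 6.0: values pass through unchanged
    print(tape.gradient(y, x).numpy())  # [-1. -1. -1.]: gradients are reversed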
|
import numpy as np
import torch as th
import cv2
import argparse
import tempfile
from torch.utils.data import DataLoader
import os
import pyexr
import cv2
import skimage.io as skio
from ttools.modules.image_operators import crop_like
import matplotlib.pyplot as plt
from collections import defaultdict
from sbmc import losses
from sbmc import modules
import ttools
import sbmc
LOG = ttools.get_logger(__name__)
ttools.get_logger('matplotlib.font_manager').disabled = True
#'ksize': 21, 'gather': False, 'pixel': False
def main(args):
if not os.path.exists(args.data):
raise ValueError("input {} does not exist".format(args.data))
# Load the data
data_params = dict(spp=args.spp)
data = sbmc.FullImagesDataset(args.data, **data_params)
dataloader = DataLoader(data, batch_size=1, shuffle=False, num_workers=0)
# Load the two models
temp = th.load(f"{args.model1}", map_location=th.device('cpu'))
model_one = sbmc.RecurrentMultisteps(data.num_features, data.num_global_features)
try: # Depending on the way a model is saved, the statedict is referenced with different keys
model_one.load_state_dict(temp['model'])
except:
model_one.load_state_dict(temp['model_state_dict'])
model_one.train(False)
temp = th.load(f"{args.model2}", map_location=th.device('cpu'))
model_two = sbmc.Multisteps(data.num_features, data.num_global_features)
try: # Depending on the way a model is saved, the statedict is referenced with different keys
model_two.load_state_dict(temp['model'])
except:
model_two.load_state_dict(temp['model_state_dict'])
model_two.train(False)
device = "cuda" if th.cuda.is_available() else "cpu"
if (device == "cuda"):
LOG.info("Using CUDA")
model_one.cuda()
model_two.cuda()
rmse_checker = losses.RelativeMSE()
rmse_checker.to(device)
# start = np.random.randint(0, 80) * 5
start = 0
model_one_outputs = []
model_two_outputs = []
ground_thruths = []
for batch_idx, batch in enumerate(dataloader):
if batch_idx < start:
continue
if batch_idx >= start + args.amount:
break
for k in batch.keys():
if not batch[k].__class__ == th.Tensor:
continue
batch[k] = batch[k].to(device) # Sets the tensors to the correct device type
# Compute the radiances using the two models
with th.no_grad():
output1 = model_one(batch)["radiance"]
output2 = model_two(batch)["radiance"]
model_one_outputs.append(output1)
model_two_outputs.append(output2)
        # Get the input image and ground truth for comparison
tgt = crop_like(batch["target_image"], output1)
ground_thruths.append(tgt)
low_spp = crop_like(batch["low_spp"], output1)
        # Compare to ground truth
with th.no_grad():
rmse1 = rmse_checker(output1, tgt)
rmse2 = rmse_checker(output2, tgt)
LOG.info(f"Model 1 denoised with rmse: {rmse1} || Model 2 denoised with rmse: {rmse2}")
if rmse2 < rmse1:
LOG.info("Model 2 outperformed model 1")
else:
LOG.info("Model 1 outperformed model 2")
save_img(output1, output2, low_spp, tgt, args.save_dir, str(batch_idx))
#Display Denoising quality
data_to_show = [model_one_outputs, model_two_outputs, ground_thruths]
fig, axeslist = plt.subplots(ncols=len(model_one_outputs), nrows=len(data_to_show))
plot_data = []
for i, data in enumerate(data_to_show):
for idx, img in enumerate(data):
rmse = rmse_checker(img, ground_thruths[idx]).item()
res = process_radiance(img)
plot_data.append({'img': res, 'rmse': rmse})
# Create image matrix
for ind, data in enumerate(plot_data):
axeslist.ravel()[ind].imshow(data['img'])
axeslist.ravel()[ind].set_title(str(round(data['rmse'], 5)))
axeslist.ravel()[ind].set_axis_off()
plt.tight_layout() # optional
plt.show()
# Show differences
diff_array = []
fig, axeslist = plt.subplots(ncols=len(model_one_outputs), nrows=3)
rmse_data = defaultdict(list)
data_to_show = [model_one_outputs, model_two_outputs, ground_thruths]
for i, data in enumerate(data_to_show):
for idx, img in enumerate(data):
if idx > 0:
diff = (img - data[idx-1]).abs()
rmse = rmse_checker(img, data[idx-1]).item()
rmse_data[str(i)].append(rmse)
else:
diff = th.zeros_like(tgt)
rmse = 0
res = process_radiance(diff)
diff_array.append({'img': res, 'rmse': rmse})
# Create image matrix
for ind, data in enumerate(diff_array):
axeslist.ravel()[ind].imshow(data['img'])
axeslist.ravel()[ind].set_title(str(round(data['rmse'], 5)))
axeslist.ravel()[ind].set_axis_off()
plt.tight_layout() # optional
plt.show()
# save_compare_frame(output1, output2, tgt)
# make_compare_video(args.save_dir)
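# Tone-map HDR radiance for display: clamp negatives, compress the range with
# x / (1 + x), apply gamma 1/2.2, then move the tensor to HWC numpy layout.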
def process_radiance(data):
data = th.clamp(data, 0)
data /= 1 + data
data = th.pow(data, 1.0/2.2)
data = th.clamp(data, 0, 1)
data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
data = np.ascontiguousarray(data)
return data
frames = []
def save_compare_frame(radiance1, radiance2, tgt):
    # Difference between models and ground truth
diff_model1 = (radiance1 - tgt).abs()
diff_model2 = (radiance2 - tgt).abs()
first_row = th.cat([radiance1, diff_model1], -1)
second_row = th.cat([radiance2, diff_model2], -1)
data = th.cat([first_row, second_row], -2)
data = th.clamp(data, 0)
data /= 1 + data
data = th.pow(data, 1.0/2.2)
data = th.clamp(data, 0, 1)
data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
    # Clip to 0-255 to remove HDR and pure radiance estimates + change to BGR color space for OpenCV
frames.append(cv2.cvtColor((np.clip(data, 0, 1)*255).astype(np.uint8), cv2.COLOR_RGB2BGR))
def make_compare_video(location):
height, width, layers = frames[0].shape
# Write to video
out = cv2.VideoWriter(f'{location}/compare_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 5, (width, height))
# Stitch 5 times to create loop
for _ in range(10):
for i in range(len(frames)):
out.write(frames[i])
frames.reverse()
out.release()
def save_img(radiance1, radiance2, low_radiance, tgt, checkpoint_dir, name):
tmp_empty = th.zeros_like(radiance1) # Empty filler tensor
    # Difference between models and ground truth
diff_model1 = (radiance1 - tgt).abs()
diff_model2 = (radiance2 - tgt).abs()
# Create output data in the form:
# low spp input --
    # output model1 -- Diff with tgt
    # output model2 -- Diff with tgt
# tgt --
first_row = th.cat([tmp_empty, low_radiance, tmp_empty], -1)
second_row = th.cat([tmp_empty, radiance1, diff_model1], -1)
third_row = th.cat([tmp_empty, radiance2, diff_model2], -1)
fourth_row = th.cat([tmp_empty, tgt, tmp_empty], -1)
# Concate the data in a vertical stack
data = th.cat([first_row, second_row, third_row, fourth_row], -2)
data = th.clamp(data, 0)
data /= 1 + data
data = th.pow(data, 1.0/2.2)
data = th.clamp(data, 0, 1)
data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
data = np.ascontiguousarray(data)
# Add text to the images
jump = radiance1.size()[2]
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(data, '4spp', (10, jump * 0 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(data, 'Model 1', (10, jump * 1 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(data, 'Model 2', (10, jump * 2 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
cv2.putText(data, 'Target', (10, jump * 3 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
os.makedirs(checkpoint_dir, exist_ok=True)
outputfile = os.path.join(checkpoint_dir, f'{name}.png')
pyexr.write(outputfile, data)
png = outputfile.replace(".exr", ".png")
skio.imsave(png, (np.clip(data, 0, 1)*255).astype(np.uint8))
def load_model(model, load_path):
checkpoint = th.load(load_path)
model.load_state_dict(checkpoint['model_state_dict'])
epoch = checkpoint['epoch']
return model, epoch
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model1', required=True, help="path to the first model")
parser.add_argument(
'--model2', required=True, help="path to the second model")
parser.add_argument(
'--save_dir', required=True, help="path to the dir where everything has to be saved")
parser.add_argument(
'--data', required=True, help="path to the training data.")
parser.add_argument(
'--amount', required=False, type=int,default=1, help="Amount of frames to denoise and compare")
parser.add_argument('--spp', type=int,
help="number of samples to use as input.")
args = parser.parse_args()
ttools.set_logger(True)
main(args) |
from django.contrib import admin
from models import *
admin.site.register(Pantalla)
admin.site.register(Carrusel)
admin.site.register(Imagen)
|
""" Utility module with helper functions """
def clean_escaped_strings(dirty):
return dirty.replace('\n', '').replace('\t', '')
def rate_sort(rate_dict):
return float(rate_dict['rate'])
def calc_diff(rates):
for i in range(len(rates)):
if i == 0:
rates[i]['diff'] = '-'
else:
rates[i]['diff'] = round(((rates[i].get('rate') - rates[0].get('rate'))/rates[0].get('rate'))*100, 2)
return rates
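# Illustrative usage (hypothetical data; each entry just needs a numeric 'rate'
# key, as rate_sort and calc_diff expect). Differences are percentages relative
# to the first entry of the sorted list.
if __name__ == "__main__":
    quotes = [{'provider': 'B', 'rate': 1.30}, {'provider': 'A', 'rate': 1.25}]
    quotes.sort(key=rate_sort)
    print(calc_diff(quotes))
    # [{'provider': 'A', 'rate': 1.25, 'diff': '-'},
    #  {'provider': 'B', 'rate': 1.3, 'diff': 4.0}]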
|
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferDispatch
##########################################################################
# Setup the UI
##########################################################################
if application["gui"].getTypedValue() :
import GafferUI
import GafferDispatchUI
for module in (
"GafferSceneUI",
"GafferImageUI",
"GafferAppleseedUI",
"GafferArnoldUI",
"GafferDelightUI",
"GafferTractorUI",
) :
with IECore.IgnoredExceptions( ImportError ) :
__import__( module )
menu = GafferDispatchUI.DispatchDialogue.menuDefinition()
menu.append( "/Edit/Undo", {
"command" : lambda menu : menu.ancestor( GafferDispatchUI.DispatchDialogue ).scriptNode().undo(),
"shortCut" : "Ctrl+Z",
"active" : lambda menu : menu.ancestor( GafferDispatchUI.DispatchDialogue ).scriptNode().undoAvailable(),
} )
menu.append( "/Edit/Redo", {
"command" : lambda menu : menu.ancestor( GafferDispatchUI.DispatchDialogue ).scriptNode().redo(),
"shortCut" : "Shift+Ctrl+Z",
"active" : lambda menu : menu.ancestor( GafferDispatchUI.DispatchDialogue ).scriptNode().redoAvailable(),
} )
|
import unittest
from os import path
import sys
sys.path.append(path.join(path.dirname(path.dirname(path.abspath(__file__))), 'airodb_analyzer'))
from models.accessPoint import AccessPoint
from models.macAddress import MACAddress
class TestAccessPointMethods(unittest.TestCase):
def test_constructor_TestWithNoneMacAddress_ThrowTypeError(self):
with self.assertRaises(TypeError):
AccessPoint(None, None)
def test_constructor_TestWithStringMacAddress_ThrowTypeError(self):
with self.assertRaises(TypeError):
AccessPoint("12:34:56:78:89:FF", None)
def test_constructor_TestWithNoneName_ThrowTypeError(self):
with self.assertRaises(TypeError):
AccessPoint(MACAddress("12:34:56:78:89:FF"), None)
def test_constructor_TestWithEmptyName_ReturnValid(self):
AccessPoint(MACAddress("12:34:56:78:89:FF"), "")
def test_constructor_TestWithValidMACAndName_ReturnValid(self):
AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
def test_getMACAddress_TestWith1234567889FF_Return1234567889FF(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
expected = MACAddress("12:34:56:78:89:FF")
self.assertEqual(expected, ap.getMACAddress())
def test_getName_TestWithMyAP_ReturnMyAP(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
expected = "MyAP"
self.assertEqual(expected, ap.getName())
def test_setMACAddress_TestWithNoneMacAddress_ThrowTypeError(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
with self.assertRaises(TypeError):
ap.setMACAddress(None)
def test_setMACAddress_TestWithStringMacAddress_ThrowTypeError(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
with self.assertRaises(TypeError):
ap.setMACAddress("12:34:56:78:89:FF")
def test_setMACAddress_TestWithValidMAC_ReturnValid(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
ap.setMACAddress(MACAddress("12:34:56:77:89:FF"))
self.assertEqual(ap.getMACAddress(), MACAddress("12:34:56:77:89:FF"))
def test_setName_TestWithNoneName_ThrowTypeError(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
with self.assertRaises(TypeError):
ap.setName(None)
def test_setName_TestWithEmptyName_ReturnValid(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
ap.setName("")
self.assertEqual(ap.getName(), "")
def test_setName_TestWithNonEmptyName_ReturnValid(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
ap.setName("abc")
self.assertEqual(ap.getName(), "abc")
def test_equalityOperator_TestWithIdenticalValues_ReturnTrue(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
self.assertEqual(ap1, ap2)
    def test_equalityOperator_TestWithDifferentMAC_ReturnFalse(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "MyAP")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
self.assertNotEqual(ap1, ap2)
    def test_equalityOperator_TestWithDifferentName_ReturnFalse(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP1")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
self.assertNotEqual(ap1, ap2)
    def test_equalityOperator_TestWithDifferentMACAndName_ReturnFalse(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "MyAP1")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "MyAP")
self.assertNotEqual(ap1, ap2)
def test_equalityOperator_TestWithAnotherType_ReturnFalse(self):
self.assertNotEqual(AccessPoint(MACAddress("12:34:56:78:89:FE"), "MyAP1"), 3)
def test_isHidden_TestWithEmptyName_ReturnTrue(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "")
self.assertTrue(ap.isHidden())
def test_isHidden_TestWithOneX00Name_ReturnTrue(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "\\x00")
self.assertTrue(ap.isHidden())
def test_isHidden_TestWithTwoX00Name_ReturnTrue(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "\\x00\\x00")
self.assertTrue(ap.isHidden())
def test_isHidden_TestWithThreeX00Name_ReturnTrue(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "\\x00\\x00\\x00")
self.assertTrue(ap.isHidden())
def test_isHidden_TestWithValidName_ReturnFalse(self):
ap = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
self.assertFalse(ap.isHidden())
def test_hashOperator_TestWithDifferentMAC_ReturnDifferentHash(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "Home")
self.assertNotEqual(hash(ap1), hash(ap2))
def test_hashOperator_TestWithDifferentName_ReturnDifferentHash(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FF"), "Home1")
self.assertNotEqual(hash(ap1), hash(ap2))
def test_hashOperator_TestWithIdenticalValues_ReturnSameHash(self):
ap1 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
ap2 = AccessPoint(MACAddress("12:34:56:78:89:FE"), "Home")
self.assertEqual(hash(ap1), hash(ap2)) |
from Message.Parameters.Parameter import Parameter, ParameterType
class Voltage(Parameter):
def __init__(self, volts=0.0):
super(Voltage, self).__init__(value=volts, length=2, type=ParameterType.IEEEFloat, resolution=0.01)
|
def fib1(n):
if n == 0 or n == 1:
return 1
    return fib1(n - 1) + fib1(n - 2)
# Python generator ("yield") style: a recursive Fibonacci stream.
# Each fib() spawns two more generators, so producing the n-th term costs
# O(phi^n) time -- exponential, the same growth as the naive recursion above.
def fib():
yield 0
yield 1
    f1 = fib()
    f2 = fib()
    f1.next()  # advance f1 by one so the two streams are offset by one term
    while 1:
        yield f1.next() + f2.next()
# test code
f = fib()
for i in range(20):
print f.next()
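# For comparison (a sketch, not part of the original file): an iterative
# generator that keeps only the last two values, so the n-th term costs O(n)
# time instead of the O(phi^n) of the recursive stream above.
def fib_iter():
    a, b = 0, 1
    while 1:
        yield a
        a, b = b, a + b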
|
import logging
import coloredlogs
import sentry_sdk
from aidbox_python_sdk.main import create_app as _create_app
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
# Don't remove these imports
import app.operations
from app.sdk import sdk, sdk_settings
coloredlogs.install(
level="DEBUG", fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.DEBUG
)
logging.getLogger("aidbox_sdk").setLevel(logging.INFO)
logging.getLogger("urllib3").setLevel(logging.INFO)
sentry_logging = LoggingIntegration(
    level=logging.DEBUG,  # Capture debug and above as breadcrumbs
event_level=logging.WARNING, # Send warnings as events
)
sentry_sdk.init(integrations=[AioHttpIntegration(), sentry_logging])
async def create_app():
return await _create_app(sdk_settings, sdk, debug=True)
|
# -*- coding: utf-8 -*-
"""
Utilities for user interfaces
Created on Tue Aug 3 21:14:25 2020
@author: Gerd Duscher, Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import sys
import warnings
import ipyfilechooser
if sys.version_info.major == 3:
unicode = str
if sys.version_info.minor < 6:
ModuleNotFoundError = ValueError
class open_file_dialog(ipyfilechooser.FileChooser):
def __init__(self, directory='.'):
self._use_dir_icons = True
super().__init__(directory)
def _apply_selection(self):
super()._apply_selection()
selected = os.path.join(
self._selected_path,
self._selected_filename
)
if os.path.isfile(selected):
self._label.value = self._LBL_TEMPLATE.format(
self._selected_filename,
'blue'
)
else:
self._label.value = self._LBL_TEMPLATE.format(
self._selected_filename,
'green'
)
def _set_form_values(self, path: str, filename: str) -> None:
"""Set the form values."""
# Disable triggers to prevent selecting an entry in the Select
# box from automatically triggering a new event.
self._pathlist.unobserve(
self._on_pathlist_select,
names='value'
)
self._dircontent.unobserve(
self._on_dircontent_select,
names='value'
)
self._filename.unobserve(
self._on_filename_change,
names='value'
)
# In folder only mode zero out the filename
if self._show_only_dirs:
filename = ''
# Set form values
self._pathlist.options = ipyfilechooser.utils.get_subpaths(path)
self._pathlist.value = path
self._filename.value = filename
# file/folder real names
dircontent_real_names = ipyfilechooser.utils.get_dir_contents(
path,
show_hidden=self._show_hidden,
dir_icon_append=False,
show_only_dirs=self._show_only_dirs,
filter_pattern=self._filter_pattern
)
# file/folder display names
dircontent_display_names = ipyfilechooser.utils.get_dir_contents(
path,
show_hidden=self._show_hidden,
dir_icon_append=self._use_dir_icons,
show_only_dirs=self._show_only_dirs,
filter_pattern=self._filter_pattern
)
dircontent_display_names = self.set_display_names(dircontent_real_names, dircontent_display_names)
# Dict to map real names to display names
self._map_name_to_disp = {
real_name: disp_name
for real_name, disp_name in zip(
dircontent_real_names,
dircontent_display_names
)
}
# Dict to map display names to real names
self._map_disp_to_name = {
disp_name: real_name
for real_name, disp_name in
self._map_name_to_disp.items()
}
# Set _dircontent form value to display names
self._dircontent.options = dircontent_display_names
# If the value in the filename Text box equals a value in the
# Select box and the entry is a file then select the entry.
if ((filename in dircontent_real_names) and
os.path.isfile(os.path.join(path, filename))):
self._dircontent.value = self._map_name_to_disp[filename]
else:
self._dircontent.value = None
# Reenable triggers again
self._pathlist.observe(
self._on_pathlist_select,
names='value'
)
self._dircontent.observe(
self._on_dircontent_select,
names='value'
)
self._filename.observe(
self._on_filename_change,
names='value'
)
# Update the state of the select button
if self._gb.layout.display is None:
# Disable the select button if path and filename
# - equal an existing folder in the current view
# - equal the already selected values
# - don't match the provided filter pattern(s)
check1 = filename in dircontent_real_names
check2 = os.path.isdir(os.path.join(path, filename))
check3 = False
check4 = False
# Only check selected if selected is set
if ((self._selected_path is not None) and
(self._selected_filename is not None)):
selected = os.path.join(
self._selected_path,
self._selected_filename
)
check3 = os.path.join(path, filename) == selected
# Ensure only allowed extensions are used
if self._filter_pattern:
check4 = not ipyfilechooser.utils.match_item(filename, self._filter_pattern)
if (check1 and check2) or check3 or check4:
self._select.disabled = True
else:
self._select.disabled = False
def set_display_names(self, dircontent_real_names, dircontent_display_names):
for i in range(len(dircontent_display_names)):
name = dircontent_display_names[i]
full_name = os.path.join(self._pathlist.value, dircontent_real_names[i])
            if os.path.isfile(full_name):
                size = os.path.getsize(full_name) * 2 ** -20  # bytes -> MiB
                basename, extension = os.path.splitext(name)
                if extension in ['.hf5']:
                    dircontent_display_names[i] = f" {dircontent_display_names[i]:50} -- {size:.1f} MB"
return dircontent_display_names
def check_ssh():
"""
Checks whether or not the python kernel is running locally (False) or remotely (True)
Returns
-------
output : bool
Whether or not the kernel is running over SSH (remote machine)
Notes
-----
When developing workflows that need to work on remote or virtual machines
in addition to one's own personal computer such as a laptop, this function
is handy at letting the developer know where the code is being executed
Examples
--------
>>> import sidpy
>>> mode = sidpy.interface_utils.check_ssh()
>>> print('Running on remote machine: {}'.format(mode))
"""
return 'SSH_CLIENT' in os.environ or 'SSH_TTY' in os.environ
def get_QT_app():
"""
Starts pyQT app if not running
Returns: QApplication
-------
instance : ``QApplication.instance``
"""
try:
from PyQt5.Qt import QApplication
except ImportError:
raise ModuleNotFoundError('Required package PyQt5 not available')
# start qt event loop
_instance = QApplication.instance()
if not _instance:
# print('not_instance')
_instance = QApplication([])
return _instance
def openfile_dialog_QT(file_types="All files (*)", multiple_files=False,
file_path='.', caption="Select a file..."):
"""
Opens a File dialog which is used in open_file() function
This function uses pyQt5.
Parameters
----------
file_types : str, optional. Default = all
types of files accepted
multiple_files : bool, optional. Default = False
Whether or not multiple files can be selected
file_path: str, optional. Default = '.'
path to starting or root directory
caption: str, optional. Default = "Select a file..."
caption of the open file dialog
Returns
-------
filename : str
full filename with absolute path and extension
Notes
-----
In jupyter notebooks use ``%gui Qt`` early in the notebook.
Examples
--------
>> import sidpy as sid
>> filename = sid.io.openfile_dialog()
>> print(filename)
"""
# Check whether QT is available
try:
from PyQt5 import QtGui, QtWidgets, QtCore
except ImportError:
raise ModuleNotFoundError('Required package PyQt5 not available')
    # try to find a parent so the file dialog can appear on top
    try:
        get_QT_app()
    except Exception:
        # if no QApplication can be created, fall back to a parentless dialog
        pass
for param in [file_path, file_types, caption]:
if param is not None:
if not isinstance(param, (str, unicode)):
raise TypeError('param must be a string')
parent = None
if multiple_files:
func = QtWidgets.QFileDialog.getOpenFileNames
fnames, file_filter = func(parent, caption, file_path,
filter=file_types,
options=[QtCore.Qt.WindowStaysOnTopHint])
if len(fnames) > 0:
fname = fnames[0]
else:
return
else:
func = QtWidgets.QFileDialog.getOpenFileName
fname, file_filter = func(parent, caption, file_path,
filter=file_types)
if multiple_files:
return fnames
else:
return str(fname)
def savefile_dialog(initial_file='*.hf5', file_path='.',
file_types=None, caption="Save file as ..."):
"""
Produces a window / dialog to allow users to specify the location and name
of a file to save to.
Parameters
----------
initial_file : str, optional. Default = ``*.hf5``
File extension? @gduscher to clarify
file_path : str, optional. Default = '.'
path to starting or root directory
file_types : str, optional. Default = None
Filters for kinds of files to display in the window
caption: str, optional. Default = "Save file as..."
caption of the save file dialog
Returns
-------
fname : str
path to desired file
Notes
-----
In jupyter notebooks use ``%gui Qt`` early in the notebook.
"""
# Check whether QT is available
try:
from PyQt5 import QtGui, QtWidgets, QtCore
except ImportError:
raise ModuleNotFoundError('Required package PyQt5 not available')
else:
for param in [file_path, initial_file, caption]:
if param is not None:
if not isinstance(param, (str, unicode)):
raise TypeError('param must be a string')
if file_types is None:
file_types = "All files (*)"
        try:
            get_QT_app()
        except Exception:
            pass
func = QtWidgets.QFileDialog.getSaveFileName
fname, file_filter = func(None, caption,
file_path + "/" + initial_file,
filter=file_types)
if len(fname) > 1:
return fname
else:
return None
try:
from PyQt5 import QtWidgets
class ProgressDialog(QtWidgets.QDialog):
"""
Simple dialog that consists of a Progress Bar and a Button.
Clicking on the button results in the start of a timer and
updates the progress bar.
"""
def __init__(self, title=''):
super().__init__()
self.initUI(title)
def initUI(self, title):
self.setWindowTitle('Progress Bar: ' + title)
self.progress = QtWidgets.QProgressBar(self)
self.progress.setGeometry(10, 10, 500, 50)
self.progress.setMaximum(100)
self.show()
def set_value(self, count):
self.progress.setValue(count)
except ImportError:
pass
def progress_bar(title='Progress', start=0, stop=100):
"""
Opens a progress bar window
Parameters
----------
title: str, optional. Default = 'Progress'
Title for the progress window
start: int, optional. Default = 0
Start value
stop: int, optional. Default = 100
End value
Returns
-------
progress : QtWidgets.QProgressDialog
Progress dialog
Examples
--------
>>> import sidpy
>>> progress = sidpy.interface_utils.progress_bar('progress', 1,50)
>>> for count in range(50):
>>> progress.setValue(count)
"""
# Check whether QT is available
    warnings.warn("progress_bar() is deprecated; use the tqdm package instead", DeprecationWarning)
try:
from PyQt5 import QtGui, QtWidgets, QtCore
except ImportError:
raise ModuleNotFoundError('Required package PyQt5 not available')
    try:
        get_QT_app()
    except Exception:
        pass
progress = QtWidgets.QProgressDialog(title, "Abort", 0, 100)
progress.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
progress.show()
return progress
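# Minimal usage sketch for the widget above (assumes a Jupyter notebook with
# ipyfilechooser installed; attribute names follow ipyfilechooser's FileChooser
# API, so treat them as an illustration rather than a guarantee):
#
#   chooser = open_file_dialog('.')
#   display(chooser)           # render the widget and pick a file
#   print(chooser.selected)    # full path of the selection, or None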
|
'''
Author: ZHAO Zinan
Created: 12/21/2018
289. Game of Life
'''
class Solution:
def gameOfLife(self, data):
"""
:type data: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
if len(data) == 0 or len(data[0]) == 0:
return
row = len(data)
col = len(data[0])
def update(data, x, y):
for i in range(max(0, x-1), min(len(data), x+1+1)):
for j in range(max(0, y-1), min(len(data[0]), y+1+1)):
data[i][j] += 1
data[x][y] -= 1
neighbors = [[0 for i in range(col)] for i in range(row)]
for i in range(row):
for j in range(col):
if data[i][j]:
update(neighbors, i, j)
# print(neighbors)
for i in range(row):
for j in range(col):
if neighbors[i][j] == 3:
data[i][j] = 1
elif neighbors[i][j] < 2 or neighbors[i][j] > 3:
data[i][j] = 0
# return data
# test
if __name__ == '__main__':
data1 = [
[0,1,0],
[0,0,1],
[1,1,1],
[0,0,0]
]
    Solution().gameOfLife(data1)  # modifies data1 in place
    print(data1)
|
# -*- coding: utf-8 -*-
from unittest import TestCase
import json
import logging
import os
import shutil
import subprocess
import tempfile
logger = logging.getLogger(__name__)
class TorchtextTestCase(TestCase):
def setUp(self):
logging.basicConfig(format=('%(asctime)s - %(levelname)s - '
'%(name)s - %(message)s'),
level=logging.INFO)
# Directory where everything temporary and test-related is written
self.project_root = os.path.abspath(os.path.realpath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)))
self.test_dir = tempfile.mkdtemp()
self.test_ppid_dataset_path = os.path.join(self.test_dir, "test_ppid_dataset")
def tearDown(self):
try:
shutil.rmtree(self.test_dir)
except:
subprocess.call(["rm", "-rf", self.test_dir])
def write_test_ppid_dataset(self, data_format="csv"):
data_format = data_format.lower()
if data_format == "csv":
delim = ","
elif data_format == "tsv":
delim = "\t"
dict_dataset = [
{"id": "0", "question1": "When do you use シ instead of し?",
"question2": "When do you use \"&\" instead of \"and\"?",
"label": "0"},
{"id": "1", "question1": "Where was Lincoln born?",
"question2": "Which location was Abraham Lincoln born?",
"label": "1"},
{"id": "2", "question1": "What is 2+2",
"question2": "2+2=?",
"label": "1"},
]
with open(self.test_ppid_dataset_path, "w") as test_ppid_dataset_file:
for example in dict_dataset:
if data_format == "json":
test_ppid_dataset_file.write(json.dumps(example) + "\n")
elif data_format == "csv" or data_format == "tsv":
test_ppid_dataset_file.write("{}\n".format(
delim.join([example["id"], example["question1"],
example["question2"], example["label"]])))
else:
raise ValueError("Invalid format {}".format(data_format))
|
import os.path
from .parser import load_data
import biothings.hub.dataload.uploader as uploader
class UMLSUploader(uploader.BaseSourceUploader):
name = "umls"
def load_data(self, data_folder):
umls_docs = load_data(data_folder)
return umls_docs
@classmethod
def get_mapping(klass):
mapping = {
"umls": {
"properties": {
"cui": {
"type": "keyword",
"normalizer" : "keyword_lowercase_normalizer",
'copy_to': ['all'],
},
"protein_cui": {
"type": "keyword",
"normalizer" : "keyword_lowercase_normalizer",
'copy_to': ['all'],
}
}
}
}
return mapping
|
# mysql.connector from https://dev.mysql.com/downloads/connector/python/
import mysql.connector
# creds to the TA led database lecture https://scs.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=1e68b000-e15c-4e52-af04-ac6800f82ae8
# for its guidance on using mysql through python
# prior to running my game one needs to install mysql through https://dev.mysql.com/downloads/mysql/
# and then pip install mysql-connector-python
# then set up mysql using the setup interface and set the server user to root and password to password
grudgeGamedb = mysql.connector.connect(host= 'localhost', user='root', password= 'password')
cursor = grudgeGamedb.cursor()
cursor.execute("CREATE DATABASE IF NOT EXISTS GrudgeGame")
grudgeGamedb = mysql.connector.connect(host= 'localhost', user='root', password= 'password', database="GrudgeGame")
cursor = grudgeGamedb.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS Leaderboard(\
ID int PRIMARY KEY AUTO_INCREMENT,\
Username varchar(255) NOT NULL,\
Score int NOT NULL)")
grudgeGamedb.commit()
def getLeaderboard():
cursor.execute('SELECT Username, Score FROM Leaderboard ORDER BY Score DESC')
leaderBoard = [row for row in cursor.fetchall()]
return leaderBoard
def updateTable(username, score):
    # read the leaderboard once and reuse it for both the update and the insert check
    cursor.execute('SELECT Username, Score FROM Leaderboard ORDER BY Score DESC')
    usersList = cursor.fetchall()
    userNameList = [row[0] for row in usersList]
    for name, dbScore in usersList:
        if username == name and score > dbScore:
            # parameterized queries avoid SQL injection through the username
            cursor.execute('UPDATE Leaderboard SET Score = %s WHERE Username = %s', (score, username))
    if username not in userNameList:
        cursor.execute('INSERT INTO Leaderboard (Username, Score) VALUES (%s, %s)', (username, score))
grudgeGamedb.commit()
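# Example round trip (a sketch, not part of the original file; assumes the
# MySQL server described in the comments at the top is running locally):
if __name__ == '__main__':
    updateTable('alice', 150)   # inserts alice or raises her score
    for username, score in getLeaderboard():
        print("{}: {}".format(username, score))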
|
from django.db import models
# Create your models here.
class Product(models.Model):
prod_no = models.IntegerField()
prod_name = models.CharField(max_length=50)
    prod_price = models.FloatField()
    prod_qty = models.IntegerField()
|
import string, Utils
# list of directory options to offer in configure
dir_options = {
'with-cachedir' : [ '${PREFIX}/var/locks', 'where to put temporary cache files' ],
'with-codepagedir' : [ '${PREFIX}/lib/samba', 'where to put codepages' ],
'with-configdir' : [ '${PREFIX}/etc/samba', 'Where to put configuration files' ],
'with-lockdir' : [ '${PREFIX}/var/locks', 'where to put lock files' ],
'with-logfilebase' : [ '${PREFIX}/var/log/samba', 'Where to put log files' ],
'with-ncalrpcdir' : [ '${PREFIX}/var/ncalrpc', 'where to put ncalrpc sockets' ],
'with-nmbdsocketdir' : [ '${PREFIX}/var/locks/.nmbd', 'Where to put the nmbd socket directory' ],
'with-ntp-signd-socket-dir' : [ '${PREFIX}/var/run/ntp_signd', 'NTP signed directory'],
'with-pammodulesdir' : [ '', 'Which directory to use for PAM modules' ],
'with-piddir' : [ '${PREFIX}/var/locks', 'where to put pid files' ],
'with-privatedir' : [ '${PREFIX}/private', 'where to put smbpasswd' ],
'with-selftest-prefix' : [ '', 'The prefix where make test will be run' ],
'with-selftest-shrdir' : [ '', 'The share directory that make test will be run against' ],
'with-statedir' : [ '${PREFIX}/var/locks', 'where to put persistent state files' ],
'with-swatdir' : [ '${PREFIX}/swat', 'Where to put SWAT files' ],
'with-winbindd-privileged-socket-dir' : [ '${PREFIX}/var/lib/winbindd_privileged', 'winbind privileged socket directory'],
'with-winbindd-socket-dir' : [ '${PREFIX}/var/lib/winbindd', 'winbind socket directory' ],
}
# list of cflags to use for dynconfig.c
dyn_cflags = {
'BINDIR' : '${BINDIR}',
'CACHEDIR' : '${CACHEDIR}',
'CODEPAGEDIR' : '${CODEPAGEDIR}',
'CONFIGDIR' : '${SYSCONFDIR}',
'CONFIGFILE' : '${SYSCONFDIR}/smb.conf',
'DATADIR' : '${DATADIR}',
'LIBDIR' : '${LIBDIR}',
'LOCALEDIR' : '${LOCALEDIR}',
'LMHOSTSFILE' : '${SYSCONFDIR}/lmhosts',
'LOCKDIR' : '${LOCALSTATEDIR}/locks',
'LOGFILEBASE' : '${LOCALSTATEDIR}',
'MODULESDIR' : '${PREFIX}/modules',
'NCALRPCDIR' : '${LOCALSTATEDIR}/ncalrpc',
'NMBDSOCKETDIR' : '${LOCKDIR}/.nmbd',
'NTP_SIGND_SOCKET_DIR' : '${NTP_SIGND_SOCKET_DIR}',
'PIDDIR' : '${LOCALSTATEDIR}/run',
'PKGCONFIGDIR' : '${LIBDIR}/pkgconfigdir',
'PRIVATE_DIR' : '${PRIVATEDIR}',
'SBINDIR' : '${SBINDIR}',
'SETUPDIR' : '${DATADIR}/setup',
'SMB_PASSWD_FILE' : '${PRIVATEDIR}/smbpasswd',
'STATEDIR' : '${LOCALSTATEDIR}',
'SWATDIR' : '${PREFIX}/swat',
'WINBINDD_PRIVILEGED_SOCKET_DIR' : '${WINBINDD_PRIVILEGED_SOCKET_DIR}',
'WINBINDD_SOCKET_DIR' : '${WINBINDD_SOCKET_DIR}',
}
def get_varname(v):
'''work out a variable name from a configure option name'''
if v.startswith('with-'):
v = v[5:]
v = v.upper()
v = string.replace(v, '-', '_')
return v
def dynconfig_cflags(bld):
'''work out the extra CFLAGS for dynconfig.c'''
cflags = []
for f in dyn_cflags.keys():
# substitute twice, as we could have substitutions containing variables
v = Utils.subst_vars(dyn_cflags[f], bld.env)
v = Utils.subst_vars(v, bld.env)
bld.ASSERT(v != '', "Empty dynconfig value for %s" % f)
bld.ASSERT(v.find('${') == -1, "Unsubstituted variable in %s : %s : %s" % (f, dyn_cflags[f], v))
cflags.append('-D%s="%s"' % (f, v))
return cflags
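# For illustration only (paths assume a hypothetical default prefix of
# /usr/local/samba), each entry above ends up as a define such as:
#   -DCONFIGFILE="/usr/local/samba/etc/smb.conf"
#   -DLOCKDIR="/usr/local/samba/var/locks"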
|
import tempfile
import subprocess
import os
import glob
from subprocess import Popen, PIPE
from multiprocessing import cpu_count
import numpy as np
import montemodes.classes.results as res
def create_tinker_input(molecule):
temp_file_name = tempfile.gettempdir() + '/tinker_temp'+ '_' + str(os.getpid())
tinker_input_file = open(temp_file_name,mode='w')
tinker_input_file.write(str(molecule.get_number_of_atoms()) + '\n')
for i in range(molecule.get_number_of_atoms()):
line = str([list(molecule.get_atomic_numbers()[i]) +
list(molecule.get_atomic_elements()[i]) +
list(molecule.get_coordinates()[i]) +
list(molecule.get_atom_types()[i]) +
list(molecule.get_connectivity()[i])]).strip('[]').replace(',', '').replace("'", "")
tinker_input_file.write(line + '\n')
tinker_input_file.close()
return tinker_input_file
def create_gaussian_input(molecule, calculation='pm6', internal=False, type='energy', processors=1):
    # route-section keywords per job type
    job_keywords = {'energy': ' ', 'vibration': ' freq '}
    if processors is None:
        processors = cpu_count()
    multiplicity = molecule.multiplicity
    charge = molecule.charge
    input_file = '%NProcShared={0}\n'.format(processors)
    input_file += '#' + job_keywords[type] + calculation + '\n\nPython Input\n\n' + str(charge) + ' ' + str(multiplicity) + '\n'
# Z-matrix
if internal:
atomic_elements = molecule.get_atomic_elements_with_dummy()[:, 0]
z_matrix = molecule.get_z_matrix()
input_file += atomic_elements[0] + '\n'
for index, element in enumerate(atomic_elements[1:]):
input_file += (element + '\t' +
'\t'.join(z_matrix[index+1][0]) + '\n')
internal_labels = molecule.get_int_label()
input_file += 'Variables:\n'
for label in internal_labels:
input_file += (label[0] + '\t' +
str(molecule.get_int_dict()[label[0]])+'\n')
    # Cartesian
else:
atomic_elements = molecule.get_atomic_elements()[:, 0]
coordinates = molecule.get_coordinates()
for index, element in enumerate(atomic_elements):
input_file += (element + "\t" +
str(coordinates[index][0]) + "\t" +
str(coordinates[index][1]) + "\t" +
str(coordinates[index][2]) + "\n")
return input_file + "\n"
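# For reference, the header assembled above looks like this for a neutral
# singlet with calculation='pm6', type='energy', processors=4 (illustrative
# values only):
#   %NProcShared=4
#   # pm6
#
#   Python Input
#
#   0 1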
def get_energy_from_tinker(molecule, force_field = 'mm3.prm'):
tinker_input_file = create_tinker_input(molecule)
key_file_name = os.path.splitext(molecule.file_name)[0] + '.key'
if not os.path.isfile(key_file_name):
key_file_name = ''
tinker_command = 'analyze ' + tinker_input_file.name + \
' ' + force_field + ' E -k ' + key_file_name
tinker_process = subprocess.Popen(tinker_command, stdin=PIPE, stderr=PIPE, stdout=PIPE, shell=True)
(output, err) = tinker_process.communicate()
tinker_process.wait()
os.unlink(tinker_input_file.name)
try:
energy = float(output[output.find('Total Potential Energy'):].replace('D','E').split()[4])
    except (IndexError, ValueError):
print('\n'.join(output.splitlines()[-3:]))
print('Failed trying to get energy from tinker output')
energy = 1E20
return energy
def get_energy_from_gaussian(molecule, calculation='pm6', internal=False, processors=1, binary='g09'):
input_data = create_gaussian_input(molecule,
calculation=calculation,
internal=internal,
processors=processors)
conversion = 627.503 # hartree to kcal/mol
gaussian_process = Popen(binary, stdout=PIPE, stdin=PIPE, stderr=PIPE, shell=True)
(output, err) = gaussian_process.communicate(input=input_data)
gaussian_process.wait()
try:
energy = float(output[output.find('E('):].split()[2])
    except (IndexError, ValueError):
print('Failed trying to get energy from gaussian output')
print('\n'.join(output.splitlines()[-10:]))
energy = 1E20
return energy * conversion
def get_modes_from_gaussian(molecule, calculation='pm6', internal=False, binary='g09'):
input_data = create_gaussian_input(molecule,
calculation=calculation,
internal=internal,
type='vibration')
conversion = 627.503 # Hartree to kcal/mol
gaussian_process = Popen(binary, stdout=PIPE, stdin=PIPE, stderr=PIPE, shell=True)
(output, err) = gaussian_process.communicate(input=input_data)
gaussian_process.wait()
lines = output[output.find('Frequencies'):].split()
# Frequencies
indexes = [i for i, x in enumerate(lines) if x == 'Frequencies']
frequencies = np.array([[lines[i+2], lines[i+3], lines[i+4]] for i in indexes],dtype=float).flatten()
# Modes
num_atoms = molecule.get_number_of_atoms()
num_modes = 3 * num_atoms
modes = []
    for block in range(num_modes // 3 - 2):  # Gaussian prints three modes per block
indexes = [i for i, x in enumerate(lines) if x == 'Atom']
freq_i = np.array([lines[indexes[block]+11+i*11:indexes[block]+11+(i+1)*11] for i in range(num_atoms)],dtype=float)[:,2:]
for i in range(0, 9, 3):
modes.append(freq_i[:,i:i+3].tolist())
    # Energy
    try:
        energy = float(output[output.find('E('):].split()[2])
    except (IndexError, ValueError):
print('Failed trying to get energy from gaussian output')
print('\n'.join(output.splitlines()[-10:]))
energy = 1E20
total_modes = res.Vibration(frequencies=np.array(frequencies),
modes=np.array(modes))
return total_modes, energy * conversion
def get_modes_from_tinker(molecule, force_field='mm3.prm', num_modes=None):
if num_modes is None:
tinker_list = ' A'
num_modes = 3 * molecule.get_number_of_atoms()
else:
if num_modes >= (3 * molecule.get_number_of_atoms()):
tinker_list = ' A'
num_modes = 3 * molecule.get_number_of_atoms()
else:
tinker_list = ' ' + ' '.join(map(str, range(1,num_modes+1)))
tinker_input_file = create_tinker_input(molecule)
tinker_command = 'vibrate ' + tinker_input_file.name + ' ' + force_field + tinker_list
tinker_process = subprocess.Popen(tinker_command, stdout=subprocess.PIPE, shell=True)
(output, err) = tinker_process.communicate()
tinker_process.wait()
# for i in range(10):
lines = output.split()
modes = []
frequencies = []
pos = lines.index('Vibrational')
if num_modes is None:
number_of_modes = molecule.get_number_of_atoms() * 3
else:
number_of_modes = num_modes
for f in range(number_of_modes):
pos = lines.index('Vibrational', pos + 1)
if pos == -1:
break
frequencies.append(float(lines[pos + 6]))
pos = lines.index('Z', pos + 1)
mode = []
for k in range(molecule.get_number_of_atoms()):
mode.append([float(i) for i in lines[pos + k * 4 + 2:pos + k * 4 + 5]])
modes.append(mode)
total_modes = res.Vibration(frequencies=np.array(frequencies),
modes=np.array(modes))
for filePath in glob.glob(tinker_input_file.name+".*"):
if os.path.isfile(filePath):
os.remove(filePath)
return total_modes
if __name__ == '__main__':
import montemodes.functions.reading as io_monte
# molecule = io_monte.reading_from_gzmat_file('../test.gzmat')
molecule = io_monte.reading_from_xyz_file('../test.xyz')
print(get_energy_from_gaussian(molecule, calculation='am1'))
# print(get_symmetry(molecule,symmetry='s'))
# print(create_gaussian_input(molecule,internal=False))
# print(get_energy_from_gaussian(molecule,calculation='pm6'))
|
from flask import (
request,
jsonify,
render_template
)
from api.extensions import db
import datetime
from flask_restful import Resource
from flask_jwt_extended import create_access_token, decode_token
from mods.users.models.user_model import UserModel, user_schema
from utils.errors import errors, error_handle
from flask_bcrypt import check_password_hash, generate_password_hash
from sqlalchemy.exc import IntegrityError
from jwt.exceptions import ExpiredSignatureError, DecodeError, \
InvalidTokenError
from api.mail_service import send_email
class SignupApi(Resource):
def post(self):
try:
if(request.json['email'] and request.json['phone'] and request.json['password']):
new_user = UserModel(
email=request.json['email'],
phone=request.json['phone'],
password=generate_password_hash(request.json['password']).decode('utf8')
)
db.session.add(new_user)
db.session.commit()
return user_schema.dump([new_user]), 200
else:
db.session.rollback()
return jsonify(errors["SchemaValidationError"])
except IntegrityError:
db.session.rollback()
return jsonify(errors["EmailAlreadyExistsError"])
except Exception as e:
db.session.rollback()
return error_handle(e)
class LoginApi(Resource):
def post(self):
try:
            if((request.json['email'] or request.json['phone']) and request.json['password']):
                if request.json['email']:
                    user = UserModel.query.filter_by(email=request.json['email']).first()
                elif request.json['phone']:
                    user = UserModel.query.filter_by(phone=request.json['phone']).first()
                if not user:
                    return jsonify(errors["UnauthorizedError"])
                authorized = check_password_hash(user.password, request.json['password'])
if not authorized:
return jsonify(errors["UnauthorizedError"])
expires = datetime.timedelta(days=7)
access_token = create_access_token(identity=str(user.id), expires_delta=expires)
return {'token': access_token}, 200
else:
return jsonify(errors["UnauthorizedError"])
except Exception as e:
return error_handle(e)
class ForgotPassword(Resource):
def post(self):
url = request.host_url + 'reset/'
try:
email = request.json['email']
if not email:
return jsonify(errors["SchemaValidationError"])
user = UserModel.query.filter_by(email=request.json['email']).first()
if not user:
return jsonify(errors["EmailDoesnotExistsError"])
expires = datetime.timedelta(hours=24)
reset_token = create_access_token(str(user.id), expires_delta=expires)
return send_email('Reset Your Password',
sender='[email protected]',
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
url=url + reset_token),
html_body=render_template('email/reset_password.html',
url=url + reset_token))
except Exception as e:
return error_handle(e)
class ResetPassword(Resource):
def post(self):
# url = request.host_url + 'reset/'
try:
reset_token = request.json['reset_token']
password = request.json['password']
if not reset_token or not password:
return jsonify(errors["SchemaValidationError"])
user_id = decode_token(reset_token)['identity']
user = UserModel.query.filter_by(id=user_id).first()
user.password = generate_password_hash(password).decode('utf8')
db.session.commit()
return send_email('Password reset successful',
sender='[email protected]',
recipients=[user.email],
text_body='Password reset was successful',
html_body='<p>Password reset was successful</p>')
except ExpiredSignatureError:
return jsonify(errors["ExpiredTokenError"])
except (DecodeError, InvalidTokenError):
return jsonify(errors["BadTokenError"])
except Exception as e:
return error_handle(e)
|
# Copyright (c) 2017 Alex Pliutau
from slackclient import SlackClient
import logging
import time
import sys
class SlackBot(object):
def __init__(self, token, rasa_nlu):
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
self.sc = SlackClient(token)
self.rasa_nlu = rasa_nlu
def connect(self):
if self.sc.rtm_connect():
logging.info("connected to slack rtm")
else:
logging.error("could not connect to slack rtm")
sys.exit(1)
def start(self):
self.connect()
while True:
for reply in self.sc.rtm_read():
self.input(reply)
time.sleep(.1)
def input(self, data):
# do not handle bot messages
if "type" in data and not "bot_id" in data and data["type"] == "message":
self.process_msg(data)
def process_msg(self, data):
logging.info("received message from {}: {}".format(data["user"], data["text"]))
text_to_reply = self.rasa_nlu.find_reply(data["text"])
if text_to_reply:
self.send_im_msg(data["user"], text_to_reply)
def send_im_msg(self, user, msg):
self.sc.api_call(
"chat.postMessage",
channel=user,
as_user="true",
text=msg
)
logging.info("sent message to {}: {}".format(user, msg)) |
#!/usr/bin/env python2.7
#
# a wrapper for olevba since it is python2
import argparse
import codecs
import json
import os
import os.path
import sys
import traceback
parser = argparse.ArgumentParser(description="Analyzes a given file with olevba parser and saves the output in a useful way.")
parser.add_argument('file', help="The file to analyze.")
parser.add_argument('-d', '--output-dir', dest='output_dir', required=False, default=None,
help="The directory to put the output. Defaults to file_path.olevba")
parser.add_argument('--olevba-lib-path', dest='olevba_lib_path', required=False, default='/opt',
help="Alternate directory of olevba library path.")
args = parser.parse_args()
if args.output_dir is None:
args.output_dir = '{}.olevba'.format(args.file)
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
sys.path.append(args.olevba_lib_path)
from oletools.olevba import VBA_Parser, VBA_Scanner, TYPE_OLE, TYPE_OpenXML, TYPE_Word2003_XML, TYPE_MHTML
result = {}
try:
vba_parser = VBA_Parser(args.file)
result['type'] = vba_parser.type
if result['type'] not in [ TYPE_OLE, TYPE_OpenXML, TYPE_Word2003_XML, TYPE_MHTML ]:
sys.exit(0)
#with open(os.path.join(args.output_dir, 'type'), 'w') as fp:
#fp.write(vba_parser.type)
#sys.stdout.write(os.path.join(args.output_dir, 'type') + '\n')
if not vba_parser.detect_vba_macros():
sys.exit(0)
except Exception, e:
sys.exit(2)
# make a directory to put all the macros in
macro_dir = None
# extract all the macroses ;)
macro_index = 0
for filename, stream_path, vba_filename, vba_code in vba_parser.extract_macros():
if not macro_dir:
macro_dir = os.path.join(args.output_dir, 'macros')
if not os.path.isdir(macro_dir):
os.makedirs(macro_dir)
result['macros'] = []
macro_path = os.path.join(macro_dir, 'macro_{}.bas'.format(macro_index))
macro_index += 1
with open(macro_path, 'w') as fp:
fp.write(vba_code)
macro_json = {}
macro_json['path'] = macro_path
macro_json['filename'] = filename
macro_json['stream_path'] = stream_path
macro_json['vba_filename'] = unicode(vba_filename, 'utf-8', errors='replace')
#sys.stdout.write(macro_path + '\n')
#details_path = os.path.join(macro_dir, 'macro_{0}.details'.format(macro_index))
#with codecs.open(details_path, 'w', encoding='unicode_escape') as fp:
#try:
#fp.write(u'filename: {0}\nstream_path: {1}\nvba_filename: {2}\n'.format(
#filename,
#stream_path,
#unicode(vba_filename, 'unicode_escape')))
#except:
#traceback.print_exc()
#sys.stdout.write(details_path + '\n')
macro_json['analysis'] = []
scanner = VBA_Scanner(vba_code)
#analysis_path = os.path.join(macro_dir, 'macro_{0}.analysis'.format(macro_index))
kw_counts = {} # key = keyword, value = int
#with open(analysis_path, 'w') as fp:
for kw_type, keyword, description in scanner.scan(include_decoded_strings=True):
macro_json['analysis'].append({'kw_type': unicode(kw_type, encoding='utf-8', errors='replace'),
'keyword': unicode(keyword, encoding='utf-8', errors='replace', ),
'description': unicode(description, 'utf-8', errors='replace')})
#fp.write('{0}\t{1}\t{2}\n'.format(kw_type, keyword, description))
if kw_type.lower() not in kw_counts:
kw_counts[kw_type.lower()] = 0
kw_counts[kw_type.lower()] += 1
# generate a summary of the olevba keywords
macro_json['olevba_summary'] = {}
# and update a global summary of all of them
if 'olevba_summary' not in result:
result['olevba_summary'] = {}
#sys.stdout.write(analysis_path + '\n')
#summary_path = os.path.join(macro_dir, 'macro_{0}.summary'.format(macro_index))
#with open(summary_path, 'w') as fp:
for keyword in kw_counts.keys():
macro_json['olevba_summary'][keyword] = kw_counts[keyword]
if keyword not in result['olevba_summary']:
result['olevba_summary'][keyword] = 0
result['olevba_summary'][keyword] += kw_counts[keyword]
#fp.write('{0}\t{1}\n'.format(keyword, str(kw_counts[keyword])))
#sys.stdout.write(summary_path + '\n')
result['macros'].append(macro_json)
sys.stdout.write(json.dumps(result).encode(errors='replace'))
|
from typing import List
class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
        # Dutch national flag partition: nums[:i] are 0s, nums[i:j] are 1s,
        # nums[k+1:] are 2s, and nums[j:k+1] is still unexamined.
        i, j, k = 0, 0, len(nums) - 1
        while j <= k:
            if nums[j] == 2:
                nums[j], nums[k] = nums[k], nums[j]
                k -= 1  # the value swapped in from the right is unexamined, so j stays put
            elif nums[j] == 0:
                nums[j], nums[i] = nums[i], nums[j]
                i += 1
                j += 1
            else:
                j += 1
sol = Solution()
nums = [2, 0, 2, 1, 1, 0]
sol.sortColors(nums)
print(nums)
|
import unittest
from unittest.mock import MagicMock, patch
import dbt.flags
import dbt.compilation
from dbt.adapters.postgres import Plugin
from dbt.contracts.files import FileHash
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.graph.parsed import NodeConfig, DependsOn, ParsedModelNode
from dbt.contracts.graph.compiled import CompiledModelNode, InjectedCTE
from dbt.node_types import NodeType
from datetime import datetime
from .utils import inject_adapter, clear_plugin, config_from_parts_or_dicts
class CompilerTest(unittest.TestCase):
def assertEqualIgnoreWhitespace(self, a, b):
self.assertEqual(
"".join(a.split()),
"".join(b.split()))
def setUp(self):
dbt.flags.STRICT_MODE = True
self.maxDiff = None
self.model_config = NodeConfig.from_dict({
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'vars': {},
'quoting': {},
'column_types': {},
'tags': [],
})
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
}
profile_cfg = {
'outputs': {
'test': {
'type': 'postgres',
'dbname': 'postgres',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5432,
'schema': 'public'
}
},
'target': 'test'
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self._generate_runtime_model_patch = patch.object(dbt.compilation, 'generate_runtime_model')
self.mock_generate_runtime_model = self._generate_runtime_model_patch.start()
inject_adapter(Plugin.adapter(self.config), Plugin)
# self.mock_adapter = PostgresAdapter MagicMock(type=MagicMock(return_value='postgres'))
# self.mock_adapter.Relation =
# self.mock_adapter.get_compiler.return_value = dbt.compilation.Compiler
# self.mock_plugin = MagicMock(
# adapter=MagicMock(
# credentials=MagicMock(return_value='postgres')
# )
# )
# inject_adapter(self.mock_adapter, self.mock_plugin)
# so we can make an adapter
def mock_generate_runtime_model_context(model, config, manifest):
def ref(name):
result = f'__dbt__CTE__{name}'
unique_id = f'model.root.{name}'
model.extra_ctes.append(InjectedCTE(id=unique_id, sql=None))
return result
return {'ref': ref}
self.mock_generate_runtime_model.side_effect = mock_generate_runtime_model_context
def tearDown(self):
self._generate_runtime_model_patch.stop()
clear_plugin(Plugin)
def test__prepend_ctes__already_has_cte(self):
ephemeral_config = self.model_config.replace(materialized='ephemeral')
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.root.ephemeral']),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from {{ref("ephemeral")}}',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[InjectedCTE(id='model.root.ephemeral', sql='select * from source_table')],
injected_sql='',
compiled_sql=(
'with cte as (select * from something_else) '
'select * from __dbt__CTE__ephemeral'),
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral': CompiledModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from source_table',
compiled=True,
compiled_sql='select * from source_table',
extra_ctes_injected=False,
extra_ctes=[],
injected_sql='',
checksum=FileHash.from_contents(''),
),
},
sources={},
docs={},
# '2018-02-14T09:15:13Z'
generated_at=datetime(2018, 2, 14, 9, 15, 13),
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
self.assertEqual(result, manifest.nodes['model.root.view'])
self.assertEqual(result.extra_ctes_injected, True)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
('with __dbt__CTE__ephemeral as ('
'select * from source_table'
'), cte as (select * from something_else) '
'select * from __dbt__CTE__ephemeral'))
self.assertEqual(
manifest.nodes['model.root.ephemeral'].extra_ctes_injected,
True)
def test__prepend_ctes__no_ctes(self):
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql=('with cte as (select * from something_else) '
'select * from source_table'),
compiled=True,
extra_ctes_injected=False,
extra_ctes=[],
injected_sql='',
compiled_sql=('with cte as (select * from something_else) '
'select * from source_table'),
checksum=FileHash.from_contents(''),
),
'model.root.view_no_cte': CompiledModelNode(
name='view_no_cte',
database='dbt',
schema='analytics',
alias='view_no_cte',
resource_type=NodeType.Model,
unique_id='model.root.view_no_cte',
fqn=['root', 'view_no_cte'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from source_table',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[],
injected_sql='',
compiled_sql=('select * from source_table'),
checksum=FileHash.from_contents(''),
),
},
sources={},
docs={},
generated_at='2018-02-14T09:15:13Z',
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
self.assertEqual(
result,
manifest.nodes.get('model.root.view'))
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
manifest.nodes.get('model.root.view').compiled_sql)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes.get('model.root.view_no_cte'),
manifest,
{})
self.assertEqual(
result,
manifest.nodes.get('model.root.view_no_cte'))
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
manifest.nodes.get('model.root.view_no_cte').compiled_sql)
def test__prepend_ctes(self):
ephemeral_config = self.model_config.replace(materialized='ephemeral')
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.root.ephemeral']),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from {{ref("ephemeral")}}',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[InjectedCTE(id='model.root.ephemeral', sql='select * from source_table')],
injected_sql='',
compiled_sql='select * from __dbt__CTE__ephemeral',
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral': CompiledModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='ephemeral',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from source_table',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[],
injected_sql='',
compiled_sql='select * from source_table',
checksum=FileHash.from_contents(''),
),
},
sources={},
docs={},
generated_at='2018-02-14T09:15:13Z',
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
self.assertEqual(result,
manifest.nodes.get('model.root.view'))
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
('with __dbt__CTE__ephemeral as ('
'select * from source_table'
') '
'select * from __dbt__CTE__ephemeral'))
self.assertTrue(manifest.nodes['model.root.ephemeral'].extra_ctes_injected)
def test__prepend_ctes__cte_not_compiled(self):
ephemeral_config = self.model_config.replace(materialized='ephemeral')
parsed_ephemeral = ParsedModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='ephemeral',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from source_table',
checksum=FileHash.from_contents(''),
)
compiled_ephemeral = CompiledModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='ephemeral',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from source_table',
compiled=True,
compiled_sql='select * from source_table',
injected_sql='select * from source_table',
extra_ctes_injected=True,
extra_ctes=[],
checksum=FileHash.from_contents(''),
)
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.root.ephemeral']),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from {{ref("ephemeral")}}',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[InjectedCTE(id='model.root.ephemeral', sql='select * from source_table')],
injected_sql='',
compiled_sql='select * from __dbt__CTE__ephemeral',
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral': parsed_ephemeral,
},
sources={},
docs={},
generated_at='2018-02-14T09:15:13Z',
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
with patch.object(compiler, 'compile_node') as compile_node:
compile_node.return_value = compiled_ephemeral
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
compile_node.assert_called_once_with(parsed_ephemeral, manifest, {})
self.assertEqual(result,
manifest.nodes.get('model.root.view'))
self.assertTrue(manifest.nodes['model.root.ephemeral'].compiled)
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
('with __dbt__CTE__ephemeral as ('
'select * from source_table'
') '
'select * from __dbt__CTE__ephemeral'))
self.assertTrue(manifest.nodes['model.root.ephemeral'].extra_ctes_injected)
def test__prepend_ctes__multiple_levels(self):
ephemeral_config = self.model_config.replace(materialized='ephemeral')
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.root.ephemeral']),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from {{ref("ephemeral")}}',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[InjectedCTE(id='model.root.ephemeral', sql=None)],
injected_sql=None,
compiled_sql='select * from __dbt__CTE__ephemeral',
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral': ParsedModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='ephemeral',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from {{ref("ephemeral_level_two")}}',
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral_level_two': ParsedModelNode(
name='ephemeral_level_two',
database='dbt',
schema='analytics',
alias='ephemeral_level_two',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral_level_two',
fqn=['root', 'ephemeral_level_two'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral_level_two.sql',
original_file_path='ephemeral_level_two.sql',
raw_sql='select * from source_table',
checksum=FileHash.from_contents(''),
),
},
sources={},
docs={},
generated_at='2018-02-14T09:15:13Z',
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
self.assertEqual(result, manifest.nodes['model.root.view'])
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
('with __dbt__CTE__ephemeral_level_two as ('
'select * from source_table'
'), __dbt__CTE__ephemeral as ('
'select * from __dbt__CTE__ephemeral_level_two'
') '
'select * from __dbt__CTE__ephemeral'))
self.assertTrue(manifest.nodes['model.root.ephemeral'].compiled)
self.assertTrue(manifest.nodes['model.root.ephemeral_level_two'].compiled)
self.assertTrue(manifest.nodes['model.root.ephemeral'].extra_ctes_injected)
self.assertTrue(manifest.nodes['model.root.ephemeral_level_two'].extra_ctes_injected)
|