id (string, length 1-8) | text (string, length 6-1.05M) | dataset_id (1 class)
---|---|---
6661948
|
<filename>base/decorators.py<gh_stars>0
import functools
def abstract(f):
@functools.wraps(f)
def abstract(*args, **kwargs):
f(*args, **kwargs)
raise NotImplementedError()
return abstract
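# Hedged usage sketch (not part of the original file): the decorator above runs
# the wrapped body and then always raises, so it marks a method as abstract at
# call time rather than at class-definition time (unlike abc.abstractmethod).
#
#     class Base:
#         @abstract
#         def run(self):
#             print("shared setup")      # executes first
#
#     Base().run()  # prints "shared setup", then raises NotImplementedError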
|
StarcoderdataPython
|
4887460
|
<filename>tests/cache/test_cache_storage.py<gh_stars>0
import pytest
from chocs_middleware.cache import InMemoryCacheStorage, CollectableInMemoryCacheStorage, ICacheStorage, CacheItem, \
CacheError
def test_can_instantiate() -> None:
# given
instance = InMemoryCacheStorage()
# then
assert isinstance(instance, InMemoryCacheStorage)
assert isinstance(instance, ICacheStorage)
assert instance.is_empty
def test_can_store_item() -> None:
# given
instance = InMemoryCacheStorage()
# when
instance.set(CacheItem("1", b"test_data"))
# then
assert "1" in instance._cache
assert not instance.is_empty
def test_can_get_item() -> None:
# given
instance = InMemoryCacheStorage()
item = CacheItem("1", b"test_data")
# when
instance.set(item)
retrieved = instance.get("1")
# then
assert retrieved == item
def test_fail_to_get_item() -> None:
# given
instance = InMemoryCacheStorage()
# then
with pytest.raises(CacheError):
instance.get("1")
def test_can_delete_item() -> None:
# given
instance = CollectableInMemoryCacheStorage()
item = CacheItem.empty("1")
# when
instance.set(item)
# then
assert not instance.is_empty
instance.get(item.id)
# when
instance.collect(item)
# then
with pytest.raises(CacheError):
instance.get(item.id)
assert instance.is_empty
|
StarcoderdataPython
|
5167340
|
"""**Concrete implementations of** `torchdatasets.Dataset` **and** `torchdatasets.Iterable`.
Classes below extend and/or make it easier for user to implement common functionalities.
To use standard PyTorch datasets defined by, for example, `torchvision`, you can
use `WrapDataset` or `WrapIterable` like this::
import torchdatasets
import torchvision
dataset = torchdatasets.datasets.WrapDataset(
torchvision.datasets.MNIST("./data", download=True)
)
After that you can use `map`, `apply` and other functionalities like you normally would with
either `torchdatasets.Dataset` or `torchdatasets.Iterable`.
"""
import abc
import functools
import pathlib
import typing
from torch.utils.data import ChainDataset as TorchChain
from torch.utils.data import ConcatDataset as TorchConcatDataset
from torch.utils.data import Dataset as TorchDataset
from torch.utils.data import IterableDataset as TorchIterable
from torch.utils.data import TensorDataset as TorchTensorDataset
from ._base import Base, MetaDataset, MetaIterable
from .cachers import Memory
try:
from typing import GenericMeta
except ImportError:
# in Python 3.7+, typing.GenericMeta no longer exists
class GenericMeta(type): pass
try:
from torch.utils.data import _typing
class MetaIterableWrapper(MetaIterable, GenericMeta, _typing._DataPipeMeta): pass
except ImportError:
# for pytorch < 1.9 _typing does not exist
class MetaIterableWrapper(MetaIterable): pass
class _DatasetBase(Base):
def __init__(self, concat_object, chain_object):
self._maps = []
self._concat_object = concat_object
self._chain_object = chain_object
def map(self, function: typing.Callable):
r"""**Map function to each element of dataset.**
Function has no specified signature; it is user's responsibility to ensure
it is taking correct arguments as returned from `__getitem__` (in case of `Dataset`)
or `__iter__` (in case of `Iterable`).
Parameters
----------
function: typing.Callable
Function (or functor) taking arguments returned from `__getitem__`
and returning anything.
Returns
-------
self
"""
self._maps.append(function)
return self
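# Hedged illustration (added comment, not original code): `map` only records the
# function here; chained maps are applied in order to every retrieved sample, e.g.
#     ds = MyDataset().map(lambda x: x * 2).map(str)   # MyDataset is hypothetical
#     ds[0]  # -> str(MyDataset()[0] * 2)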
def apply(self, function):
r"""**Apply function to every element of the dataset.**
Specified function has to take Python generator as first argument.
This generator yields consecutive samples from the dataset and the function is free
to do whatever it wants with them.
Other arguments will be forwarded to function.
**WARNING:**
This function returns anything that's returned from function
and it's up to user to ensure correct pipeline functioning
after using this transformation.
**Example**::
class Dataset(torchdatasets.Dataset):
def __init__(self, max: int):
super().__init__() # This is necessary
self.range = list(range(max))
def __getitem__(self, index):
return self.range[index]
def __len__(self):
return len(self.range)
def summation(generator):
return sum(value for value in generator)
summed_dataset = Dataset(101).apply(summation) # Returns 5050
Parameters
----------
function : typing.Callable
Function (or functional object) taking item generator as first object
and variable list of other arguments (if necessary).
Returns
-------
typing.Any
Value returned by function
"""
return function((value for value in self))
def __or__(self, other):
r"""**Concatenate {self} and another {self} compatible object.**
During iteration, items from both dataset will be returned as `tuple`.
Another object could be PyTorch's base class of this object.
Length of resulting dataset is equal to `min(len(self), len(other))`
Parameters
----------
other : {self} or PyTorch's base counterpart
Dataset instance whose sample will be iterated over together
Returns
-------
{concat_object}
Proxy object responsible for concatenation between samples.
Can be used in the same manner as this object.
""".format(
self=self, concat_object=self._concat_object
)
return self._concat_object((self, other))
def __add__(self, other):
r"""**Chain {self} and another {self} compatible object.**
During iteration, items from self will be returned first and items
from other dataset after those.
Length of such dataset is equal to `len(self) + len(other)`
Parameters
----------
other : {self} or PyTorch's base counterpart
Dataset whose sample will be yielded after this dataset.
Returns
-------
{chain_object}
Proxy object responsible for chaining datasets.
Can be used in the same manner as this object.
""".format(
self=self, chain_object=self._chain_object
)
return self._chain_object((self, other))
class Iterable(TorchIterable, _DatasetBase, metaclass=MetaIterableWrapper):
r"""`torch.utils.data.IterableDataset` **dataset with extended capabilities**.
This class inherits from
`torch.utils.data.IterableDataset <https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset>`__,
so it can be used in the same manner after inheritance.
It allows user to perform following operations:
- `map` - apply function to each element of dataset
- `apply` - apply function to **all** elements of dataset
- `filter` - return elements for which `predicate` returns `True`
**Example**::
# Based on original PyTorch example
class Dataset(torchdatasets.Iterable):
def __init__(self, start: int, end: int):
super().__init__() # This is necessary
self.start: int = start
self.end: int = end
def __iter__(self):
return iter(range(self.start, self.end))
# range(1,25) originally, mapped to range(13, 37)
dataset = Dataset(1, 25).map(lambda value: value + 12)
# Sample-wise concatenation, yields range(13, 37) and range(1, 25)
for first, second in dataset | Dataset(1, 25):
print(first, second) # 13 1 up to 37 25
"""
@abc.abstractmethod
def __iter__(self):
pass
def __init__(self):
_DatasetBase.__init__(self, ConcatIterable, ChainIterable)
self._filters = []
self._which = [0]
def filter(self, predicate: typing.Callable):
r"""**Filtered data according to** `predicate`.
Values are filtered based on value returned after every operation (including `map`)
specified before `filter`, for example::
dataset = (
ExampleIterable(0, 100)
.map(lambda value: value + 50)
.filter(lambda elem: elem % 2 == 0)
)
Above will return the even elements of `range(50, 150)`, i.e. `50, 52, ..., 148`.
Parameters
----------
predicate: Callable -> bool
Function returning bool and taking single argument (which is
whatever is returned from the dataset when `filter` is applied).
If `True`, sample will be returned, otherwise it is skipped.
Returns
-------
Dataset
Returns self
"""
self._which.append(len(self._maps))
self._filters.append(predicate)
return self
class MetaDatasetWrapper(MetaDataset, GenericMeta): pass
class Dataset(TorchDataset, _DatasetBase, metaclass=MetaDatasetWrapper):
r"""`torch.utils.data.Dataset` **with extended capabilities.**
This class inherits from
`torch.utils.data.Dataset <https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset>`__,
so it can be used in the same manner after inheritance.
It allows user to perform the following operations:
- `cache` - cache all/part of data in memory or on disk
- `map` - apply function to each element of dataset
- `apply` - apply function to **all** elements of dataset
- `reduce` - reduce dataset to single value with specified function
**Important:**
- Last cache which is able to hold sample is used. Does not matter whether it's in-memory or on-disk or user-specified.
- Although multiple cache calls in different parts of `map` should work, users are encouraged to use it as rare as possible and possibly as late as possible for best performance.
**Example**::
import torchvision
from PIL import Image
# Image loading dataset (use Files for more serious business)
class Dataset(torchdatasets.Dataset):
def __init__(self, path: pathlib.Path):
super().__init__() # This is necessary
self.files = [file for file in path.glob("*")]
def __getitem__(self, index):
return Image.open(self.files[index])
def __len__(self):
return len(self.files)
# Map PIL to Tensor and cache dataset
dataset = Dataset("data").map(torchvision.transforms.ToTensor()).cache()
# Create DataLoader as normally
dataloader = torch.utils.data.DataLoader(dataset)
"""
def __init__(self):
_DatasetBase.__init__(self, ConcatDataset, ChainDataset)
self._cachers = []
self._which = []
@abc.abstractmethod
def __len__(self):
pass
@abc.abstractmethod
def __getitem__(self, index):
pass
def cache(self, cacher: typing.Callable = None):
r"""**Cache data in memory, disk or specify custom caching.**
By default all samples are cached in memory. To change this behaviour specify `cacher`
argument. Some `cacher` implementations can be found in `torchdatasets.cacher` module or you can
provide your own by inheriting from `torchdatasets.cacher.Cacher` and implementing
appropriate methods.
Parameters
----------
cacher : torchdatasets.cacher.Cacher, optional
Instance of `torchdatasets.cacher.Cacher` (or any other object with compatible interface).
Check `cacher` module documentation for more information.
Default: `torchdatasets.cacher.Memory` which caches data in-memory
Returns
-------
Dataset
Returns self
"""
if cacher is None:
cacher = Memory()
self._cachers.append(cacher)
self._which.append(len(self._maps))
return self
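# Hedged illustration (added comment, not original code): a cache is recorded
# together with the number of maps seen so far, so its position in the chain
# matters, e.g. (MyDataset/decode/augment are hypothetical names):
#     ds = MyDataset().map(decode).cache()        # caches decoded samples (Memory by default)
#     ds2 = MyDataset().cache().map(augment)      # caches raw samples, re-applies augment each time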
def reduce(self, function: typing.Callable, initializer=None):
r"""**Reduce dataset to single element with function.**
Works like `functools.reduce <https://docs.python.org/3/library/functools.html#functools.reduce>`__.
**Example**::
class Dataset(torchdatasets.Dataset):
def __init__(self, max: int):
super().__init__() # This is necessary
self.range = list(range(max))
def __getitem__(self, index):
return self.range[index]
def __len__(self):
return len(self.range)
summed_dataset = Dataset(10).reduce(lambda x, y: x + y) # Returns 45
Parameters
----------
function : typing.Callable
Two argument function returning single value used to `reduce` dataset.
initializer: typing.Any, optional
Value with which reduction will start.
Returns
-------
typing.Any
Reduced value
"""
if initializer is None:
return functools.reduce(function, (item for item in self))
return functools.reduce(function, (item for item in self), initializer)
def reset(self, cache: bool = True, maps: bool = True):
r"""**Reset dataset state.**
`cache` and `maps` can be reset separately.
Parameters
----------
cache : bool, optional
Reset current cache. Default: `True`
maps : bool, optional
Reset currently applied maps. Default: `True`
"""
if cache:
self._cachers = []
if maps:
self._maps = []
################################################################################
#
# Dataset Concatenations
#
################################################################################
class ConcatDataset(Dataset):
r"""**Concrete** `torchdatasets.Dataset` **responsible for sample-wise concatenation.**
This class is returned when `|` (logical or operator) is used on instance
of `torchdatasets.Dataset` (original `torch.utils.data.Dataset
<https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset>`__ can be used as well).
**Important:** This class is meant to be more of a proxy for `|` operator,
you can use it directly though.
**Example**::
dataset = (
torchdatasets.ConcatDataset([dataset1, dataset2, dataset3])
.map(lambda sample: sample[0] + sample[1] + sample[2])
)
Any `Dataset` methods can be used normally.
Attributes
----------
datasets : List[Union[torchdatasets.Dataset, torch.utils.data.Dataset]]
List of datasets to be concatenated sample-wise.
"""
def __init__(self, datasets: typing.List):
super().__init__()
self.datasets = datasets
def __getitem__(self, index):
return tuple(dataset[index] for dataset in self.datasets)
def __len__(self):
return min(len(dataset) for dataset in self.datasets)
class ConcatIterable(Iterable):
r"""**Concrete** `Iterable` **responsible for sample-wise concatenation.**
This class is returned when `|` (logical or operator) is used on instance
of `Iterable` (original `torch.utils.data.IterableDataset
<https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset>`__ can be used as well).
.. note::
This class is meant to be more of a proxy for `|` operator,
you can use it directly though.
**Example**::
dataset = (
torchdatasets.ConcatIterable([dataset1, dataset2, dataset3])
.map(lambda x, y, z: (x + y, z))
)
Any `IterableDataset` methods can be used normally.
Attributes
----------
datasets : List[Union[torchdatasets.Iterable, torch.utils.data.IterableDataset]]
List of datasets to be concatenated sample-wise.
"""
def __init__(self, datasets: typing.List):
super().__init__()
self.datasets = datasets
def __iter__(self):
yield from zip(*self.datasets)
def __getitem__(self, index):
return tuple(dataset[index] for dataset in self.datasets)
def __len__(self):
return min(len(dataset) for dataset in self.datasets)
class ChainDataset(TorchConcatDataset, Dataset):
r"""**Concrete** `torchdatasets.Dataset` **responsible for chaining multiple datasets.**
This class is returned when `+` (addition operator) is used on instance
of `torchdatasets.Dataset` (original `torch.utils.data.Dataset` can be used as well).
Acts just like PyTorch's `+` or rather `torch.utils.data.ConcatDataset <https://pytorch.org/docs/stable/data.html#torch.utils.data.ConcatDataset>`__
.. note::
This class is meant to be more of a proxy for `+` operator,
you can use it directly though.
**Example**::
# Iterate over 3 datasets consecutively
dataset = torchdatasets.ChainDataset([dataset1, dataset2, dataset3])
Any `Dataset` methods can be used normally.
Attributes
----------
datasets : List[Union[torchdatasets.Dataset, torch.utils.data.Dataset]]
List of datasets to be chained.
"""
def __init__(self, datasets):
Dataset.__init__(self)
TorchConcatDataset.__init__(self, datasets)
class ChainIterable(TorchChain, Iterable):
r"""**Concrete** `torchdatasets.Iterable` **responsible for chaining multiple datasets.**
This class is returned when `+` (addition operator) is used on instance
of `torchdatasets.Iterable` (original `torch.utils.data.IterableDataset` can be used as well).
Acts just like PyTorch's `+` and `ChainDataset <https://pytorch.org/docs/stable/data.html#torch.utils.data.ChainDataset>`__.
.. note::
This class is meant to be more of a proxy for `+` operator,
you can use it directly though.
**Example**::
# Iterate over 3 iterable datasets consecutively
dataset = torchdatasets.ChainIterable([dataset1, dataset2, dataset3])
Any `Iterable` methods can be used normally.
Attributes
----------
datasets : List[Union[torchdatasets.Iterable, torch.utils.data.IterableDataset]]
List of datasets to be chained.
"""
def __init__(self, datasets):
Iterable.__init__(self)
TorchChain.__init__(self, datasets)
###############################################################################
#
# CONCRETE CLASSES
#
###############################################################################
class Files(Dataset):
r"""**Create** `Dataset` **from list of files.**
Each file is a separate sample. User can use this class directly
as all necessary methods are implemented.
`__getitem__` uses Python's `open <https://docs.python.org/3/library/functions.html#open>`__
and returns file. Its implementation looks like::
# You can modify open behaviour by passing args and kwargs to __init__
with open(self.files[index], *self.args, **self.kwargs) as file:
return file
you can use `map` method in order to modify returned `file` or you can overload
`__getitem__` (image opening example below)::
import torchdatasets
import torchvision
from PIL import Image
# Image loading dataset
class ImageDataset(torchdatasets.datasets.Files):
def __getitem__(self, index):
return Image.open(self.files[index])
# Useful class methods are inherited as well
dataset = ImageDataset.from_folder("./data", regex="*.png").map(
torchvision.transforms.ToTensor()
)
`from_folder` class method is available for common case of creating dataset
from files in folder.
Parameters
----------
files : List[pathlib.Path]
List of files to be used.
regex : str, optional
Regex to be used for filtering. Default: `*` (all files)
*args
Arguments saved for `__getitem__`
**kwargs
Keyword arguments saved for `__getitem__`
"""
@classmethod
def from_folder(cls, path: pathlib.Path, regex: str = "*", *args, **kwargs):
r"""**Create dataset from** `pathlib.Path` **-like object.**
Path should be a directory and will be extended via `glob` method taking `regex`
(if specified). Varargs and kwargs will be saved for use for `__getitem__` method.
Parameters
----------
path : pathlib.Path
Path object (directory) containing samples.
regex : str, optional
Regex to be used for filtering. Default: `*` (all files)
*args
Arguments saved for `__getitem__`
**kwargs
Keyword arguments saved for `__getitem__`
Returns
-------
FilesDataset
Instance of your file based dataset.
"""
files = [file for file in path.glob(regex)]
return cls(files, *args, **kwargs)
def __init__(self, files: typing.List[pathlib.Path], *args, **kwargs):
super().__init__()
self.files = files
self.args = args
self.kwargs = kwargs
def __len__(self):
return len(self.files)
def __getitem__(self, index):
with open(self.files[index], *self.args, **self.kwargs) as file:
return file
def filter(self, predicate: typing.Callable):
r"""**Remove** `files` **for which predicate returns** `False`**.**
**Note:** This is different from `torchdatasets.Iterable`'s `filter` method,
as the filtering is done when called, not during iteration.
Parameters
----------
predicate : Callable
Function-like object taking file as argument and returning boolean
indicating whether to keep a file.
Returns
-------
FilesDataset
Modified self
"""
self.files = [file for file in self.files if predicate(file)]
return self
def sort(self, key=None, reverse=False):
r"""**Sort files using Python's built-in** `sorted` **method.**
Arguments are passed directly to `sorted`.
Parameters
----------
key: Callable, optional
Specifies a function of one argument that is used to extract a comparison key from each element.
Default: `None` (compare the elements directly).
reverse: bool, optional
Whether `sorting` should be descending. Default: `False`
Returns
-------
FilesDataset
Modified self
"""
self.files = sorted(self.files, key=key, reverse=reverse)
return self
class TensorDataset(TorchTensorDataset, Dataset):
r"""**Dataset wrapping** `torch.tensors` **.**
`cache`, `map` etc. enabled version of `torch.utils.data.TensorDataset <https://pytorch.org/docs/stable/data.html#torch.utils.data.TensorDataset>`__.
Parameters:
-----------
*tensors : torch.Tensor
List of `tensors` to be wrapped.
"""
def __init__(self, *tensors):
Dataset.__init__(self)
TorchTensorDataset.__init__(self, *tensors)
class Generator(Iterable):
r"""**Iterable wrapping any generator expression.**
Parameters:
-----------
expression: Generator expression
Generator from which one can `yield` via `yield from` syntax.
"""
def __init__(self, expression):
super().__init__()
self.expression = expression
def __iter__(self):
yield from self.expression
class _Wrap:
def __getattr__(self, name):
return getattr(self.dataset, name)
class WrapDataset(_Wrap, Dataset):
r"""**Dataset wrapping standard** `torch.data.utils.Dataset` **and making it** `torchdatasets.Dataset` **compatible.**
All attributes of wrapped dataset can be used normally, for example::
dataset = td.datasets.WrapDataset(
torchvision.datasets.MNIST("./data")
)
dataset.train # True, has all MNIST attributes
Parameters:
-----------
dataset: `torch.utils.data.Dataset`
Dataset to be wrapped
"""
def __init__(self, dataset):
self.dataset = dataset
Dataset.__init__(self)
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
class WrapIterable(_Wrap, Iterable):
r"""**Iterable wrapping standard** `torch.data.utils.IterableDataset` **and making it** `torchdatasets.Iterable` **compatible.**
All attributes of wrapped dataset can be used normally as is the case for
`torchdatasets.datasets.WrapDataset`.
Parameters:
-----------
dataset: `torch.utils.data.IterableDataset`
Dataset to be wrapped
"""
def __init__(self, dataset):
Iterable.__init__(self)
self.dataset = dataset
def __iter__(self):
yield from self.dataset
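# --- Hedged usage sketch (appended for illustration; not part of the original
# module). It combines Files.from_folder, map, cache and the | / + operators
# defined above; the "./images" folder and *.png pattern are made up.
#
#     import pathlib
#     import torchvision
#     from PIL import Image
#
#     class Images(Files):
#         def __getitem__(self, index):
#             return Image.open(self.files[index])
#
#     images = Images.from_folder(pathlib.Path("./images"), regex="*.png")
#     tensors = images.map(torchvision.transforms.ToTensor()).cache()
#     paired = tensors | tensors      # sample-wise pairs -> ConcatDataset, len == min of both
#     doubled = tensors + tensors     # chained -> length is the sum of both parts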
|
StarcoderdataPython
|
3380518
|
<gh_stars>1-10
import pandas as pd
import numpy as np
from pandas_datareader import data as pdr
import yfinance as yf
import datetime as dt
import datetime
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import math
class ModelAnalysis():
def __init__(self,start_date, end_date, stock_name):
self.start_date = start_date
self.end_date = end_date
self.stock_name = stock_name
self.col_name = None
self.data = pd.DataFrame()
self.ticker_list = list()
self.buy_sell_data = pd.DataFrame()
yf.pdr_override()
def get_stock_name(self):
try:
self.data = pdr.get_data_yahoo(self.stock_name,start=self.start_date,end=self.end_date)
### start: daily % change calculation
self.data['%change'] = np.nan
for j in range(1,self.data.shape[0]):
self.data['%change'][j] = ((self.data['Close'][j] - self.data['Close'][j-1])/self.data['Close'][j]) * 100
### end: daily % change calculation
#self.data = self.data.style.applymap(self.color_negative_red)
return self.data
except Exception as e:
return None
def get_plot(self,col_name):
self.col_name = col_name
title = self.col_name + " Stocks for " + self.stock_name
fig = px.line(self.data, x=self.data.index, y=self.col_name, title=title)
fig.update_xaxes(
rangeslider_visible=True,
rangeselector=dict(
buttons=list([
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="YTD", step="year", stepmode="todate"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all")
])
)
)
return fig
def compare_stocks(self, compare_ticker):
Data = pd.DataFrame()
no_stock_list = list()
for stock in compare_ticker:
try:
df = pdr.get_data_yahoo(stock,start=self.start_date,end=self.end_date)
df['Ticker'] = stock
df['%change'] = np.nan
for j in range(1,df.shape[0]):
df['%change'][j] = (
(df['Close'][j] - df['Close'][j-1])/df['Close'][j])*100
Data = Data.append(df)
del df
except Exception as e:
no_stock_list.append(stock)
Title = "Compare stocks with "
length = 0
for s in compare_ticker:
if length == len(compare_ticker) - 1:
Title += s
else:
Title += s + " and "
length += 1
fig = px.line(Data, x=Data.index, y=self.col_name, color='Ticker',title=Title)
return fig, no_stock_list
def ewa_sma(self):
EMA = self.data[self.col_name].ewm(span=20, adjust = False).mean()
SMA = self.data[self.col_name].rolling(window=20).mean()
self.data['EMA'] = EMA
self.data['SMA'] = SMA
#df = self.data[self.data[self.col_name] == self.stock_name]
#df[df['name'] == 'name']
fig = px.line(self.data, x=self.data.index, y=self.col_name)
fig.add_scatter(x=self.data.index, y=self.data['EMA'], mode='lines',name='Exponential Weighted Avg')
return fig
def candle_plot(self):
fig = go.Figure(data=[go.Candlestick(x=self.data.index,
open=self.data['Open'],
high=self.data['High'],
low=self.data['Low'],
close=self.data['Close'])])
return fig
def color_negative_red(self, val):
color = 'red' if val < 0 else 'black'
return 'color: %s' % color
def buy_sell(self, ticker_list):
self.ticker_list = ticker_list
today = datetime.date.today()
prev = today - datetime.timedelta(days=4)
for ticker in self.ticker_list:
try:
df = pdr.get_data_yahoo(
ticker,
start=prev,
end=dt.datetime.now()).iloc[-1:,:]
df['Ticker'] = ticker
self.buy_sell_data = self.buy_sell_data.append(df)
del df
except Exception as e:
pass
self.buy_sell_data['Buy'] = "don't buy"
self.buy_sell_data['Sell'] = "don't sell"
for i in range(self.buy_sell_data.shape[0]):
if np.floor(self.buy_sell_data['Open'][i]) == np.floor(self.buy_sell_data['Low'][i]):
self.buy_sell_data['Buy'][i] = "Buy"
elif np.floor(self.buy_sell_data['Open'][i]) == np.floor(self.buy_sell_data['High'][i]):
self.buy_sell_data['Sell'][i] = "Sell"
else:
pass
value_change = []
data = pd.DataFrame()
today = datetime.date.today()
prev = today - datetime.timedelta(days=4)
#curr = datetime.date.today()
for i in range(len(self.ticker_list)):
try:
df = pdr.get_data_yahoo(self.ticker_list[i],start=prev,end=dt.datetime.now())
df['ticker'] = self.ticker_list[i]
df['change'] = np.nan
for j in range(1,df.shape[0]):
df['change'][j] = ((df['Close'][j] - df['Close'][j-1])/df['Close'][j]) * 100
#print(df['change'][j])
#value_change.append(df.iloc[-1:,-1:]['change'][0])
data = data.append(df)
del df
except Exception as e:
print("No data available for {}".format(self.ticker_list[i]))
self.buy_sell_data['change'] = np.nan
#for i in range(2,data.shape[0],3):
# value_change.append(data['change'][i])
#self.buy_sell_data['%_change'] = value_change
#self.buy_sell_data.style.applymap(self.color_negative_red)
data['count'] = np.nan
for i in range(data.shape[0]):
data['count'][i] = i
data.set_index('count',drop=True,inplace=True)
index_missing = list()
for i in range(1,data.shape[0]):
try:
index_missing.append(
data['change'].index[data['change'].apply(np.isnan)][i]
)
except Exception as e:
pass
index_missing.append(data['change'][data.shape[0] - 1])
change_index = list()
for i in range(len(index_missing) - 1):
change_index.append(data['change'][index_missing[i] - 1])
change_index.append(data['change'][data.shape[0] - 1])
self.buy_sell_data['change'] = change_index
return self.buy_sell_data
#return data
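# Hedged usage sketch (not part of the original file): typical call order for the
# class above; ticker symbols and dates are made up. get_stock_name/compare_stocks/
# buy_sell download data from Yahoo Finance via pandas_datareader + yfinance, the
# plotting methods reuse the already-downloaded frame.
#
#     analysis = ModelAnalysis("2020-01-01", "2020-12-31", "AAPL")
#     df = analysis.get_stock_name()          # downloads OHLC data and adds %change
#     fig = analysis.get_plot("Close")        # plotly line chart, also sets col_name
#     fig2, missing = analysis.compare_stocks(["MSFT", "GOOG"])
#     fig3 = analysis.ewa_sma()               # adds EMA/SMA columns, overlays EMA
#     fig4 = analysis.candle_plot()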
|
StarcoderdataPython
|
3397884
|
<reponame>nwestbury/StockAnalyzer<filename>src/sa/__oldbots/basebot.py
#!/usr/bin/env python3
import json
from abc import ABCMeta, abstractmethod
class BaseBot(metaclass=ABCMeta):
"""
Parent class of all python-based bots, it is presumed that
the function "guess" will be defined in all children classes.
"""
@property
@abstractmethod
def name(self):
raise NotImplementedError
@abstractmethod
def guess(self):
raise NotImplementedError
def __call__(self):
"""
Returns a json string in the appropriate format.
"""
json_guess = json.dumps({"name": self.name,
"guess": self.guess()})
return json_guess
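# Hedged usage sketch (not part of the original module): a minimal concrete bot
# showing the contract BaseBot enforces; the bot name and guess value are made up.
if __name__ == "__main__":
    class AlwaysUpBot(BaseBot):
        name = "always-up"          # satisfies the abstract `name` property

        def guess(self):
            return "up"

    print(AlwaysUpBot()())          # {"name": "always-up", "guess": "up"}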
|
StarcoderdataPython
|
8206
|
<reponame>YuraHavrylko/revenuecat_python
from enum import Enum
class SubscriptionPlatform(Enum):
ios = 'ios'
android = 'android'
macos = 'macos'
uikitformac = 'uikitformac'
stripe = 'stripe'
class AttributionNetworkCode(Enum):
apple_search_ads = 0
adjust = 1
apps_flyer = 2
branch = 3
tenjin = 4
facebook = 5
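# Hedged usage sketch (not part of the original file): members can be looked up
# by value, which is how platform strings from an API payload would map back.
#
#     SubscriptionPlatform("ios") is SubscriptionPlatform.ios   # True
#     AttributionNetworkCode.adjust.value                       # 1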
|
StarcoderdataPython
|
6632785
|
<filename>src/api-engine/api/auth.py
import logging
import os
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import authentication
from rest_framework.permissions import BasePermission
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_jwt.serializers import VerifyJSONWebTokenSerializer
from api.common.enums import UserRole
from api.models import UserProfile
LOG = logging.getLogger(__name__)
TOKEN_INFO_URL = getattr(settings, "TOKEN_INFO_URL", "")
SUPER_USER_TOKEN = os.environ.get("ADMIN_TOKEN", "")
ADMIN_NAME = os.getenv("ADMIN_USERNAME")
class CustomAuthenticate(authentication.BaseAuthentication):
def authenticate(self, request):
authorization = request.META.get("HTTP_AUTHORIZATION", None)
if not authorization or not authorization.startswith("JWT"):
return None
token = authorization.split(" ")[-1]
if token == SUPER_USER_TOKEN:
username = ADMIN_NAME
try:
user = UserProfile.objects.get(username=username)
except ObjectDoesNotExist:
return None
return user, None
else:
return None
class TokenAuth(authentication.BaseAuthentication):
def authenticate(self, request):
token = {"token": request.META.get('HTTP_AUTHORIZATION', None)}
valid_data = VerifyJSONWebTokenSerializer().validate(token)
user = valid_data['user']
organization = user.organization
#organization_id = user.organization.id
#organization_name = user.organization.name
#request.user.
if user:
# DRF expects a (user, auth) tuple on successful authentication
return user, None
else:
raise AuthenticationFailed('Authentication failed')
class IsAdminAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.role == UserRole.Administrator.name.lower()
)
class IsOperatorAuthenticated(BasePermission):
"""
Allows access only to operators.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.role == UserRole.Operator.name.lower()
)
class IsSuperUserAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return (
request.user
and request.user.is_authenticated
and request.user.is_super_user
)
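# Hedged configuration sketch (not part of the original module): these classes
# are meant to be wired into Django REST framework views; the view name below
# is hypothetical.
#
#     class NodeViewSet(viewsets.ViewSet):
#         authentication_classes = (CustomAuthenticate, TokenAuth)
#         permission_classes = (IsOperatorAuthenticated,)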
|
StarcoderdataPython
|
8160195
|
<reponame>chetan201/PVMismatch
# -*- coding: utf-8 -*-
"""
This is the PVMismatch Package. It contains :mod:`~pvmismatch.pvmismatch_lib`
and :mod:`~pvmismatch.pvmismatch_tk`.
:mod:`~pvmismatch.pvmismatch_lib`
=================================
This package contains the basic library modules, methods, classes and
attributes to model PV system mismatch.
.. note::
The main library classes and modules are exposed through this package for
convenience.
For example::
>>> from pvmismatch import PVcell # imports the PVcell class
>>> # import pvconstants, pvcell, pvmodule, pvstring and pvsystem
>>> from pvmismatch import *
:mod:`~pvmismatch.pvmismatch_tk`
================================
This package contains an application that can be run using
:mod:`pvmismatch.pv_tk`.
"""
import os
import importlib
# try to import Dulwich or create dummies
try:
from dulwich.contrib.release_robot import get_current_version
from dulwich.repo import NotGitRepository
except ImportError:
NotGitRepository = NotImplementedError
def get_current_version(*args, **kwargs):
raise NotGitRepository
# import pvmismatch_lib modules so to match old API
import pvmismatch.pvmismatch_lib.pvconstants as pvconstants
import pvmismatch.pvmismatch_lib.pvcell as pvcell
import pvmismatch.pvmismatch_lib.pvmodule as pvmodule
import pvmismatch.pvmismatch_lib.pvstring as pvstring
import pvmismatch.pvmismatch_lib.pvsystem as pvsystem
import pvmismatch.pvmismatch_lib.pvexceptions as pvexceptions
# expose constructors to package's top level
PVconstants = pvconstants.PVconstants
PVcell = pvcell.PVcell
PVmodule = pvmodule.PVmodule
PVstring = pvstring.PVstring
PVsystem = pvsystem.PVsystem
# Dulwich Release Robot
BASEDIR = os.path.dirname(__file__) # this directory
PROJDIR = os.path.dirname(BASEDIR)
VER_FILE = 'version' # name of file to store version
# use release robot to try to get current Git tag
try:
GIT_TAG = get_current_version(PROJDIR)
except NotGitRepository:
GIT_TAG = None
# check version file
try:
version = importlib.import_module('%s.%s' % (__name__, VER_FILE))
except ImportError:
VERSION = None
else:
VERSION = version.VERSION
# update version file if it differs from Git tag
if GIT_TAG is not None and VERSION != GIT_TAG:
with open(os.path.join(BASEDIR, VER_FILE + '.py'), 'w') as vf:
vf.write('VERSION = "%s"\n' % GIT_TAG)
else:
GIT_TAG = VERSION # if Git tag is none use version file
VERSION = GIT_TAG # version
__author__ = '<NAME>'
__email__ = u'<EMAIL>'
__url__ = u'https://github.com/SunPower/PVMismatch'
__version__ = VERSION
__release__ = 'Kenya Ketchup'
__all__ = ['pvconstants', 'pvcell', 'pvmodule', 'pvstring', 'pvsystem']
|
StarcoderdataPython
|
4852442
|
#!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a C# implementation
Copyright <NAME> 2018
Released under GNU GPL version 3 or later
'''
import sys, textwrap, os, time, re
from . import mavparse, mavtemplate
t = mavtemplate.MAVTemplate()
enumtypes = {}
map = {
'float' : 'float',
'double' : 'double',
'char' : 'byte',
'int8_t' : 'sbyte',
'uint8_t' : 'byte',
'uint8_t_mavlink_version' : 'B',
'int16_t' : 'short',
'uint16_t' : 'ushort',
'int32_t' : 'int',
'uint32_t' : 'uint',
'int64_t' : 'long',
'uint64_t' : 'ulong',
}
def generate_message_header(f, xml_list):
dedup = {}
for xml in xml_list:
print("generate_message_header " + xml.basename)
if xml.little_endian:
xml.mavlink_endian = "MAVLINK_LITTLE_ENDIAN"
else:
xml.mavlink_endian = "MAVLINK_BIG_ENDIAN"
if xml.crc_extra:
xml.crc_extra_define = "1"
else:
xml.crc_extra_define = "0"
if xml.command_24bit:
xml.command_24bit_define = "1"
else:
xml.command_24bit_define = "0"
if xml.sort_fields:
xml.aligned_fields_define = "1"
else:
xml.aligned_fields_define = "0"
# work out the included headers
xml.include_list = []
for i in xml.include:
base = i[:-4]
xml.include_list.append(mav_include(base))
if not hasattr(xml , 'message_names_enum'):
xml.message_names_enum = ''
# and message CRCs array
if not hasattr(xml , 'message_infos_array'):
xml.message_infos_array = ''
if xml.command_24bit:
# we sort with primary key msgid, secondary key dialect
for msgid in sorted(xml.message_names.keys()):
name = xml.message_names[msgid]
if name not in dedup:
dedup[name] = 1
xml_list[0].message_infos_array += ' new message_info(%u, "%s", %u, %u, %u, typeof( mavlink_%s_t )),\n' % (msgid,
name,
xml.message_crcs[msgid],
xml.message_min_lengths[msgid],
xml.message_lengths[msgid],
name.lower())
xml_list[0].message_names_enum += '\n %s = %u,' % (name, msgid)
else:
for msgid in range(256):
crc = xml.message_crcs.get(msgid, None)
name = xml.message_names.get(msgid, None)
length = xml.message_lengths.get(msgid, None)
if name is not None and name not in dedup:
dedup[name] = 1
xml_list[0].message_infos_array += ' new message_info(%u, "%s", %u, %u, %u, typeof( mavlink_%s_t )), // none 24 bit\n' % (msgid,
name,
crc,
length,
length,
name.lower())
xml_list[0].message_names_enum += '\n %s = %u,' % (name, msgid)
# add some extra field attributes for convenience with arrays
for m in xml.enum:
for fe in m.entry[:]:
fe.name = fe.name.replace("NAV_","")
t.write(f, '''
using System;
using System.Collections.Generic;
using System.Text;
using System.Runtime.InteropServices;
public partial class MAVLink
{
public const string MAVLINK_BUILD_DATE = "${parse_time}";
public const string MAVLINK_WIRE_PROTOCOL_VERSION = "${wire_protocol_version}";
public const int MAVLINK_MAX_PAYLOAD_LEN = ${largest_payload};
public const byte MAVLINK_CORE_HEADER_LEN = 9;///< Length of core header (of the comm. layer)
public const byte MAVLINK_CORE_HEADER_MAVLINK1_LEN = 5;///< Length of MAVLink1 core header (of the comm. layer)
public const byte MAVLINK_NUM_HEADER_BYTES = (MAVLINK_CORE_HEADER_LEN + 1);///< Length of all header bytes, including core and stx
public const byte MAVLINK_NUM_CHECKSUM_BYTES = 2;
public const byte MAVLINK_NUM_NON_PAYLOAD_BYTES = (MAVLINK_NUM_HEADER_BYTES + MAVLINK_NUM_CHECKSUM_BYTES);
public const int MAVLINK_MAX_PACKET_LEN = (MAVLINK_MAX_PAYLOAD_LEN + MAVLINK_NUM_NON_PAYLOAD_BYTES + MAVLINK_SIGNATURE_BLOCK_LEN);///< Maximum packet length
public const byte MAVLINK_SIGNATURE_BLOCK_LEN = 13;
public const int MAVLINK_LITTLE_ENDIAN = 1;
public const int MAVLINK_BIG_ENDIAN = 0;
public const byte MAVLINK_STX = ${protocol_marker};
public const byte MAVLINK_STX_MAVLINK1 = 0xFE;
public const byte MAVLINK_ENDIAN = ${mavlink_endian};
public const bool MAVLINK_ALIGNED_FIELDS = (${aligned_fields_define} == 1);
public const byte MAVLINK_CRC_EXTRA = ${crc_extra_define};
public const byte MAVLINK_COMMAND_24BIT = ${command_24bit_define};
public const bool MAVLINK_NEED_BYTE_SWAP = (MAVLINK_ENDIAN == MAVLINK_LITTLE_ENDIAN);
// msgid, name, crc, minlength, length, type
public static message_info[] MAVLINK_MESSAGE_INFOS = new message_info[] {
${message_infos_array}
};
public const byte MAVLINK_VERSION = ${version};
public const byte MAVLINK_IFLAG_SIGNED= 0x01;
public const byte MAVLINK_IFLAG_MASK = 0x01;
public struct message_info
{
public uint msgid { get; internal set; }
public string name { get; internal set; }
public byte crc { get; internal set; }
public uint minlength { get; internal set; }
public uint length { get; internal set; }
public Type type { get; internal set; }
public message_info(uint msgid, string name, byte crc, uint minlength, uint length, Type type)
{
this.msgid = msgid;
this.name = name;
this.crc = crc;
this.minlength = minlength;
this.length = length;
this.type = type;
}
public override string ToString()
{
return String.Format("{0} - {1}",name,msgid);
}
}
public enum MAVLINK_MSG_ID
{
${message_names_enum}
}
''', xml_list[0])
def generate_message_enum_types(xml):
print("generate_message_enum_types: " + xml.filename)
for m in xml.message:
for fld in m.fields:
if fld.array_length == 0:
fld.type = map[fld.type]
if fld.enum != "" and fld.array_length == 0:
enumtypes[fld.enum] = fld.type
print(fld.enum + " is type " + fld.type)
def cleanText(text):
text = text.replace("\n"," ")
text = text.replace("\r"," ")
return text.replace("\"","'")
def generate_message_enums(f, xml):
print("generate_message_enums: " + xml.filename)
# add some extra field attributes for convenience with arrays
for m in xml.enum:
m.description = cleanText(m.description)
m.flags = ""
if m.description.lower().find("bitmask") >= 0 or m.name.lower().find("_flags") >= 0:
m.flags = "[Flags]\n\t"
m.enumtype = enumtypes.get(m.name,"int /*default*/")
for fe in m.entry:
if fe.name.endswith('ENUM_END'):
m.entry.remove(fe)
continue
fe.description = cleanText(fe.description)
fe.name = fe.name.replace(m.name + "_","")
firstchar = re.search('^([0-9])', fe.name )
if firstchar != None and firstchar.group():
fe.name = '_%s' % fe.name
if hasattr(fe, "deprecated") and fe.deprecated is True:
fe.name = '''[Obsolete]
%s''' % fe.name
t.write(f, '''
${{enum:
///<summary> ${description} </summary>
${flags}public enum ${name}: ${enumtype}
{
${{entry:///<summary> ${description} |${{param:${description}| }} </summary>
[Description("${description}")]
${name}=${value},
}}
};
}}
''', xml)
def generate_message_footer(f, xml):
t.write(f, '''
}
''', xml)
f.close()
def generate_message_h(f, directory, m):
'''generate per-message header for a XML file'''
m.obsolete = ""
if hasattr(m, "deprecated") and m.deprecated is True:
m.obsolete = "[Obsolete]"
t.write(f, '''
${obsolete}
/// extensions_start ${extensions_start}
[StructLayout(LayoutKind.Sequential,Pack=1,Size=${wire_length})]
///<summary> ${description} </summary>
public struct mavlink_${name_lower}_t
{
public mavlink_${name_lower}_t(${{ordered_fields:${type} ${name},}})
{
${{ordered_fields: this.${name} = ${name};
}}
}
${{ordered_fields: /// <summary>${description} ${enum} ${units} ${display}</summary>
[Units("${units}")]
[Description("${description}")]
${array_prefix} ${type} ${name};
}}
};
''', m)
class mav_include(object):
def __init__(self, base):
self.base = base
def generate_one(fh, basename, xml):
'''generate headers for one XML file'''
directory = os.path.join(basename, xml.basename)
print("Generating CSharp implementation for %s" % xml.basename)
# add some extra field attributes for convenience with arrays
for m in xml.message:
m.msg_name = m.name
if xml.crc_extra:
m.crc_extra_arg = ", %s" % m.crc_extra
else:
m.crc_extra_arg = ""
m.msg_nameid = "MAVLINK_MSG_ID_${name} = ${id}"
m.description = cleanText(m.description)
if m.extensions_start is None:
m.extensions_start = 0;
for f in m.fields:
f.description = cleanText(f.description)
if f.array_length != 0:
f.array_prefix = '[MarshalAs(UnmanagedType.ByValArray,SizeConst=%u)]\n\t\tpublic' % f.array_length
f.array_arg = ', %u' % f.array_length
f.array_return_arg = '%u, ' % (f.array_length)
f.array_tag = ''
f.array_const = 'const '
f.decode_left = "%s.%s = " % (m.name_lower, f.name)
f.decode_right = ''
f.return_type = 'void'
f.return_value = 'void'
f.type = "%s%s" % (map[f.type], '[]')
else:
if f.enum != "":
f.type = "/*" +f.enum + "*/" + f.type;
#f.type = "/*" +f.type + "*/" + f.enum;
f.array_suffix = ''
f.array_prefix = 'public '
f.array_tag = 'BitConverter.To%s' % f.type
if f.type == 'byte':
f.array_tag = 'getByte'
if f.name == 'fixed': # this is a keyword
f.name = '@fixed'
f.array_arg = ''
f.array_return_arg = ''
f.array_const = ''
f.decode_left = "%s.%s = " % (m.name_lower, f.name)
f.decode_right = ''
f.get_arg = ''
f.c_test_value = f.test_value
f.return_type = f.type
# cope with uint8_t_mavlink_version
for m in xml.message:
m.arg_fields = []
m.array_fields = []
m.scalar_fields = []
for f in m.ordered_fields:
if f.array_length != 0:
m.array_fields.append(f)
else:
m.scalar_fields.append(f)
for f in m.fields:
if not f.omit_arg:
m.arg_fields.append(f)
f.putname = f.name
else:
f.putname = f.const_value
for m in xml.message:
generate_message_h(fh, directory, m)
def copy_fixed_headers(directory, xml):
'''copy the fixed protocol headers to the target directory'''
import shutil, filecmp
hlist = {
"1.0": [ 'MavlinkCRC.cs', 'MAVLinkMessage.cs', 'MavlinkParse.cs', 'MavlinkUtil.cs', 'MAVLink.csproj' ],
"2.0": [ 'MavlinkCRC.cs', 'MAVLinkMessage.cs', 'MavlinkParse.cs', 'MavlinkUtil.cs', 'MAVLink.csproj' ]
}
basepath = os.path.dirname(os.path.realpath(__file__))
srcpath = os.path.join(basepath, 'CS')
print("Copying fixed headers for protocol %s to %s" % (xml.wire_protocol_version, directory))
for h in hlist[xml.wire_protocol_version]:
src = os.path.realpath(os.path.join(srcpath, h))
dest = os.path.realpath(os.path.join(directory, h))
if src == dest or (os.path.exists(dest) and filecmp.cmp(src, dest)):
continue
shutil.copy(src, dest)
def generate(basename, xml_list):
'''generate complete MAVLink CSharp implementation'''
print("generate for protocol %s to %s" % (xml_list[0].wire_protocol_version, basename))
directory = basename
if not os.path.exists(directory):
os.makedirs(directory)
f = open(os.path.join(directory, "mavlink.cs"), mode='w')
generate_message_header(f, xml_list)
for xml1 in xml_list:
generate_message_enum_types(xml1)
for xml2 in xml_list:
generate_message_enums(f, xml2)
for xml3 in xml_list:
generate_one(f, basename, xml3)
generate_message_footer(f,xml_list[0])
copy_fixed_headers(basename, xml_list[0])
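# Hedged usage note (not part of the original module): `generate(basename, xml_list)`
# above expects already-parsed mavparse XML objects and writes a single mavlink.cs
# plus the fixed C# support files into `basename`; it is normally driven by the
# mavgen frontend rather than called directly, e.g. (path made up):
#
#     generate("generated/csharp", xml_list)   # xml_list built by the mavgen frontend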
|
StarcoderdataPython
|
11337617
|
<gh_stars>0
from ._BoundingBox3d import *
from ._BoundingBoxes3d import *
|
StarcoderdataPython
|
3520195
|
from .facility_models import * # noqa
# from .infrastructure import *
|
StarcoderdataPython
|
9608676
|
<reponame>gruber-sciencelab/MAPP
"""
##############################################################################
#
# Filter the protein-coding set of exons that have valid(long-enough)
# regions around 5/3-splice-site.
#
# AUTHOR: Maciej_Bak
# AFFILIATION: University_of_Basel
# AFFILIATION: Swiss_Institute_of_Bioinformatics
# CONTACT: <EMAIL>
# CREATED: 26-03-2020
# LICENSE: Apache_2.0
#
##############################################################################
"""
# imports
import os
import time
import logging
import logging.handlers
from argparse import ArgumentParser, RawTextHelpFormatter
from GTF import *
import pandas as pd
def parse_arguments():
"""Parser of the command-line arguments."""
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"-v",
"--verbosity",
dest="verbosity",
choices=("DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"),
default="ERROR",
help="Verbosity/Log level. Defaults to ERROR",
)
parser.add_argument(
"-l", "--logfile", dest="logfile", help="Store log to this file."
)
parser.add_argument(
"--in",
dest="input",
required=True,
help="Path to the SUPPA file with SE events.",
)
parser.add_argument(
"--gtf",
dest="gtf",
required=True,
help="Path to the annotation file in gtf format.",
)
parser.add_argument(
"--biotype_filters",
dest="biotype_filters",
required=True,
help="Comma-separated list of biotypes to filter the gtf ('all' string turns off the filtering).",
)
parser.add_argument(
"--out", dest="output", required=True, help="Path for the output file."
)
parser.add_argument(
"--region_size_3ss_up",
dest="region_size_3ss_up",
required=True,
help="Size of the region upstream 3' splicing site.",
)
parser.add_argument(
"--region_size_3ss_down",
dest="region_size_3ss_down",
required=True,
help="Size of the region downstream 3' splicing site.",
)
parser.add_argument(
"--region_size_5ss_up",
dest="region_size_5ss_up",
required=True,
help="Size of the region upstream 5' splicing site.",
)
parser.add_argument(
"--region_size_5ss_down",
dest="region_size_5ss_down",
required=True,
help="Size of the region downstream 5' splicing site.",
)
parser.add_argument(
"--fai",
dest="fai",
required=True,
help="Path to the genome index in fai format.",
)
parser.add_argument(
"--filtering-logfile",
dest="filter_log",
required=True,
help="Path to the file for filtering logs.",
)
return parser
##############################################################################
def main():
"""Main body of the script."""
# get dict of chrom. lengths
with open(options.fai, "r") as fai:
chrom_len_dict = {line.split("\t")[0]: int(line.split("\t")[1]) for line in fai}
df_gtf = dataframe(options.gtf) # from GTF.py
df_exons = pd.read_csv(options.input, sep="\t")
df_exons["ID"] = df_exons.apply(
lambda x: str(x.seqname)
+ ":"
+ x.event_id.split("-")[1].replace(":", "-")
+ ":"
+ x.event_id[-1],
axis=1,
)
with open(options.filter_log, "w") as log:
log.write("filter-exons.py" + os.linesep)
log.write(str(df_gtf.shape[0]) + "\tentries in GTF file" + os.linesep)
# get rid of duplicates (that differ on other fields) here.
if options.biotype_filters != "all":
df_gtf = (
df_gtf[["seqname", "start", "end", "strand", "feature", "gene_biotype"]]
.copy()
.drop_duplicates()
)
else:
df_gtf = (
df_gtf[["seqname", "start", "end", "strand", "feature"]]
.copy()
.drop_duplicates()
)
df_gtf["name"] = (
df_gtf["seqname"]
+ ":"
+ df_gtf["start"]
+ "-"
+ df_gtf["end"]
+ ":"
+ df_gtf["strand"]
)
# get only exons and filter on gene_biotype (optionally)
df_gtf = df_gtf[df_gtf["feature"] == "exon"].copy()
log.write(str(df_gtf.shape[0]) + "\tunique exons in GTF" + os.linesep)
if options.biotype_filters != "all":
biotypes = options.biotype_filters.split(",")
df_gtf = df_gtf[df_gtf["gene_biotype"].isin(biotypes)].copy()
log.write(
str(df_gtf.shape[0])
+ "\texons filtered on the gene biotype"
+ os.linesep
)
else:
log.write("gene_biotype filtering turned off" + os.linesep)
df_gtf[["start", "end"]] = df_gtf[["start", "end"]].astype(int)
# filter on minimal length
df_gtf["len"] = df_gtf["end"] - df_gtf["start"]
df_gtf = df_gtf[
df_gtf["len"]
> max(int(options.region_size_3ss_down), int(options.region_size_5ss_up))
].copy()
log.write(
str(df_gtf.shape[0])
+ "\tlength > max(3ss_down,5ss_up) so that 3/5 exonic regions do not overlap with intronic sites"
+ os.linesep
)
# filter on chrom beg:
df_gtf = df_gtf[
df_gtf["start"] - int(options.region_size_3ss_up) > 0
].copy() # for exons on + strand
log.write(
str(df_gtf.shape[0])
+ "\tmore than 3ss_up_region_size nucleotides down-stream chromosome start"
+ os.linesep
)
df_gtf = df_gtf[
df_gtf["start"] - int(options.region_size_5ss_down) > 0
].copy() # for exons on - strand
log.write(
str(df_gtf.shape[0])
+ "\tmore than 5ss_down_region_size nucleotides down-stream chromosome start"
+ os.linesep
)
# filter on chrom end:
index_list = []
for i, row in df_gtf.iterrows(): # indices are meaningless but unique
# watch out for +/- strand!
if (
chrom_len_dict[row.seqname]
> row.end + int(options.region_size_5ss_down)
) and (
chrom_len_dict[row.seqname] > row.end + int(options.region_size_3ss_up)
):
index_list.append(i)
df_gtf = df_gtf[df_gtf.index.isin(index_list)].copy()
log.write(
str(df_gtf.shape[0])
+ "\tmore than 5ss_down_region_size and 3ss_up_region_size nucleotides up-stream chromosome end"
+ os.linesep
)
# intersect SUPPA events with this exon list
df_exons = df_exons[df_exons["ID"].isin(list(df_gtf["name"]))].copy()
del df_exons["ID"]
# retain the sorting from SUPPA ioe output file
# IMPORTANT!
# suppa in not-deterministic when it comes to the order of compatible transcripts for a given event
# therefore the output file will be affected
df_exons.to_csv(options.output, sep="\t", index=False)
##############################################################################
if __name__ == "__main__":
try:
# parse the command-line arguments
options = parse_arguments().parse_args()
# set up logging during the execution
formatter = logging.Formatter(
fmt="[%(asctime)s] %(levelname)s - %(message)s",
datefmt="%d-%b-%Y %H:%M:%S",
)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger = logging.getLogger("logger")
logger.setLevel(logging.getLevelName(options.verbosity))
logger.addHandler(console_handler)
if options.logfile is not None:
logfile_handler = logging.handlers.RotatingFileHandler(
options.logfile, maxBytes=50000, backupCount=2
)
logfile_handler.setFormatter(formatter)
logger.addHandler(logfile_handler)
# execute the body of the script
start_time = time.time()
logger.info("Starting script")
main()
seconds = time.time() - start_time
# log the execution time
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
logger.info(
"Successfully finished in %ih:%im:%is",
int(hours),
int(minutes),
int(seconds) if seconds > 1.0 else 1,
)
# log the exception in case it happens
except Exception as e:
logger.exception(str(e))
raise e
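# Hedged invocation sketch (not part of the original script); all file names
# below are hypothetical, but the option names match the parser defined above:
#
#   python filter-exons.py \
#       --in SE_events.ioe --gtf annotation.gtf --fai genome.fa.fai \
#       --biotype_filters protein_coding \
#       --region_size_3ss_up 100 --region_size_3ss_down 50 \
#       --region_size_5ss_up 50 --region_size_5ss_down 100 \
#       --filtering-logfile filtering.log --out filtered_exons.tsv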
|
StarcoderdataPython
|
9672662
|
<reponame>ASH1998/SuZu
from ..logthings import get_module_logger
|
StarcoderdataPython
|
6428937
|
import os
import sys
from flask import send_from_directory
curr_path = os.path.dirname(os.path.abspath(__file__))
src_path = os.path.abspath(os.path.join(curr_path, "../"))
def _build_path():
build_path = os.path.join(src_path, 'ui/build/')
if not os.path.exists(build_path):
raise Exception("Client UI was not built before attempting to serve via Flask.")
return build_path
def _serve_ui(path=''):
build_path = _build_path()
req_path = os.path.join(build_path, path)
if req_path == build_path or not os.path.exists(req_path):
path = "index.html"
return send_from_directory(build_path, path)
def init_ui(app):
if not app.config['SERVE_UI']:
return
app.static_folder = os.path.join(_build_path(), "static")
app.add_url_rule("/", view_func=_serve_ui)
app.add_url_rule("/<path:path>", view_func=_serve_ui)
|
StarcoderdataPython
|
11284181
|
import numpy as np
from sklearn.base import BaseEstimator
from html.parser import HTMLParser  # Python 3 module path (original used the Python 2 HTMLParser module); unused below
class FeatureMapper:
def __init__(self, features):
self.features = features
def fit(self, X, y=None):
for feature_name, column_name, extractor in self.features:
extractor.fit(X[column_name], y)
def transform(self, X):
extracted = []
for feature_name, column_name, extractor in self.features:
fea = extractor.transform(X[column_name])
if hasattr(fea, "toarray"):
extracted.append(fea.toarray())
else:
extracted.append(fea)
if len(extracted) > 1:
return np.concatenate(extracted, axis=1)
else:
return extracted[0]
def fit_transform(self, X, y=None):
extracted = []
for feature_name, column_name, extractor in self.features:
fea = extractor.fit_transform(X[column_name], y)
if hasattr(fea, "toarray"):
extracted.append(fea.toarray())
else:
extracted.append(fea)
if len(extracted) > 1:
return np.concatenate(extracted, axis=1)
else:
return extracted[0]
def identity(x):
return x
class SimpleTransform(BaseEstimator):
def __init__(self, transformer=identity):
self.transformer = transformer
def fit(self, X, y=None):
return self
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X, y=None):
return np.array([self.transformer(x) for x in X], ndmin=2).T
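# Hedged usage sketch (not part of the original file): the column names and the
# toy DataFrame below are made up; it simply shows how FeatureMapper stacks the
# per-column transforms side by side.
if __name__ == "__main__":
    import pandas as pd

    frame = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
    mapper = FeatureMapper([
        ("a_raw", "a", SimpleTransform()),            # identity transform
        ("b_log", "b", SimpleTransform(np.log1p)),    # element-wise log1p
    ])
    print(mapper.fit_transform(frame))                # shape (3, 2): raw "a" next to log1p("b")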
|
StarcoderdataPython
|
1880543
|
<reponame>K-A-R-T/JacMLDash<filename>mldash/web/ui_methods/config.py
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : config.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 09/06/2019
#
# This file is part of JacMLDash.
# Distributed under terms of the MIT license.
import functools
__all__ = ['register_ui_methods', 'get_ui_methods']
_custom_ui_methods = None
def register_ui_methods(cls):
global _custom_ui_methods
_custom_ui_methods = cls
def get_ui_methods():
from . import ui_methods
return ui_methods
def get_custom_ui_method(name):
if _custom_ui_methods is not None and hasattr(_custom_ui_methods, name):
return getattr(_custom_ui_methods, name)
return None
def allow_custom_ui_method(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
custom = get_custom_ui_method(func.__name__)
if custom is not None:
return custom(*args, **kwargs)
return func(*args, **kwargs)
return wrapped
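# Hedged usage sketch (not part of the original file): how a dashboard would
# register overrides; the class and method names below are hypothetical.
#
#     class MyUIMethods:
#         @staticmethod
#         def format_metric(value):
#             return "%.3f" % value
#
#     register_ui_methods(MyUIMethods)
#     # any ui_method wrapped with @allow_custom_ui_method and named
#     # `format_metric` now dispatches to MyUIMethods.format_metric instead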
|
StarcoderdataPython
|
1824553
|
<filename>src/dsalgo/subsequence_test.py
import unittest
import dsalgo.subsequence
class Test(unittest.TestCase):
def test_count_common_subsequences(self) -> None:
a = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
b = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
self.assertEqual(
dsalgo.subsequence.count_common_subsequences(a, b, 10**9 + 7),
846527861,
)
def test_longest_common_subsequence(self) -> None:
s = "algorithm"
t = "datastructure"
self.assertEqual(
dsalgo.subsequence.longest_common_subsequence(s, t),
["a", "r", "t"],
)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
11262059
|
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from iso8601 import parse_date
@dataclass
class SourceSystem:
"""Class for describing an Argus Source system"""
pk: int = None
name: str = None
type: str = None
user: int = None
base_url: str = None
@classmethod
def from_json(cls, data: dict) -> SourceSystem:
"""Returns a SourceSystem object initalized from an Argus JSON dict"""
kwargs = data.copy()
kwargs["type"] = kwargs["type"]["name"]
return cls(**kwargs)
@dataclass
class Incident:
"""Class for describing an Argus Incident"""
pk: int = None
start_time: datetime = None
end_time: datetime = None
source: SourceSystem = None
source_incident_id: str = None
details_url: str = None
description: str = None
level: int = None
ticket_url: str = None
tags: dict = None
stateful: bool = None
open: bool = None
acked: bool = None
@classmethod
def from_json(cls, data: dict) -> Incident:
"""Returns an Incident object initalized from an Argus JSON dict"""
kwargs = data.copy()
if kwargs["start_time"]:
kwargs["start_time"] = parse_date(kwargs["start_time"])
if kwargs["end_time"]:
kwargs["end_time"] = (
parse_date(kwargs["end_time"])
if kwargs["end_time"] != "infinity"
else datetime.max
)
kwargs["source"] = SourceSystem.from_json(kwargs["source"])
tags = [tag["tag"] for tag in kwargs["tags"]]
tags = dict(tag.split("=", maxsplit=1) for tag in tags)
kwargs["tags"] = tags
return cls(**kwargs)
def to_json(self) -> dict:
"""Despite the name, this serializes this object into a dict that is suitable
for feeding to the stdlib JSON serializer.
"""
result = {}
for field in self.__dataclass_fields__:
value = getattr(self, field)
if value or field == "end_time":
if field == "start_time" and isinstance(value, datetime):
value = value.isoformat()
if field == "end_time" and isinstance(value, datetime):
value = value.isoformat() if value != datetime.max else "infinity"
if field == "source":
continue # Source will be assigned by Argus when posted
if field == "tags":
tags = ("{}={}".format(k, v) for k, v in value.items())
value = [{"tag": t} for t in tags]
result[field] = value
if "tags" not in result: # API requires tags to be present, but it can be empty
result["tags"] = []
return result
@dataclass
class Event:
"""Class for describing an Argus Incident Event"""
pk: int = None
actor: str = None
description: str = None
incident: int = None
received: datetime = None
timestamp: datetime = None
type: str = None
@classmethod
def from_json(cls, data: dict) -> Event:
"""Returns an Event object initalized from an Argus JSON dict"""
kwargs = data.copy()
kwargs["actor"] = kwargs.get("actor", {}).get("username")
if kwargs["received"]:
kwargs["received"] = parse_date(kwargs["received"])
if kwargs["timestamp"]:
kwargs["timestamp"] = parse_date(kwargs["timestamp"])
kwargs["type"] = kwargs["type"]["value"]
return cls(**kwargs)
def to_json(self) -> dict:
"""Despite the name, this serializes this object into a dict that is suitable
for feeding to the stdlib JSON serializer.
"""
result = {}
for field in self.__dataclass_fields__:
value = getattr(self, field)
if value:
if field == "actor":
continue # Actor is decided by Argus
if field == "received":
continue # Received timestamp is decided by Argus
if field == "timestamp" and isinstance(value, datetime):
value = value.isoformat()
result[field] = value
return result
@dataclass
class Acknowledgement:
"""Class for describing an Argus Acknowledgement"""
pk: int = None
expiration: datetime = None
event: Event = None
@classmethod
def from_json(cls, data: dict) -> Acknowledgement:
"""Returns an Acknowledgement object initalized from an Argus JSON dict"""
kwargs = {
"pk": data["pk"],
"event": Event.from_json(data["event"]),
"expiration": parse_date(data["expiration"])
if data["expiration"]
else None,
}
return cls(**kwargs)
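if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: build an Incident from a
    # hypothetical Argus-style payload and serialize it back. All field values below are
    # made up for illustration; the only real dependency is the iso8601 package used above.
    payload = {
        "pk": 1,
        "start_time": "2021-01-01T12:00:00+00:00",
        "end_time": "infinity",
        "source": {"pk": 1, "name": "demo", "type": {"name": "demo"}, "user": 1, "base_url": ""},
        "source_incident_id": "42",
        "details_url": "",
        "description": "example incident",
        "level": 3,
        "ticket_url": "",
        "tags": [{"tag": "host=example.org"}],
        "stateful": True,
        "open": True,
        "acked": False,
    }
    incident = Incident.from_json(payload)
    print(incident.tags)       # {'host': 'example.org'}
    print(incident.to_json())  # plain dict, ready for json.dumps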
|
StarcoderdataPython
|
8063886
|
import angr
path_to_bin = "../binaries/02_angr_find_condition"
# Find callback
def good_job(state):
# Get the output of the state
stdout = state.posix.dumps(1)
# If the program echo'ed "Good Job." then we've found a good state
return "Good Job." in str(stdout)
# Avoid callback
def try_again(state):
# Get the output of the state
stdout = state.posix.dumps(1)
# If the program echo'ed "Try again." then we found a state that we want to avoid
return "Try again." in str(stdout)
# Create an angr project
project = angr.Project(path_to_bin)
# Create the begin state starting from the entry point
entry_state = project.factory.entry_state(args=[path_to_bin])
# Create a simulation manager
simulation_manager = project.factory.simulation_manager(entry_state)
# Pass callbacks for states that we should find and avoid
simulation_manager.explore(avoid=try_again, find=good_job)
# If simulation manager has found a state
if simulation_manager.found:
found_state = simulation_manager.found[0]
# Dump the input that was fed to the binary to get to this state
input_str = found_state.posix.dumps(0)
print(input_str) # Get flag!
else:
print("No path found...")
|
StarcoderdataPython
|
1730285
|
import dis
import opcode
import weakref
import types
import copy
import importlib
def is_class_instance(o):
return bool(type(o).__flags__ & (1 << 9))
def walk_global_ops(code):
for instr in dis.get_instructions(code):
op = instr.opcode
if op in (opcode.opmap['STORE_GLOBAL'], opcode.opmap['DELETE_GLOBAL'], opcode.opmap['LOAD_GLOBAL']):
yield op, instr.arg
_code_globals_cache = weakref.WeakKeyDictionary()
def extract_code_globals(co):
    # Cache results per code object; the original built a fresh WeakKeyDictionary on
    # every call, so nothing was ever actually cached.
    out_names = _code_globals_cache.get(co)
    if out_names is None:
        names = co.co_names
        out_names = {names[oparg] for _, oparg in walk_global_ops(co)}
        if co.co_consts:
            for const in co.co_consts:
                if isinstance(const, types.CodeType):
                    out_names |= extract_code_globals(const)
        _code_globals_cache[co] = out_names
    return out_names
def refactor_dict(obj):
new_obj = copy.copy(obj)
for i in new_obj:
if type(new_obj[i]) in (int, float, bool, str, type(None)):
continue
elif type(new_obj[i]) is list:
new_obj[i] = refactor_list(new_obj[i])
elif type(new_obj[i]) is tuple:
new_obj[i] = refactor_list(list(new_obj[i]))
        elif isinstance(new_obj[i], types.FunctionType) or isinstance(new_obj[i], types.MethodType):
new_obj[i] = function_to_dict(new_obj[i])
elif isinstance(new_obj[i], type):
new_obj[i] = class_to_dict(new_obj[i])
elif type(new_obj[i]) is dict:
new_obj[i] = refactor_dict(new_obj[i])
elif is_class_instance(new_obj[i]):
new_obj[i] = class_instance_to_dict(new_obj[i])
else:
raise ValueError(f"{type(new_obj)} isn't supported")
return new_obj
def refactor_list(obj):
new_obj = copy.copy(obj)
for i in range(len(new_obj)):
if type(new_obj[i]) in (int, float, bool, str, type(None)):
continue
elif type(new_obj[i]) is list:
new_obj[i] = refactor_list(new_obj[i])
elif type(new_obj[i]) is tuple:
new_obj[i] = refactor_list(list(new_obj[i]))
        elif isinstance(new_obj[i], types.FunctionType) or isinstance(new_obj[i], types.MethodType):
new_obj[i] = function_to_dict(new_obj[i])
elif isinstance(new_obj[i], type):
new_obj[i] = class_to_dict(new_obj[i])
elif type(new_obj[i]) is dict:
new_obj[i] = refactor_dict(new_obj[i])
elif is_class_instance(new_obj[i]):
new_obj[i] = class_instance_to_dict(new_obj[i])
else:
raise ValueError(f"{type(new_obj)} isn't supported")
return new_obj
def refactor_object(obj):
new_obj = copy.copy(obj)
if type(new_obj) in (int, float, bool, str, type(None)):
return new_obj
elif type(new_obj) is list:
return refactor_list(new_obj)
elif type(new_obj) is tuple:
return refactor_list(list(new_obj))
elif isinstance(new_obj, types.FunctionType) or isinstance(new_obj, types.MethodType):
return function_to_dict(new_obj)
elif type(new_obj) is dict:
return refactor_dict(new_obj)
elif isinstance(new_obj, type):
return class_to_dict(obj)
elif is_class_instance(new_obj):
return class_instance_to_dict(new_obj)
else:
raise ValueError(f"{type(new_obj)} isn't supported")
def restore_object(obj):
if type(obj) in (int, float, bool, str, type(None)):
pass
elif type(obj) is list:
for i in range(len(obj)):
obj[i] = restore_object(obj[i])
elif type(obj) is dict:
if obj.get('type') == 'function':
obj = dict_to_function(obj)
elif obj.get('type') == 'class':
obj = dict_to_class(obj)
elif obj.get('type') == 'class_instance':
obj = dict_to_class_instance(obj)
else:
for i in obj:
obj[i] = restore_object(obj[i])
return obj
def function_to_dict(obj):
used_globals_names = [item for item in extract_code_globals(obj.__code__)]
globs_dct = {}
for x in used_globals_names:
if obj.__globals__.get(x) is not None:
if isinstance(obj.__globals__.get(x), types.ModuleType):
globs_dct[x] = "module"
else:
globs_dct[x] = obj.__globals__[x]
globs_dct['__builtins__'] = 'module'
source = get_function_code_attributes(obj)
return {'type': 'function', 'source': source, "globals": globs_dct}
def dict_to_function(obj):
source = obj['source']
function_globals = obj['globals']
for key in function_globals:
if function_globals[key] == 'module':
if globals().get(key) is not None:
function_globals[key] = globals()[key]
else:
function_globals[key] = importlib.import_module(key)
function_code = types.CodeType(source[0], source[1], source[2], source[3], source[4],
source[5], bytes.fromhex(source[6]), tuple(source[7]),
tuple(source[8]), tuple(source[9]), source[10],
source[11], source[12], bytes.fromhex(source[13]))
return types.FunctionType(function_code, function_globals)
def class_to_dict(obj):
class_attributes = {}
for attribute in dir(obj):
if not attribute.startswith('__'):
value = getattr(obj, attribute)
elif attribute == '__init__':
value = getattr(obj, attribute)
if not isinstance(value, types.FunctionType):
continue
else:
continue
class_attributes[attribute] = refactor_object(value)
return {'type': 'class', 'name': obj.__name__, 'attributes': class_attributes}
def dict_to_class(obj):
return type(obj['name'], (), restore_object(obj['attributes']))
def class_instance_to_dict(obj):
class_attributes = {}
for attribute in dir(obj):
if not attribute.startswith('__'):
value = getattr(obj, attribute)
else:
continue
class_attributes[attribute] = refactor_object(value)
return {'type': 'class_instance', 'name': obj.__class__.__name__, 'attributes': class_attributes}
def dict_to_class_instance(obj):
return type(obj['name'], (), restore_object(obj['attributes']))()
def get_function_code_attributes(obj):
source = [
obj.__code__.co_argcount,
obj.__code__.co_posonlyargcount,
obj.__code__.co_kwonlyargcount,
obj.__code__.co_nlocals,
obj.__code__.co_stacksize,
obj.__code__.co_flags,
obj.__code__.co_code.hex(),
list(obj.__code__.co_consts),
list(obj.__code__.co_names),
list(obj.__code__.co_varnames),
obj.__code__.co_filename,
obj.__code__.co_name,
obj.__code__.co_firstlineno,
obj.__code__.co_lnotab.hex()
]
return source
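if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: round-trip a small function
    # through its dict representation. Assumes a CPython version whose types.CodeType
    # constructor matches the 14 attributes collected above (roughly 3.8-3.10).
    import math
    def area(r):
        return math.pi * r * r
    payload = refactor_object(area)      # plain dict describing the function
    restored = restore_object(payload)   # function rebuilt from that dict
    print(restored(2.0))                 # same result as area(2.0)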
|
StarcoderdataPython
|
6627680
|
<gh_stars>1000+
from collections import OrderedDict
from imagededup.methods.hashing import Hashing
from imagededup.handlers.search.bktree import BKTree, BkTreeNode
# Test BkTreeNode
def initialize_for_bktree():
hash_dict = OrderedDict(
{'a': '9', 'b': 'D', 'c': 'A', 'd': 'F', 'e': '2', 'f': '6', 'g': '7', 'h': 'E'}
)
dist_func = Hashing.hamming_distance
return hash_dict, dist_func
def test_bktreenode_correct_initialization():
node_name, node_value, parent_name = 'test_node', '1aef', None
node = BkTreeNode(node_name, node_value, parent_name)
assert node.node_name == 'test_node'
assert node.node_value == '1aef'
assert node.parent_name is None
assert len(node.children) == 0
# test BKTree class
def test_insert_tree():
    # initialize root node and add 1 new node, check it goes as root's child and has its parent as root
_, dist_func = initialize_for_bktree()
hash_dict = {'a': '9', 'b': 'D'}
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
assert 'b' in list(bk.dict_all['a'].children.keys())
assert bk.dict_all['b'].parent_name == 'a'
def test_insert_tree_collision():
# initialize root node, add 1 new node and enter another node with same distance from root, check it goes not as
# root's child but the other node's child
_, dist_func = initialize_for_bktree()
hash_dict = OrderedDict(
{'a': '9', 'b': 'D', 'c': '8'}
) # to guarantee that 'a' is the root of the tree
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
assert len(bk.dict_all[bk.ROOT].children) == 1
assert 'c' in list(bk.dict_all['b'].children.keys())
def test_insert_tree_different_nodes():
# initialize root node, add 1 new node and enter another node with different distance from root, check it goes as
# root's child and not as the other node's child
_, dist_func = initialize_for_bktree()
hash_dict = OrderedDict(
{'a': '9', 'b': 'D', 'c': 'F'}
) # to guarantee that 'a' is the root of the tree
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
assert len(bk.dict_all[bk.ROOT].children) == 2
assert set(['b', 'c']) <= set(bk.dict_all[bk.ROOT].children.keys())
def test_insert_tree_check_distance():
# initialize root node, add 1 new node and enter another node with different distance from root, check that the
# distance recorded in the root's children dictionary is as expected
_, dist_func = initialize_for_bktree()
hash_dict = OrderedDict(
{'a': '9', 'b': 'D', 'c': 'F'}
) # to guarantee that 'a' is the root of the tree
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
assert bk.dict_all[bk.ROOT].children['b'] == 1
assert bk.dict_all[bk.ROOT].children['c'] == 2
def test_construct_tree():
# Input a complete tree and check for each node the children and parents
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
# check root
assert bk.ROOT == 'a'
# check that expected leaf nodes have no children (they're actually leaf nodes)
leaf_nodes = set(
[k for k in bk.dict_all.keys() if len(bk.dict_all[k].children) == 0]
)
expected_leaf_nodes = set(['b', 'd', 'f', 'h'])
assert leaf_nodes == expected_leaf_nodes
# check that root node ('a') has 4 children
assert len(bk.dict_all[bk.ROOT].children) == 4
    # check that 'c' has 'd' as its child at distance 2
assert bk.dict_all['c'].children['d'] == 2
def test_search():
# Input a tree and send a search query, check whether correct number of retrievals are returned
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
query = '5'
valid_retrievals = bk.search(query, tol=2)
assert len(valid_retrievals) == 5
def test_search_correctness():
# Input a tree and send a search query, check whether correct retrievals are returned
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
query = '5'
valid_retrievals = bk.search(query, tol=2)
assert set([i[0] for i in valid_retrievals]) == set(['a', 'f', 'g', 'd', 'b'])
def test_search_zero_tolerance():
# Input a tree and send a search query, check whether zero retrievals are returned for zero tolerance
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
query = '5'
valid_retrievals = bk.search(query, tol=0)
assert len(valid_retrievals) == 0
def test_search_dist():
# Input a tree and send a search query, check whether correct distance for a retrieval is returned
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
query = '5'
valid_retrievals = bk.search(query, tol=2)
assert [i for i in valid_retrievals if i[0] == 'a'][0][1] == 2
def test_get_next_candidates_valid():
# Give a partial tree as input and check that for a query, expected candidates and validity flag are obtained
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
query = '5'
candidates, validity, dist = bk._get_next_candidates(
query, bk.dict_all[bk.ROOT], tolerance=2
)
candidates = set(candidates)
assert candidates <= set(['b', 'c', 'e', 'f'])
assert validity
def test_get_next_candidates_invalid():
# Give a tree as input and check that for a query, validity flag is 0
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
query = '5'
_, validity, _ = bk._get_next_candidates(query, bk.dict_all[bk.ROOT], tolerance=1)
assert not validity
def test_tolerance_affects_retrievals():
# Give a partial tree as input and check that for a query, increased tolerance gives more retrievals as expected for
# the input tree
hash_dict, dist_func = initialize_for_bktree()
bk = BKTree(hash_dict, dist_func)
assert bk.ROOT == 'a'
query = '5'
candidates, _, _ = bk._get_next_candidates(query, bk.dict_all[bk.ROOT], tolerance=1)
low_tolerance_candidate_len = len(candidates)
candidates, _, _ = bk._get_next_candidates(query, bk.dict_all[bk.ROOT], tolerance=2)
high_tolerance_candidate_len = len(candidates)
assert high_tolerance_candidate_len > low_tolerance_candidate_len
|
StarcoderdataPython
|
1653769
|
<filename>backend/restful_api/urls.py<gh_stars>0
from django.conf.urls import url
from django.urls import include
from rest_framework import routers
from rest_framework_swagger.views import get_swagger_view
from . import views
router = routers.DefaultRouter()
router.register(r'user_attributes', views.UserAttributesViewSet)
router.register(r'solved_mission', views.solvedMissionViewSet)
router.register(r'placed_towers', views.placedTowersViewSet)
router.register(r'placed_landmarks', views.placedLandmarksViewSet)
router.register(r'all_solved_missions', views.AllSolvedMissionViewSet)
schema_view = get_swagger_view(title='Kort-cept API')
urlpatterns = [
# Rest api
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), # noqa
url(r'^auth/', include('djoser.urls')),
url(r'^auth/', include('djoser.urls.authtoken')),
url(r'^auth/', include('djoser.urls.jwt')),
# swagger
url(r'^swagger', schema_view)
]
|
StarcoderdataPython
|
1826050
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: flattrs_test
import flatbuffers
class AllScalars(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsAllScalars(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = AllScalars()
x.Init(buf, n + offset)
return x
# AllScalars
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# AllScalars
def Boolean(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# AllScalars
def Uint8(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# AllScalars
def Uint16(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
return 0
# AllScalars
def Uint32(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# AllScalars
def Uint64(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# AllScalars
def Int8(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
return 0
# AllScalars
def Int16(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 0
# AllScalars
def Int32(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# AllScalars
def Int64(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
return 0
# AllScalars
def Float32(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
return 0.0
# AllScalars
def Float64(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
def AllScalarsStart(builder): builder.StartObject(11)
def AllScalarsAddBoolean(builder, boolean): builder.PrependBoolSlot(0, boolean, 0)
def AllScalarsAddUint8(builder, uint8): builder.PrependUint8Slot(1, uint8, 0)
def AllScalarsAddUint16(builder, uint16): builder.PrependUint16Slot(2, uint16, 0)
def AllScalarsAddUint32(builder, uint32): builder.PrependUint32Slot(3, uint32, 0)
def AllScalarsAddUint64(builder, uint64): builder.PrependUint64Slot(4, uint64, 0)
def AllScalarsAddInt8(builder, int8): builder.PrependInt8Slot(5, int8, 0)
def AllScalarsAddInt16(builder, int16): builder.PrependInt16Slot(6, int16, 0)
def AllScalarsAddInt32(builder, int32): builder.PrependInt32Slot(7, int32, 0)
def AllScalarsAddInt64(builder, int64): builder.PrependInt64Slot(8, int64, 0)
def AllScalarsAddFloat32(builder, float32): builder.PrependFloat32Slot(9, float32, 0.0)
def AllScalarsAddFloat64(builder, float64): builder.PrependFloat64Slot(10, float64, 0.0)
def AllScalarsEnd(builder): return builder.EndObject()
|
StarcoderdataPython
|
5137273
|
__version__ = "1.0.3"
from .DialogTag import DialogTag
|
StarcoderdataPython
|
6515706
|
<reponame>Rob2n/macro_pack
VBA = \
"""
'Create A Text and fill it
' Will overwrite existing file
Private Sub CreateTxtFile(FilePath As String, FileContent As String)
Dim fso As Object
Set fso = CreateObject("Scripting.FileSystemObject")
Dim Fileout As Object
Set Fileout = fso.CreateTextFile(FilePath, True, True)
Fileout.Write FileContent
Fileout.Close
End Sub
"""
|
StarcoderdataPython
|
4819517
|
# Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# <NAME>*, <NAME>, <NAME>
# {jilin, <EMAIL>, <EMAIL>
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import sys
sys.path.append("..")
from archs import repvgg
class TemporalShift(nn.Module):
def __init__(self, net, input_channels, n_segment=3, n_div=8, inplace=False, soft=False, init_mode="shift"):
super(TemporalShift, self).__init__()
self.net = net
# self.input_channels = net.in_channels
self.input_channels = input_channels
self.n_segment = n_segment
self.fold_div = n_div
self.fold = self.input_channels // self.fold_div
self.inplace = inplace
self.soft = soft
self.mode = init_mode
if self.soft:
self.conv_shift = nn.Conv1d(
self.input_channels, self.input_channels,
kernel_size=3, padding=1, groups=self.input_channels,
bias=False)
# weight_size: (self.input_channels, 1, 3)
            # the three initialization methods below
if self.mode == 'shift':
# import pdb; pdb.set_trace()
self.conv_shift.weight.requires_grad = True
self.conv_shift.weight.data.zero_()
self.conv_shift.weight.data[:self.fold, 0, 2] = 1 # shift left
self.conv_shift.weight.data[self.fold: 2 * self.fold, 0, 0] = 1 # shift right
if 2*self.fold < self.input_channels:
self.conv_shift.weight.data[2 * self.fold:, 0, 1] = 1 # fixed
elif self.mode == 'fixed':
self.conv_shift.weight.requires_grad = True
self.conv_shift.weight.data.zero_()
self.conv_shift.weight.data[:, 0, 1] = 1 # fixed
elif self.mode == 'norm':
self.conv_shift.weight.requires_grad = True
if inplace:
print('=> Using in-place shift...')
print('=> Using fold div: {}'.format(self.fold_div))
def forward(self, x):
        if self.soft:  # learnable 1D temporal kernel
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
x = x.permute([0, 3, 4, 2, 1]) # (n_batch, h, w, c, n_segment)
x = x.contiguous().view(n_batch*h*w, c, self.n_segment)
x = self.conv_shift(x) # (n_batch*h*w, c, n_segment)
x = x.view(n_batch, h, w, c, self.n_segment)
x = x.permute([0, 4, 3, 1, 2]) # (n_batch, n_segment, c, h, w)
x = x.contiguous().view(nt, c, h, w)
else:
x = self.shift(x, self.n_segment, fold_div=self.fold_div, inplace=self.inplace)
return self.net(x)
@staticmethod
def shift(x, n_segment, fold_div=8, inplace=False):
nt, c, h, w = x.size()
n_batch = nt // n_segment
x = x.view(n_batch, n_segment, c, h, w)
fold = c // fold_div
if inplace:
# Due to some out of order error when performing parallel computing.
# May need to write a CUDA kernel.
raise NotImplementedError
# out = InplaceShift.apply(x, fold)
else:
out = torch.zeros_like(x)
out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift
return out.view(nt, c, h, w)
class InplaceShift(torch.autograd.Function):
# Special thanks to @raoyongming for the help to this function
@staticmethod
def forward(ctx, input, fold):
# not support higher order gradient
# input = input.detach_()
ctx.fold_ = fold
n, t, c, h, w = input.size()
buffer = input.data.new(n, t, fold, h, w).zero_()
buffer[:, :-1] = input.data[:, 1:, :fold]
input.data[:, :, :fold] = buffer
buffer.zero_()
buffer[:, 1:] = input.data[:, :-1, fold: 2 * fold]
input.data[:, :, fold: 2 * fold] = buffer
return input
@staticmethod
def backward(ctx, grad_output):
# grad_output = grad_output.detach_()
fold = ctx.fold_
n, t, c, h, w = grad_output.size()
buffer = grad_output.data.new(n, t, fold, h, w).zero_()
buffer[:, 1:] = grad_output.data[:, :-1, :fold]
grad_output.data[:, :, :fold] = buffer
buffer.zero_()
buffer[:, :-1] = grad_output.data[:, 1:, fold: 2 * fold]
grad_output.data[:, :, fold: 2 * fold] = buffer
return grad_output, None
class TemporalPool(nn.Module):
def __init__(self, net, n_segment):
super(TemporalPool, self).__init__()
self.net = net
self.n_segment = n_segment
def forward(self, x):
x = self.temporal_pool(x, n_segment=self.n_segment)
return self.net(x)
@staticmethod
def temporal_pool(x, n_segment):
nt, c, h, w = x.size()
n_batch = nt // n_segment
x = x.view(n_batch, n_segment, c, h, w).transpose(1, 2) # n, c, t, h, w
x = F.max_pool3d(x, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))
x = x.transpose(1, 2).contiguous().view(nt // 2, c, h, w)
return x
def make_temporal_shift(net, n_segment, n_div=8, soft = False, init_mode="shift", place='blockres', temporal_pool=False, deploy=False):
'''
    1D temporal convolution parameter initialization: "shift" initializes to [0, 0, 1] (shift left); "fixed" is the unbiased initialization [1, 1, 1]; "norm" uses random normal initialization
'''
if temporal_pool:
n_segment_list = [n_segment, n_segment // 2, n_segment // 2, n_segment // 2]
else:
n_segment_list = [n_segment] * 4
assert n_segment_list[-1] > 0
print('=> n_segment per stage: {}'.format(n_segment_list))
# for 0.5 resnet18 >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
if isinstance(net, torchvision.models.ResNet):
# if 1:
# for 0.5 resnet18 <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
if place == 'block':
def make_block_temporal(stage, this_segment):
blocks = list(stage.children())
print('=> Processing stage with {} blocks'.format(len(blocks)))
for i, b in enumerate(blocks):
# import pdb; pdb.set_trace()
blocks[i] = TemporalShift(b, b.conv1.in_channels, n_segment=this_segment, n_div=n_div, soft=soft, init_mode=init_mode)
return nn.Sequential(*(blocks))
net.layer1 = make_block_temporal(net.layer1, n_segment_list[0])
net.layer2 = make_block_temporal(net.layer2, n_segment_list[1])
net.layer3 = make_block_temporal(net.layer3, n_segment_list[2])
net.layer4 = make_block_temporal(net.layer4, n_segment_list[3])
elif 'blockres' in place:
n_round = 1
if len(list(net.layer3.children())) >= 23: # > res101
n_round = 2
print('=> Using n_round {} to insert temporal shift'.format(n_round))
def make_block_temporal(stage, this_segment):
blocks = list(stage.children())
print('=> Processing stage with {} blocks residual'.format(len(blocks)))
for i, b in enumerate(blocks):
# import pdb; pdb.set_trace()
if i % n_round == 0:
blocks[i].conv1 = TemporalShift(b.conv1, b.conv1.in_channels, n_segment=this_segment, n_div=n_div)
return nn.Sequential(*blocks)
net.layer1 = make_block_temporal(net.layer1, n_segment_list[0])
net.layer2 = make_block_temporal(net.layer2, n_segment_list[1])
net.layer3 = make_block_temporal(net.layer3, n_segment_list[2])
net.layer4 = make_block_temporal(net.layer4, n_segment_list[3])
elif isinstance(net, repvgg.RepVGG):
if place == 'block':
def make_block_temporal(stage, this_segment):
blocks = list(stage.children())
print('=> Processing stage with {} blocks'.format(len(blocks)))
for i, b in enumerate(blocks):
# import pdb; pdb.set_trace()
blocks[i] = TemporalShift(b, b.in_channels, n_segment=this_segment, n_div=n_div, soft=soft, init_mode=init_mode)
return nn.Sequential(*(blocks))
net.stage1 = make_block_temporal(net.stage1, n_segment_list[0])
net.stage2 = make_block_temporal(net.stage2, n_segment_list[1])
net.stage3 = make_block_temporal(net.stage3, n_segment_list[2])
net.stage4 = make_block_temporal(net.stage4, n_segment_list[3])
elif 'blockres' in place:
n_round = 1
            # The deepest RepVGG stage (stage3) has at most 16 blocks (and at least 14), so there is no need to insert the shift module only every other block
print('=> Using n_round {} to insert temporal shift'.format(n_round))
def make_block_temporal(stage, this_segment):
blocks = list(stage.children())
print('=> Processing stage with {} blocks residual'.format(len(blocks)))
for i, b in enumerate(blocks):
# import pdb; pdb.set_trace()
if i % n_round == 0:
if deploy:
blocks[i].rbr_reparam = TemporalShift(b.rbr_reparam, b.rbr_reparam.in_channels, n_segment=this_segment, n_div=n_div, soft=soft, init_mode=init_mode)
else:
if blocks[i].rbr_dense:
blocks[i].rbr_dense.conv = TemporalShift(b.rbr_dense.conv, b.rbr_dense.conv.in_channels, n_segment=this_segment, n_div=n_div, soft=soft, init_mode=init_mode)
if blocks[i].rbr_1x1:
blocks[i].rbr_1x1.conv = TemporalShift(b.rbr_1x1.conv, b.rbr_1x1.conv.in_channels, n_segment=this_segment, n_div=n_div, soft=soft, init_mode=init_mode)
return nn.Sequential(*blocks)
            # net.stage0 = make_block_temporal(net.stage0, n_segment_list[0]) # uncommenting this adds temporal fusion at the low-level stage as well
net.stage1 = make_block_temporal(net.stage1, n_segment_list[0])
net.stage2 = make_block_temporal(net.stage2, n_segment_list[1])
net.stage3 = make_block_temporal(net.stage3, n_segment_list[2])
net.stage4 = make_block_temporal(net.stage4, n_segment_list[3])
else:
raise NotImplementedError(place)
def make_temporal_pool(net, n_segment):
import torchvision
if isinstance(net, torchvision.models.ResNet):
print('=> Injecting nonlocal pooling')
net.layer2 = TemporalPool(net.layer2, n_segment)
else:
raise NotImplementedError
if __name__ == '__main__':
print('=== Temporal Soft Shift RepVGG ===')
from archs.repvgg import repvgg_A0, repvgg_B1g2
# imagenet pretrained, deploy MODE
is_deploy = False
model1 = repvgg_A0(pretrained=True, deploy=is_deploy)
# model1 = repvgg_B1g2(pretrained=True, deploy=is_deploy)
# model1 = getattr(torchvision.models, "resnet18")(True)
# model4 = getattr(torchvision.models, "resnet50")(True)
print("Origin Net:", model1)
# make_temporal_shift(model3, n_segment=8, n_div=8, place="block", temporal_pool=False)
make_temporal_shift(model1, n_segment=8, n_div=8, place="blockres", temporal_pool=False, deploy=is_deploy, soft=True)
print("\n\nTSM:", model1)
import pdb; pdb.set_trace()
print('=> Testing CPU...')
# test forward
print('=> Testing forward...')
with torch.no_grad():
for i in range(10):
print(i)
x = torch.rand(2 * 8, 3, 224, 224) # (16, 3, 224, 224)
y = model1(x) # (16, 1000)
print(y.shape)
# test backward
    print('=> Testing backward...')
with torch.enable_grad():
for i in range(10):
print(i)
x1 = torch.rand(2 * 8, 3, 224, 224)
x1.requires_grad_()
y1 = model1(x1)
grad1 = torch.autograd.grad((y1 ** 2).mean(), [x1])[0]
print('=> Testing GPU...')
model1.cuda()
# test forward
print('=> Testing forward...')
with torch.no_grad():
for i in range(10):
print(i)
x = torch.rand(2 * 8, 3, 224, 224).cuda()
y1 = model1(x)
# test backward
print('=> Testing backward...')
with torch.enable_grad():
for i in range(10):
print(i)
x1 = torch.rand(2 * 8, 3, 224, 224).cuda()
x1.requires_grad_()
y1 = model1(x1)
grad1 = torch.autograd.grad((y1 ** 2).mean(), [x1])[0]
print('Test passed.')
|
StarcoderdataPython
|
9608629
|
<gh_stars>1-10
from conans import ConanFile, CMake, tools, errors
import os, re
from six import StringIO # Python 2 and 3 compatible
def find_all_headers(root):
result = []
entries = os.walk(root)
for entry in entries:
for file in entry[2]:
if file.endswith('.h'):
result.append(os.path.join(entry[0], file))
return result
def fix(all_headers, src):
f = open(src, "r")
base = os.path.dirname(src)
regex = r'^#include\s*"([^"]*)"'
result = ""
for line in f:
match = re.match(regex, line)
if match:
if not os.path.exists(os.path.join(base, match.group(1))):
matches = [x for x in all_headers if os.path.basename(x) == os.path.basename(match.group(1))]
if len(matches) == 1:
relpath = os.path.relpath(matches[0], base)
result += '#include "%s"\n' % relpath
continue
result += line
f.close()
f = open(src, "w")
f.write(result)
def merge_two_dicts(x, y):
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
class SkiaConan(ConanFile):
name = "skia"
version = "master"
license = "<Put the package license here>"
author = "<NAME> <<EMAIL>>"
url = "https://github.com/Maddimax/conan-skia.git"
description = "A 2D/3D Vector rendering engine"
topics = ("render", "vector", "2d", "3d")
settings = "os", "compiler", "build_type", "arch"
skia_options = {
"skia_enable_atlas_text" : [True, False],
"skia_enable_ccpr" : [True, False],
"skia_enable_discrete_gpu" : [True, False],
"skia_enable_flutter_defines" : [True, False],
"skia_enable_fontmgr_android" : [True, False],
"skia_enable_fontmgr_custom" : [True, False],
"skia_enable_fontmgr_custom_empty" : [True, False],
"skia_enable_fontmgr_empty" : [True, False],
"skia_enable_fontmgr_fuchsia" : [True, False],
"skia_enable_fontmgr_win" : [True, False],
"skia_enable_fontmgr_win_gdi" : [True, False],
"skia_enable_gpu" : [True, False],
"skia_enable_nima" : [True, False],
"skia_enable_nvpr" : [True, False],
"skia_enable_particles" : [True, False],
"skia_enable_pdf" : [True, False],
"skia_enable_skottie" : [True, False],
"skia_enable_skpicture" : [True, False],
"skia_enable_skshaper" : [True, False],
"skia_enable_spirv_validation" : [True, False],
"skia_enable_tools" : [True, False],
"skia_enable_vulkan_debug_layers" : [True, False],
"skia_generate_workarounds" : [True, False],
"skia_use_angle" : [True, False],
"skia_use_dng_sdk" : [True, False],
"skia_use_egl" : [True, False],
"skia_use_expat" : [True, False],
"skia_use_fixed_gamma_text" : [True, False],
"skia_use_fontconfig" : [True, False],
"skia_use_fonthost_mac" : [True, False],
"skia_use_freetype" : [True, False],
"skia_use_harfbuzz" : [True, False],
"skia_use_icu" : [True, False],
"skia_use_libheif" : [True, False],
"skia_use_libjpeg_turbo" : [True, False],
"skia_use_libpng" : [True, False],
"skia_use_libwebp" : [True, False],
"skia_use_lua" : [True, False],
"skia_use_metal" : [True, False],
"skia_use_opencl" : [True, False],
"skia_use_piex" : [True, False],
"skia_use_sfntly" : [True, False],
"skia_use_system_expat" : [True, False],
"skia_use_system_harfbuzz" : [True, False],
"skia_use_system_icu" : [True, False],
"skia_use_system_libjpeg_turbo" : [True, False],
"skia_use_system_libpng" : [True, False],
"skia_use_system_libwebp" : [True, False],
"skia_use_system_zlib" : [True, False],
"skia_use_vulkan" : [True, False],
"skia_use_wuffs" : [True, False],
"skia_use_x11" : [True, False],
"skia_use_xps" : [True, False],
"skia_use_zlib" : [True, False],
"is_official_build" : [True, False]
}
options = merge_two_dicts({ "shared": [True, False] }, skia_options)
default_options = {
"shared":False,
"harfbuzz:with_icu" : True,
# Skia options
"skia_enable_atlas_text" : False,
"skia_enable_ccpr" : True,
"skia_enable_discrete_gpu" : True,
"skia_enable_flutter_defines" : False,
"skia_enable_fontmgr_android" : False,
"skia_enable_fontmgr_custom" : False,
"skia_enable_fontmgr_custom_empty" : False,
"skia_enable_fontmgr_empty" : False,
"skia_enable_fontmgr_fuchsia" : False,
"skia_enable_fontmgr_win" : False,
"skia_enable_fontmgr_win_gdi" : False,
"skia_enable_gpu" : False,
"skia_enable_nima" : False,
"skia_enable_nvpr" : True,
"skia_enable_particles" : True,
"skia_enable_pdf" : True,
"skia_enable_skottie" : True,
"skia_enable_skpicture" : True,
"skia_enable_skshaper" : True,
"skia_enable_spirv_validation" : False,
"skia_enable_tools" : False,
"skia_enable_vulkan_debug_layers" : False,
"skia_generate_workarounds" : False,
"skia_use_angle" : False,
"skia_use_dng_sdk" : True,
"skia_use_egl" : False,
"skia_use_expat" : True,
"skia_use_fixed_gamma_text" : False,
"skia_use_fontconfig" : False,
"skia_use_fonthost_mac" : True,
"skia_use_freetype" : False,
"skia_use_harfbuzz" : True,
"skia_use_icu" : True,
"skia_use_libheif" : False,
"skia_use_libjpeg_turbo" : True,
"skia_use_libpng" : True,
"skia_use_libwebp" : True,
"skia_use_lua" : False,
"skia_use_metal" : False,
"skia_use_opencl" : False,
"skia_use_piex" : True,
"skia_use_sfntly" : True,
"skia_use_system_expat" : True,
"skia_use_system_harfbuzz" : True,
"skia_use_system_icu" : True,
"skia_use_system_libjpeg_turbo" : True,
"skia_use_system_libpng" : False,
"skia_use_system_libwebp" : False,
"skia_use_system_zlib" : False,
"skia_use_vulkan" : False,
"skia_use_wuffs" : False,
"skia_use_x11" : False,
"skia_use_xps" : True,
"skia_use_zlib" : True,
"is_official_build" : True
}
generators = "cmake"
build_policy = "missing"
scm = {
"type": "git",
"url": "auto",
"revision": "auto",
"submodule" : "shallow"
}
revision_mode = "scm"
def get_skia_option_value(self, option_name):
buf = StringIO()
self.run('bin/gn args out/conan-build --list=%s --short' % (option_name), output=buf, cwd='skia')
output = buf.getvalue()
pattern = r'%s = (.*)' % (option_name)
match = re.match(pattern, output)
if match:
if match.group(1) == 'true':
return True
elif match.group(1) == 'false':
return False
raise errors.ConanInvalidConfiguration("Could not parse gn comfiguration options")
def requirements(self):
if self.options.skia_use_system_icu and self.options.skia_use_icu:
self.requires("icu/63.1@bincrafters/stable")
if self.options.skia_use_system_libjpeg_turbo and self.options.skia_use_libjpeg_turbo:
self.requires("libjpeg-turbo/1.5.2@bincrafters/stable")
if self.options.skia_use_system_harfbuzz and self.options.skia_use_harfbuzz:
self.requires("harfbuzz/2.4.0@maddimax/stable")
if self.options.skia_use_system_libpng and self.options.skia_use_libpng:
self.requires("libpng/1.6.36@bincrafters/stable")
def source(self):
# Fix include paths ...
self.output.info("Fixing headers:")
all_headers = find_all_headers(os.path.join(self.source_folder, "skia"))
for header in all_headers:
fix(all_headers, header)
if len(all_headers) == 0:
print("Error: No header files found")
exit(1)
self.output.info("Fixed %i files" % (len(all_headers)))
# Fetch dependencies
self.run('/usr/local/bin/python skia/tools/git-sync-deps')
def configure(self):
if self.options.skia_use_metal:
if not self.settings.os == "iOS" and not self.settings.os == "Macos":
raise errors.ConanInvalidConfiguration("Metal is only supported on darwin platforms: %s" % self.settings.os)
if self.settings.os == "iOS":
self.options.skia_use_fonthost_mac = False
            self.options.skia_use_system_expat = False
self.options.skia_use_system_harfbuzz = False
self.options.skia_use_system_icu = False
self.options.skia_use_system_libjpeg_turbo = False
self.options.skia_use_system_libpng = False
self.options.skia_use_system_libwebp = False
self.options.skia_use_system_zlib = False
def build(self):
flags = []
for k,v in self.deps_cpp_info.dependencies:
self.output.info("Adding dependency: %s - %s" %(k, v.rootpath))
flags += ['\\"-I%s/include\\"' % (v.rootpath), '\\"-I%s/include/%s\\"' % (v.rootpath, k)]
flag_str = 'extra_cflags_cc=[%s]' % ",".join(flags)
opts = [flag_str]
for k,v in self.options.items():
if k in self.skia_options:
opts += [("%s=%s" % (k,v)).lower()]
if self.settings.build_type == "Debug":
opts += ["is_debug=true"]
else:
opts += ["is_debug=false"]
if self.settings.os == "iOS":
opts += ['target_os=\\"ios\\"']
if self.settings.arch == "armv8":
opts += ['target_cpu=\\"arm64\\"']
else:
opts += ['target_cpu=\\"x86_64\\"']
if len(opts) > 0:
opts = '"--args=%s"' % " ".join(opts)
else:
opts = ""
self.output.info("gn options: %s" % (opts))
self.run('bin/gn gen out/conan-build %s ' %(opts), cwd="skia")
failed = False
for k,v in self.options.items():
if k in self.skia_options:
actual = self.get_skia_option_value(k)
if not ("%s" % actual) == ("%s" % v):
failed = True
self.output.warn("Mismatch in %s: %s => %s" % ( k, v, actual ))
if failed:
raise errors.ConanInvalidConfiguration("Final gn configuration did not match requested config")
self.run('ninja -C out/conan-build', cwd="skia")
def package(self):
self.copy("*.h", dst="include/skia", src="skia", keep_path=True)
self.copy("*.dll", dst="bin", src="skia/out/conan-build",keep_path=False)
self.copy("*.so", dst="lib", src="skia/out/conan-build",keep_path=False)
self.copy("*.dylib", dst="lib", src="skia/out/conan-build",keep_path=False)
self.copy("*.a", dst="lib", src="skia/out/conan-build", keep_path=False)
# if self.settings.build_type == "Release":
# libs = os.listdir(os.path.join(self.package_folder, "lib"))
# self.output.info("Trying to strip: %s" %(libs))
# for lib in libs:
# self.run('strip -S %s' % (os.path.join(self.package_folder, "lib" ,lib)))
def package_info(self):
libs = os.listdir(os.path.join(self.package_folder, "lib"))
libs = [(x[3:])[:-2] for x in libs]
self.cpp_info.libs = libs
if self.settings.os == "Macos":
self.cpp_info.exelinkflags += ["-framework AppKit"]
if self.options.skia_use_metal:
self.cpp_info.defines += ["SK_METAL=1"]
self.cpp_info.exelinkflags += ["-framework Metal", "-framework MetalKit"]
|
StarcoderdataPython
|
6476855
|
'''
@author: <NAME> (jakpra)
@copyright: Copyright 2020, <NAME>
@license: Apache 2.0
'''
import sys
from collections import Counter
import json
from tree_st.util import argparse
from tree_st.util.reader import ASTDerivationsReader, AUTODerivationsReader, StaggedDerivationsReader
from tree_st.util.statistics import print_counter_stats
from tree_st.ccg.category import Slashes as sl
def main(args):
out = open(args.out, 'w', newline='\n') if args.out else sys.stdout
if args.training_format == 'ast':
dr = ASTDerivationsReader
elif args.training_format == 'stagged':
dr = StaggedDerivationsReader
else:
dr = AUTODerivationsReader
# combinators = Counter()
atomic_categories = Counter()
categories = Counter()
# category_shapes = Counter()
# depths = Counter()
# slashes = Counter()
# tl_slashes = Counter()
# addresses = Counter()
# unary = Counter()
# unary_levels = Counter()
# sentence_unary = Counter()
for filepath in args.training_files:
ds = dr(filepath, validate=False)
for d in ds:
deriv = d['DERIVATION']
print(d['ID'], file=sys.stderr)
if args.derivation:
print(d['ID'], file=out)
print(deriv, file=out)
lex = deriv.get_lexical(ignore_attr=False)
# combinators.update(deriv.count_combinators())
for dln in lex:
atomic_categories.update(dln.category1.count_atomic_categories(concat_attr=True))
categories[dln.category1] += 1
ds.close()
# print('Category depths', '---------------', sep='\n', file=out)
# print_counter_stats(depths, 'depth', None, file=out)
#
# print(file=out)
# print('Categories', '-----------------', sep='\n', file=out)
# print_counter_stats(categories, 'category', None, file=out)
#
# print(file=out)
# print('Category shapes', '-----------------', sep='\n', file=out)
# print_counter_stats(category_shapes, 'shape', None, file=out)
#
# print(file=out)
# print('Top-level slashes', '-----------', sep='\n', file=out)
# print_counter_stats(tl_slashes, 'slash', None, file=out)
#
# print(file=out)
# print('Slash addresses', '-----------', sep='\n', file=out)
# print_counter_stats(addresses, 'address', None, file=out)
#
# print(file=out)
# print('Slashes', '-----------', sep='\n', file=out)
# print_counter_stats(slashes, 'slash', None, file=out)
#
# print(file=out)
# print('Atomic categories', '-----------------', sep='\n', file=out)
# print_counter_stats(atomic_categories, 'category', None, file=out)
#
# print(file=out)
# print('Combinators', '-----------', sep='\n', file=out)
# print_counter_stats(combinators, 'combinator', None, file=out)
#
# print(file=out)
# print('# unary combinators in a row', '-----------', sep='\n', file=out)
# print_counter_stats(unary, 'unaries in a row', None, file=out)
#
# print(file=out)
# print('Level of unary combinators', '-----------', sep='\n', file=out)
# print_counter_stats(unary_levels, 'level', None, file=out)
#
# print(file=out)
# print('Unary combinators per sentence', '-----------', sep='\n', file=out)
# print_counter_stats(sentence_unary, 'unaries per sentence', None, file=out)
# print freqs
#
# for i, (k, v) in enumerate(categories.most_common(len(categories))):
# print(v, file=out)
most_frequent = {}
# unstructured tagset
#
for i, (k, v) in enumerate(categories.most_common(len(categories))):
print(i, k, v, sep='\t')
if v >= args.freq_threshold:
most_frequent[f'({k})'] = i
# atomic tagset
#
# for i, (k, v) in enumerate(atomic_categories.most_common(len(atomic_categories))):
# print(i, k, v, sep='\t')
# if v >= args.freq_threshold:
# most_frequent[f'({k})'] = i
json.dump(most_frequent, out, indent=2)
out.close()
if __name__ == '__main__':
args = argparse.main()
main(args)
|
StarcoderdataPython
|
5144271
|
import json
from typing import List, Tuple
import pandas as pd
from softnanotools.logger import Logger
logger = Logger(__name__)
from ..system import System
from ..topology import Topology
from ...generators import Gel
class CoreReader:
def __init__(
self,
particles: dict = None,
topologies: list = None,
metadata: dict = None
):
        self._particles = particles if particles else {}
        self._topologies = topologies if topologies else []
        self.metadata = metadata if metadata else {}
def add_particles(self, name, value):
self._particles[name] = value
@property
def particles(self) -> dict:
return self._particles
def add_topology(
self,
name,
sequence,
positions,
edges,
cls = None,
):
        if cls is None:
cls = Topology
else:
cls = {
'Topology': Topology,
'Gel': Gel,
}[cls]
topology = cls(
name,
sequence=sequence,
positions=positions,
edges=edges
)
self._topologies.append(topology)
@property
def topologies(self) -> List[Topology]:
return self._topologies
def system(self, **kwargs) -> System:
if 'box' in self.metadata:
system = System(self.metadata['box'], unit_system=None)
else:
system = System(kwargs['box'], unit_system=None)
self.configure(system, **kwargs)
return system
def configure(
self,
system,
diffusion_constant: float = None,
diffusion_dictionary: float = None,
bonding: List[dict] = None,
**kwargs
):
        if diffusion_dictionary is not None and diffusion_constant is not None:
raise ValueError('Please provide only one form for the diffusion constants!')
metadata = self.metadata
topologies = self._topologies
particles = self._particles
logger.debug('Configuring system using reader...')
logger.debug(f'\tmetadata: {metadata}')
logger.debug(f'\ttopologies: {topologies}')
logger.debug(f'\tparticles: {particles}')
logger.debug('Using reader to insert topologies...')
for topology in self.topologies:
logger.info(f'Processing topology: {topology}')
if topology.top_type not in bonding:
raise TypeError(
f'Topology ({topology.top_type}) has been found'
f' but no bonds can be found in the bonding dictionary:'
f'\n{json.dumps(bonding, indent=2)}'
)
settings = bonding[topology.top_type]
if isinstance(settings, list):
topology.add_bond(settings)
else:
topology.add_bond(**settings)
system.insert_topology(
topology,
diffusion_dictionary=diffusion_dictionary,
diffusion_constant=diffusion_constant
)
logger.debug(f'Using reader to insert species...')
if diffusion_constant:
logger.debug(f'Using diffusion_constant ({diffusion_constant})')
diffusion = diffusion_constant
for name, value in self.particles.items():
logger.debug(f'Adding {name}')
system.insert_species(name, diffusion, value)
elif diffusion_dictionary:
logger.debug(f'Using diffusion_dictionary: {diffusion_dictionary}')
diffusion = diffusion_dictionary
for name, value in self.particles.items():
logger.debug(f'Adding {name}')
system.insert_species(name, diffusion[name], value)
return
|
StarcoderdataPython
|
128316
|
# import pandas, numpy, and matplotlib
import pandas as pd
from feature_engine.encoding import OneHotEncoder
from category_encoders.hashing import HashingEncoder
from sklearn.model_selection import train_test_split
pd.set_option('display.width', 200)
pd.set_option('display.max_columns', 20)
pd.set_option('display.max_rows', 200)
pd.options.display.float_format = '{:,.0f}'.format
covidtotals = pd.read_csv("data/covidtotals.csv")
feature_cols = ['location','population',
'aged_65_older','diabetes_prevalence','region']
covidtotals = covidtotals[['total_cases'] + feature_cols].dropna()
# Separate into train and test sets
X_train, X_test, y_train, y_test = \
train_test_split(covidtotals[feature_cols],\
covidtotals[['total_cases']], test_size=0.3, random_state=0)
# use the one hot encoder for region
X_train.region.value_counts()
ohe = OneHotEncoder(top_categories=6, variables=['region'])
covidtotals_ohe = ohe.fit_transform(covidtotals)
covidtotals_ohe.filter(regex='location|region',
axis="columns").sample(5, random_state=99).T
# use the hashing encoder for region
he = HashingEncoder(cols=['region'], n_components=16)
covidtotals['region2'] = covidtotals.region
covidtotals_enc = he.fit_transform(covidtotals)
covidtotals_enc.filter(regex='col|reg', axis="columns")
covidtotals_enc.groupby(['col_0','col_1','col_2','col_3','col_4','col_5','col_6','col_7','col_8','col_9','col_10','col_11','col_12','col_13','col_14','col_15','region2']).size().reset_index()
|
StarcoderdataPython
|
3594755
|
from collections.abc import MutableMapping
from typing import Any
from unittest import TestCase
from .variable import (
IllegalReferenceError,
UndefinedVariableError,
VariableError,
replace,
)
class VariableTest(TestCase):
def test_replace_empty(self) -> None:
self.assertEqual(({}, []), replace({}))
def test_replace_no_variables(self) -> None:
data = {"a": 1}
self.assertEqual((data, []), replace(data))
def test_replace_remove_variables_item(self) -> None:
data = {"variables": {"var": 2}, "a": 1}
self.assertEqual(({"a": 1}, []), replace(data))
def test_replace_variables(self) -> None:
data = {"variables": {"a": 1}, "a": "$a"}
self.assertEqual(({"a": 1}, []), replace(data))
def test_replace_variables_in_dict(self) -> None:
data = {"variables": {"b": 1}, "a": {"b": "$b"}}
self.assertEqual(({"a": {"b": 1}}, []), replace(data))
def test_replace_variables_in_list(self) -> None:
data = {"variables": {"x": 1, "y": 2}, "a": ["$x", "$y"]}
self.assertEqual(({"a": [1, 2]}, []), replace(data))
def test_replace_undefined_variable_error(self) -> None:
self._assert_error(UndefinedVariableError("a.$a"), {"a": "$a"})
self._assert_error(UndefinedVariableError("a[0].$a"), {"a": ["$a"]})
self._assert_error(UndefinedVariableError("a.b.$a"), {"a": {"b": "$a"}})
def test_replace_invalid_variable_error(self) -> None:
self._assert_error(IllegalReferenceError("$a"), {"$a": 1})
self._assert_error(IllegalReferenceError("a.$b"), {"a": {"$b": 1}})
def _assert_error(
self, error: VariableError, data: MutableMapping[str, Any]
) -> None:
self.assertEqual(({}, [error]), replace(data))
|
StarcoderdataPython
|
11202750
|
#! /usr/bin/env python
import logging
import logging.handlers
from autopyfactory.apfexceptions import ThreadRegistryInvalidKind
class ThreadsRegistry(object):
def __init__(self, kinds=['plugin','queue','util','core']):
self.log = logging.getLogger('autopyfactory')
# the kinds of threads allowed
# to be registered,
# sorted in the order they will be join()'ed
self.kinds = kinds
# initialization of the registry
self.threads = {}
for kind in self.kinds:
self.threads[kind] = []
def add(self, kind, thread):
"""
adds a new thread to the registry.
Inputs:
-------
- kind: the type of thread
It must be one of the keys in the self.threads dictionary.
- thread: the object to be added to the registry
"""
self.log.debug('adding a thread of type %s: %s' %(kind, thread.__class__.__name__))
if kind not in self.kinds:
raise ThreadRegistryInvalidKind(kind, thread)
self.threads[kind].append(thread)
def join(self):
"""
stops all threads registered, in the right order.
"""
for kind in self.kinds:
self.join_kind(kind)
def join_kind(self, kind):
"""
stops all threads registered of a given kind
Inputs:
-------
- kind: the type of threads to join().
It must be one of the keys in the self.threads dictionary.
"""
threads = self.threads[kind]
msg = 'stopping %s %s thread(s)' %(len(threads), kind)
self.log.debug(msg)
for thread in threads:
msg = 'stopping another %s thread' %kind
self.log.debug(msg)
thread.join(5)
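if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: register a worker thread and
    # shut everything down through the registry. Assumes the autopyfactory package that
    # provides ThreadRegistryInvalidKind is importable.
    import threading
    import time
    logging.basicConfig(level=logging.DEBUG)
    registry = ThreadsRegistry()
    worker = threading.Thread(target=time.sleep, args=(1,), name='demo-util')
    worker.start()
    registry.add('util', worker)
    registry.join()  # join()s every registered thread, kind by kind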
|
StarcoderdataPython
|
53958
|
from django.test import TestCase
from .models import Posts,Location,Category
# Create your tests here.
class locationTest(TestCase):
def setUp(self):
self.new_location = Location(location="nairobi")
def test_instance(self):
self.assertTrue(isinstance(self.new_location,Location))
def test_data(self):
self.assertTrue(self.new_location.location,"nairobi")
def test_save(self):
self.new_location.save()
location = Location.objects.all()
self.assertTrue(len(location)>0)
def test_delete(self):
location = Location.objects.filter(id=1)
location.delete()
locale = Location.objects.all()
self.assertTrue(len(locale)==0)
def test_update_location(self):
self.new_location.save()
self.update_location = Location.objects.filter(location='nairobi').update(location = 'Kenya')
self.updated_location = Location.objects.get(location='Kenya')
        self.assertEqual(self.updated_location.location, 'Kenya')
def test_get_location_by_id(self):
self.new_location.save()
locale = Location.objects.get(id=1)
        self.assertEqual(locale.location, 'nairobi')
class CategoryTest(TestCase):
def setUp(self):
self.new_category = Category(name="test")
def test_instance(self):
self.assertTrue(isinstance(self.new_category,Category))
def test_data(self):
self.assertTrue(self.new_category.name,"test")
def test_save(self):
self.new_category.save()
categories = Category.objects.all()
self.assertTrue(len(categories)>0)
def test_delete(self):
category = Category.objects.filter(id=1)
category.delete()
cat = Category.objects.all()
self.assertTrue(len(cat)==0)
def test_update_category(self):
self.new_category.save()
self.update_cat = Category.objects.filter(name='test').update(name = 'wedding')
self.updated_cat = Category.objects.get(name='wedding')
        self.assertEqual(self.updated_cat.name, 'wedding')
def test_get_category_by_id(self):
self.new_category.save()
cat = Category.objects.get(id=1)
        self.assertEqual(cat.name, 'test')
class postsTest(TestCase):
def setUp(self):
self.new_location = Location(location="nairobi")
# self.new_category = Category(name="test")
self.new_location.save()
# self.new_category.save()
self.new_post = Posts(name="sheila",description="like eating",location=self.new_location)
def test_instance(self):
self.assertTrue(isinstance(self.new_post,Posts))
def test_data(self):
self.assertTrue(self.new_post.name,"sheila")
self.assertTrue(self.new_post.description,"like eating")
def test_save(self):
self.new_post.save()
posts = Posts.objects.all()
self.assertTrue(len(posts)>0)
def test_delete(self):
post = Posts.objects.filter(id=1)
post.delete()
posts = Posts.objects.all()
self.assertTrue(len(posts)==0)
def test_update_post(self):
self.new_post.save()
self.update_post = Posts.objects.filter(name='sheila').update(name = 'cake')
self.updated_post = Posts.objects.get(name='cake')
        self.assertEqual(self.updated_post.name, 'cake')
def test_get_post_by_id(self):
self.new_post.save()
posts = Posts.objects.get(id=1)
        self.assertEqual(posts.name, 'sheila')
|
StarcoderdataPython
|
4809517
|
<reponame>FermentAI/Fermentation-Station
from engine import Subroutine
from pyfoomb import BioprocessModel
import numpy as np
class MyModel(BioprocessModel):
"""
    Defines the model class. Always named MyModel. Always inherits from BioprocessModel.
    Must define rhs(self, t, y) as a system of ODEs.
"""
def rhs(self, t, y):
"""
An exothermic stirred tank reactor where the objective is to control the reactor temperature
through manipulation of cooling water through the reactor cooling jacket.
\n By <NAME>
\n https://jckantor.github.io/CBE30338/04.11-Implementing-PID-Control-in-Nonlinear-Simulations.html
"""
# Unpacks the state vector. The states are alphabetically ordered.
C,T,Tc = y
# Unpacks the model parameters.
        # Here both manipulated variables and parameters are considered "model_parameters"
q = self.model_parameters['q']
Cf = self.model_parameters['Cf']
Tf = self.model_parameters['Tf']
Tcf = self.model_parameters['Tcf']
qc = self.model_parameters['qc']
Vc = self.model_parameters['Vc']
V = self.model_parameters['V']
rho = self.model_parameters['rho']
Cp = self.model_parameters['Cp']
dHr = self.model_parameters['dHr']
UA = self.model_parameters['UA']
# Defines the derivatives.
dCdt = (q/V)*(Cf - C) - self.k(T)*C
dTdt = (q/V)*(Tf - T) + (-dHr/rho/Cp)*self.k(T)*C + (UA/V/rho/Cp)*(Tc - T)
dTcdt = (qc/Vc)*(Tcf - Tc) + (UA/Vc/rho/Cp)*(T - Tc)
# Returns the derivative as list (or numpy array).
# The order corresponds to the state vector.
return [dCdt, dTdt, dTcdt]
###############################################
"""
Other methods can also be defined
"""
# Arrhenius rate expression
def k(self,T):
Ea = self.model_parameters['Ea']
R = self.model_parameters['R']
k0 = self.model_parameters['k0']
return k0*np.exp(-Ea/R/T)
class MySubroutines(Subroutine):
"""
Defines the subroutine class. Always named MySubroutines. Always inherits from Subroutine.
    The Subroutine class runs all of its non-underscored methods before iterating at every time step.
"""
def _initialization(self):
'''
This will only be run once in the first integration iteration.
Useful for initializing variables.
'''
# initialize errors for discrete time calculations
self.qLog = []
self.TLog = []
eP_, _, eD_ = self._temperature_error()
self._update_error([eP_,eD_,eD_])
    def temperature_pid_coolant_flowrate(self):
'''
Discrete time PID implementation
'''
dt = self.simulator_vars['dt']
kp = self.subroutine_vars['kp']
ki = self.subroutine_vars['ki']
kd = self.subroutine_vars['kd']
new_qc = self.model_parameters['qc']
# calculate current error
eP, eI, eD = self._temperature_error()
        # calculate the manipulated variable based on the error
new_qc -= kp*(eP - self.eP_) + ki*dt*eI + kd*(eD - 2*self.eD_ + self.eD__)/dt
# check for saturation
new_qc = self._coolant_flowrate_saturation(new_qc)
# update manipulated variable
self.model_parameters['qc'] = new_qc
self.qLog.append(new_qc)
# update errors
self._update_error([eP,eD,self.eD_])
return True
# other helper functions for Temperature PID
def _update_error(self, new_error):
self.eP_ = new_error[0]
self.eD_ = new_error[1]
self.eD__ = new_error[2]
def _temperature_error(self):
'''
Reactor temperature error with setpoint weighting
'''
T = self.model_state['T']
Tsp = self.subroutine_vars['Tsp']
beta = self.subroutine_vars['beta']
gamma = self.subroutine_vars['gamma']
eP = beta*Tsp - T
eI = Tsp - T
eD = gamma*Tsp - T
self.TLog.append(T)
return eP,eI,eD
def _coolant_flowrate_saturation(self, qc):
'''
Clamping of coolant flowrate
'''
qc_min = self.subroutine_vars['qc_min']
qc_max = self.subroutine_vars['qc_max']
return max(qc_min, min(qc_max,qc))
|
StarcoderdataPython
|
55272
|
from unittest.mock import patch, MagicMock, PropertyMock
import pytest
from cincoconfig.core import ConfigFormat
from cincoconfig.formats.json import JsonConfigFormat
from cincoconfig.formats.bson import BsonConfigFormat
from cincoconfig.formats.yaml import YamlConfigFormat
from cincoconfig.formats.xml import XmlConfigFormat
from cincoconfig.formats.pickle import PickleConfigFormat
class TestFormatRegistry:
def setup_method(self, _):
ConfigFormat._ConfigFormat__registry = {}
ConfigFormat._ConfigFormat__initialized = False
def test_register(self):
fmt = MagicMock
ConfigFormat.register('blah', fmt)
assert ConfigFormat._ConfigFormat__registry['blah'] is fmt
def test_get(self):
fmt = MagicMock()
fmt.return_value = 'hello'
ConfigFormat._ConfigFormat__registry['blah'] = fmt
ConfigFormat._ConfigFormat__initialized = True
check = ConfigFormat.get('blah', x=1, y='2')
fmt.assert_called_once_with(x=1, y='2')
assert check == 'hello'
@patch.object(ConfigFormat, 'initialize_registry')
def test_get_initialize(self, mock_init):
ConfigFormat._ConfigFormat__registry['blah'] = MagicMock()
ConfigFormat.get('blah')
mock_init.assert_called_once()
def test_get_no_exists(self):
with pytest.raises(KeyError):
ConfigFormat.get('asdfasdfasdf')
def test_base_formats(self):
ConfigFormat.initialize_registry()
assert ConfigFormat._ConfigFormat__registry == {
'json': JsonConfigFormat,
'yaml': YamlConfigFormat,
'bson': BsonConfigFormat,
'pickle': PickleConfigFormat,
'xml': XmlConfigFormat
}
def test_initialize_cache(self):
ConfigFormat.initialize_registry()
reg = ConfigFormat._ConfigFormat__registry = object()
ConfigFormat.initialize_registry()
assert ConfigFormat._ConfigFormat__registry is reg
|
StarcoderdataPython
|
225396
|
import re
import calendar
import json
import functools
from datetime import datetime
from random import random
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
compat_urlparse
)
from ..utils import (
bug_reports_message,
ExtractorError,
get_first,
int_or_none,
OnDemandPagedList,
parse_qs,
srt_subtitles_timecode,
traverse_obj,
)
class PanoptoBaseIE(InfoExtractor):
BASE_URL_RE = r'(?P<base_url>https?://[\w.-]+\.panopto.(?:com|eu)/Panopto)'
# see panopto core.js
_SUB_LANG_MAPPING = {
0: 'en-US',
1: 'en-GB',
2: 'es-MX',
3: 'es-ES',
4: 'de-DE',
5: 'fr-FR',
6: 'nl-NL',
7: 'th-TH',
8: 'zh-CN',
9: 'zh-TW',
10: 'ko-KR',
11: 'ja-JP',
12: 'ru-RU',
13: 'pt-PT',
14: 'pl-PL',
15: 'en-AU',
16: 'da-DK',
17: 'fi-FI',
18: 'hu-HU',
19: 'nb-NO',
20: 'sv-SE',
21: 'it-IT'
}
def _call_api(self, base_url, path, video_id, data=None, fatal=True, **kwargs):
response = self._download_json(
base_url + path, video_id, data=json.dumps(data).encode('utf8') if data else None,
fatal=fatal, headers={'accept': 'application/json', 'content-type': 'application/json'}, **kwargs)
if not response:
return
error_code = traverse_obj(response, 'ErrorCode')
if error_code == 2:
self.raise_login_required(method='cookies')
elif error_code is not None:
msg = f'Panopto said: {response.get("ErrorMessage")}'
if fatal:
raise ExtractorError(msg, video_id=video_id, expected=True)
else:
self.report_warning(msg, video_id=video_id)
return response
@staticmethod
def _parse_fragment(url):
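# The URL fragment carries JSON-encoded values, e.g. a fragment like
# 'folderID=%22e4c6a2fc-...%22&maxResults=250' parses to
# {'folderID': 'e4c6a2fc-...', 'maxResults': 250}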
return {k: json.loads(v[0]) for k, v in compat_urlparse.parse_qs(compat_urllib_parse_urlparse(url).fragment).items()}
@staticmethod
def _extract_urls(webpage):
return [m.group('url') for m in re.finditer(
r'<iframe[^>]+src=["\'](?P<url>%s/Pages/(Viewer|Embed|Sessions/List)\.aspx[^"\']+)' % PanoptoIE.BASE_URL_RE,
webpage)]
class PanoptoIE(PanoptoBaseIE):
_VALID_URL = PanoptoBaseIE.BASE_URL_RE + r'/Pages/(Viewer|Embed)\.aspx.*(?:\?|&)id=(?P<id>[a-f0-9-]+)'
_TESTS = [
{
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=26b3ae9e-4a48-4dcc-96ba-0befba08a0fb',
'info_dict': {
'id': '26b3ae9e-4a48-4dcc-96ba-0befba08a0fb',
'title': 'Panopto for Business - Use Cases',
'timestamp': 1459184200,
'thumbnail': r're:https://demo\.hosted\.panopto\.com/.+',
'upload_date': '20160328',
'ext': 'mp4',
'cast': [],
'chapters': [],
'duration': 88.17099999999999,
'average_rating': int,
'uploader_id': '2db6b718-47a0-4b0b-9e17-ab0b00f42b1e',
'channel_id': 'e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a',
'channel': 'Showcase Videos'
},
},
{
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=ed01b077-c9e5-4c7b-b8ff-15fa306d7a59',
'info_dict': {
'id': 'ed01b077-c9e5-4c7b-b8ff-15fa306d7a59',
'title': 'Overcoming Top 4 Challenges of Enterprise Video',
'uploader': 'Panopto Support',
'timestamp': 1449409251,
'thumbnail': r're:https://demo\.hosted\.panopto\.com/.+',
'upload_date': '20151206',
'ext': 'mp4',
'chapters': 'count:12',
'cast': ['Panopto Support'],
'uploader_id': 'a96d1a31-b4de-489b-9eee-b4a5b414372c',
'average_rating': int,
'description': 'md5:4391837802b3fc856dadf630c4b375d1',
'duration': 1088.2659999999998,
'channel_id': '9f3c1921-43bb-4bda-8b3a-b8d2f05a8546',
'channel': 'Webcasts',
},
},
{
# Extra params in URL
'url': 'https://howtovideos.hosted.panopto.com/Panopto/Pages/Viewer.aspx?randomparam=thisisnotreal&id=5fa74e93-3d87-4694-b60e-aaa4012214ed&advance=true',
'info_dict': {
'id': '5fa74e93-3d87-4694-b60e-aaa4012214ed',
'ext': 'mp4',
'duration': 129.513,
'cast': ['<NAME>'],
'uploader_id': '316a0a58-7fa2-4cd9-be1c-64270d284a56',
'timestamp': 1569845768,
'tags': ['Viewer', 'Enterprise'],
'chapters': [],
'upload_date': '20190930',
'thumbnail': r're:https://howtovideos\.hosted\.panopto\.com/.+',
'description': 'md5:2d844aaa1b1a14ad0e2601a0993b431f',
'title': 'Getting Started: View a Video',
'average_rating': int,
'uploader': '<NAME>',
'channel_id': 'fb93bc3c-6750-4b80-a05b-a921013735d3',
'channel': 'Getting Started',
}
},
{
# Does not allow normal Viewer.aspx. AUDIO livestream has no url, so should be skipped and only give one stream.
'url': 'https://unisa.au.panopto.com/Panopto/Pages/Embed.aspx?id=9d9a0fa3-e99a-4ebd-a281-aac2017f4da4',
'info_dict': {
'id': '9d9a0fa3-e99a-4ebd-a281-aac2017f4da4',
'ext': 'mp4',
'cast': ['LTS CLI Script'],
'chapters': [],
'duration': 2178.45,
'description': 'md5:ee5cf653919f55b72bce2dbcf829c9fa',
'channel_id': 'b23e673f-c287-4cb1-8344-aae9005a69f8',
'average_rating': int,
'uploader_id': '38377323-6a23-41e2-9ff6-a8e8004bf6f7',
'uploader': 'LTS CLI Script',
'timestamp': 1572458134,
'title': 'WW2 Vets Interview 3 <NAME>',
'thumbnail': r're:https://unisa\.au\.panopto\.com/.+',
'channel': 'World War II Veteran Interviews',
'upload_date': '20191030',
},
},
{
# Slides/storyboard
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=a7f12f1d-3872-4310-84b0-f8d8ab15326b',
'info_dict': {
'id': 'a7f12f1d-3872-4310-84b0-f8d8ab15326b',
'ext': 'mhtml',
'timestamp': 1448798857,
'duration': 4712.681,
'title': 'Cache Memory - CompSci 15-213, Lecture 12',
'channel_id': 'e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a',
'uploader_id': 'a96d1a31-b4de-489b-9eee-b4a5b414372c',
'upload_date': '20151129',
'average_rating': 0,
'uploader': 'Panopto Support',
'channel': 'Showcase Videos',
'description': 'md5:55e51d54233ddb0e6c2ed388ca73822c',
'cast': ['ISR Videographer', 'Panopto Support'],
'chapters': 'count:28',
'thumbnail': r're:https://demo\.hosted\.panopto\.com/.+',
},
'params': {'format': 'mhtml', 'skip_download': True}
},
{
'url': 'https://na-training-1.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=8285224a-9a2b-4957-84f2-acb0000c4ea9',
'info_dict': {
'id': '8285224a-9a2b-4957-84f2-acb0000c4ea9',
'ext': 'mp4',
'chapters': [],
'title': 'Company Policy',
'average_rating': 0,
'timestamp': 1615058901,
'channel': 'Human Resources',
'tags': ['HumanResources'],
'duration': 1604.243,
'thumbnail': r're:https://na-training-1\.hosted\.panopto\.com/.+',
'uploader_id': '8e8ba0a3-424f-40df-a4f1-ab3a01375103',
'uploader': 'Cait M.',
'upload_date': '20210306',
'cast': ['Cait M.'],
'subtitles': {'en-US': [{'ext': 'srt', 'data': 'md5:a3f4d25963fdeace838f327097c13265'}],
'es-ES': [{'ext': 'srt', 'data': 'md5:57e9dad365fd0fbaf0468eac4949f189'}]},
},
'params': {'writesubtitles': True, 'skip_download': True}
}, {
# On Panopto there are two subs: "Default" and en-US. en-US is blank and should be skipped.
'url': 'https://na-training-1.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=940cbd41-f616-4a45-b13e-aaf1000c915b',
'info_dict': {
'id': '940cbd41-f616-4a45-b13e-aaf1000c915b',
'ext': 'mp4',
'subtitles': 'count:1',
'title': 'HR Benefits Review Meeting*',
'cast': ['Panopto Support'],
'chapters': [],
'timestamp': 1575024251,
'thumbnail': r're:https://na-training-1\.hosted\.panopto\.com/.+',
'channel': 'Zoom',
'description': 'md5:04f90a9c2c68b7828144abfb170f0106',
'uploader': 'Panopto Support',
'average_rating': 0,
'duration': 409.34499999999997,
'uploader_id': 'b6ac04ad-38b8-4724-a004-a851004ea3df',
'upload_date': '20191129',
},
'params': {'writesubtitles': True, 'skip_download': True}
},
{
'url': 'https://ucc.cloud.panopto.eu/Panopto/Pages/Viewer.aspx?id=0e8484a4-4ceb-4d98-a63f-ac0200b455cb',
'only_matching': True
},
{
'url': 'https://brown.hosted.panopto.com/Panopto/Pages/Embed.aspx?id=0b3ff73b-36a0-46c5-8455-aadf010a3638',
'only_matching': True
},
]
@classmethod
def suitable(cls, url):
return False if PanoptoPlaylistIE.suitable(url) else super().suitable(url)
def _mark_watched(self, base_url, video_id, delivery_info):
duration = traverse_obj(delivery_info, ('Delivery', 'Duration'), expected_type=float)
invocation_id = delivery_info.get('InvocationId')
stream_id = traverse_obj(delivery_info, ('Delivery', 'Streams', ..., 'PublicID'), get_all=False, expected_type=str)
if invocation_id and stream_id and duration:
timestamp_str = f'/Date({calendar.timegm(datetime.utcnow().timetuple())}000)/'
data = {
'streamRequests': [
{
'ClientTimeStamp': timestamp_str,
'ID': 0,
'InvocationID': invocation_id,
'PlaybackSpeed': 1,
'SecondsListened': duration - 1,
'SecondsRejected': 0,
'StartPosition': 0,
'StartReason': 2,
'StopReason': None,
'StreamID': stream_id,
'TimeStamp': timestamp_str,
'UpdatesRejected': 0
},
]}
self._download_webpage(
base_url + '/Services/Analytics.svc/AddStreamRequests', video_id,
fatal=False, data=json.dumps(data).encode('utf8'), headers={'content-type': 'application/json'},
note='Marking watched', errnote='Unable to mark watched')
@staticmethod
def _extract_chapters(timestamps):
chapters = []
for timestamp in timestamps or []:
caption = timestamp.get('Caption')
start, duration = int_or_none(timestamp.get('Time')), int_or_none(timestamp.get('Duration'))
if not caption or start is None or duration is None:
continue
chapters.append({
'start_time': start,
'end_time': start + duration,
'title': caption
})
return chapters
@staticmethod
def _extract_mhtml_formats(base_url, timestamps):
image_frags = {}
for timestamp in timestamps or []:
duration = timestamp.get('Duration')
obj_id, obj_sn = timestamp.get('ObjectIdentifier'), timestamp.get('ObjectSequenceNumber'),
if timestamp.get('EventTargetType') == 'PowerPoint' and obj_id is not None and obj_sn is not None:
image_frags.setdefault('slides', []).append({
'url': base_url + f'/Pages/Viewer/Image.aspx?id={obj_id}&number={obj_sn}',
'duration': duration
})
obj_pid, session_id, abs_time = timestamp.get('ObjectPublicIdentifier'), timestamp.get('SessionID'), timestamp.get('AbsoluteTime')
if None not in (obj_pid, session_id, abs_time):
image_frags.setdefault('chapter', []).append({
'url': base_url + f'/Pages/Viewer/Thumb.aspx?eventTargetPID={obj_pid}&sessionPID={session_id}&number={obj_sn}&isPrimary=false&absoluteTime={abs_time}',
'duration': duration,
})
for name, fragments in image_frags.items():
yield {
'format_id': name,
'ext': 'mhtml',
'protocol': 'mhtml',
'acodec': 'none',
'vcodec': 'none',
'url': 'about:invalid',
'fragments': fragments
}
@staticmethod
def _json2srt(data, delivery):
def _gen_lines():
for i, line in enumerate(data):
start_time = line['Time']
duration = line.get('Duration')
if duration:
end_time = start_time + duration
else:
end_time = traverse_obj(data, (i + 1, 'Time')) or delivery['Duration']
yield f'{i + 1}\n{srt_subtitles_timecode(start_time)} --> {srt_subtitles_timecode(end_time)}\n{line["Caption"]}'
return '\n\n'.join(_gen_lines())
def _get_subtitles(self, base_url, video_id, delivery):
subtitles = {}
for lang in delivery.get('AvailableLanguages') or []:
response = self._call_api(
base_url, '/Pages/Viewer/DeliveryInfo.aspx', video_id, fatal=False,
note='Downloading captions JSON metadata', query={
'deliveryId': video_id,
'getCaptions': True,
'language': str(lang),
'responseType': 'json'
}
)
if not isinstance(response, list):
continue
subtitles.setdefault(self._SUB_LANG_MAPPING.get(lang) or 'default', []).append({
'ext': 'srt',
'data': self._json2srt(response, delivery),
})
return subtitles
def _extract_streams_formats_and_subtitles(self, video_id, streams, **fmt_kwargs):
formats = []
subtitles = {}
for stream in streams or []:
stream_formats = []
http_stream_url = stream.get('StreamHttpUrl')
stream_url = stream.get('StreamUrl')
if http_stream_url:
stream_formats.append({'url': http_stream_url})
if stream_url:
media_type = stream.get('ViewerMediaFileTypeName')
if media_type in ('hls', ):
m3u8_formats, stream_subtitles = self._extract_m3u8_formats_and_subtitles(stream_url, video_id)
stream_formats.extend(m3u8_formats)
subtitles = self._merge_subtitles(subtitles, stream_subtitles)
else:
stream_formats.append({
'url': stream_url
})
for fmt in stream_formats:
fmt.update({
'format_note': stream.get('Tag'),
**fmt_kwargs
})
formats.extend(stream_formats)
return formats, subtitles
def _real_extract(self, url):
base_url, video_id = self._match_valid_url(url).group('base_url', 'id')
delivery_info = self._call_api(
base_url, '/Pages/Viewer/DeliveryInfo.aspx', video_id,
query={
'deliveryId': video_id,
'invocationId': '',
'isLiveNotes': 'false',
'refreshAuthCookie': 'true',
'isActiveBroadcast': 'false',
'isEditing': 'false',
'isKollectiveAgentInstalled': 'false',
'isEmbed': 'false',
'responseType': 'json',
}
)
delivery = delivery_info['Delivery']
session_start_time = int_or_none(delivery.get('SessionStartTime'))
timestamps = delivery.get('Timestamps')
# Podcast stream is usually the combined streams. We will prefer that by default.
podcast_formats, podcast_subtitles = self._extract_streams_formats_and_subtitles(
video_id, delivery.get('PodcastStreams'), format_note='PODCAST')
streams_formats, streams_subtitles = self._extract_streams_formats_and_subtitles(
video_id, delivery.get('Streams'), preference=-10)
formats = podcast_formats + streams_formats
formats.extend(self._extract_mhtml_formats(base_url, timestamps))
subtitles = self._merge_subtitles(
podcast_subtitles, streams_subtitles, self.extract_subtitles(base_url, video_id, delivery))
self._sort_formats(formats)
self.mark_watched(base_url, video_id, delivery_info)
return {
'id': video_id,
'title': delivery.get('SessionName'),
'cast': traverse_obj(delivery, ('Contributors', ..., 'DisplayName'), default=[], expected_type=lambda x: x or None),
'timestamp': session_start_time - 11640000000 if session_start_time else None,
'duration': delivery.get('Duration'),
'thumbnail': base_url + f'/Services/FrameGrabber.svc/FrameRedirect?objectId={video_id}&mode=Delivery&random={random()}',
'average_rating': delivery.get('AverageRating'),
'chapters': self._extract_chapters(timestamps),
'uploader': delivery.get('OwnerDisplayName') or None,
'uploader_id': delivery.get('OwnerId'),
'description': delivery.get('SessionAbstract'),
'tags': traverse_obj(delivery, ('Tags', ..., 'Content')),
'channel_id': delivery.get('SessionGroupPublicID'),
'channel': traverse_obj(delivery, 'SessionGroupLongName', 'SessionGroupShortName', get_all=False),
'formats': formats,
'subtitles': subtitles
}
class PanoptoPlaylistIE(PanoptoBaseIE):
_VALID_URL = PanoptoBaseIE.BASE_URL_RE + r'/Pages/(Viewer|Embed)\.aspx.*(?:\?|&)pid=(?P<id>[a-f0-9-]+)'
_TESTS = [
{
'url': 'https://howtovideos.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=f3b39fcf-882f-4849-93d6-a9f401236d36&id=5fa74e93-3d87-4694-b60e-aaa4012214ed&advance=true',
'info_dict': {
'title': 'Featured Video Tutorials',
'id': 'f3b39fcf-882f-4849-93d6-a9f401236d36',
'description': '',
},
'playlist_mincount': 36
},
{
'url': 'https://utsa.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=e2900555-3ad4-4bdb-854d-ad2401686190',
'info_dict': {
'title': 'Library Website Introduction Playlist',
'id': 'e2900555-3ad4-4bdb-854d-ad2401686190',
'description': 'md5:f958bca50a1cbda15fdc1e20d32b3ecb',
},
'playlist_mincount': 4
},
]
def _entries(self, base_url, playlist_id, session_list_id):
session_list_info = self._call_api(
base_url, f'/Api/SessionLists/{session_list_id}?collections[0].maxCount=500&collections[0].name=items', playlist_id)
items = session_list_info['Items']
for item in items:
if item.get('TypeName') != 'Session':
self.report_warning('Got an item in the playlist that is not a Session' + bug_reports_message(), only_once=True)
continue
yield {
'_type': 'url',
'id': item.get('Id'),
'url': item.get('ViewerUri'),
'title': item.get('Name'),
'description': item.get('Description'),
'duration': item.get('Duration'),
'channel': traverse_obj(item, ('Parent', 'Name')),
'channel_id': traverse_obj(item, ('Parent', 'Id'))
}
def _real_extract(self, url):
base_url, playlist_id = self._match_valid_url(url).group('base_url', 'id')
video_id = get_first(parse_qs(url), 'id')
if video_id:
if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(base_url + f'/Pages/Viewer.aspx?id={video_id}', ie_key=PanoptoIE.ie_key(), video_id=video_id)
else:
self.to_screen(f'Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}')
playlist_info = self._call_api(base_url, f'/Api/Playlists/{playlist_id}', playlist_id)
return self.playlist_result(
self._entries(base_url, playlist_id, playlist_info['SessionListId']),
playlist_id=playlist_id, playlist_title=playlist_info.get('Name'),
playlist_description=playlist_info.get('Description'))
class PanoptoListIE(PanoptoBaseIE):
_VALID_URL = PanoptoBaseIE.BASE_URL_RE + r'/Pages/Sessions/List\.aspx'
_PAGE_SIZE = 250
_TESTS = [
{
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Sessions/List.aspx#folderID=%22e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a%22',
'info_dict': {
'id': 'e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a',
'title': 'Showcase Videos'
},
'playlist_mincount': 140
},
{
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Sessions/List.aspx#view=2&maxResults=250',
'info_dict': {
'id': 'panopto_list',
'title': 'panopto_list'
},
'playlist_mincount': 300
},
{
# Folder that contains 8 folders and a playlist
'url': 'https://howtovideos.hosted.panopto.com/Panopto/Pages/Sessions/List.aspx?noredirect=true#folderID=%224b9de7ae-0080-4158-8496-a9ba01692c2e%22',
'info_dict': {
'id': '4b9de7ae-0080-4158-8496-a9ba01692c2e',
'title': 'Video Tutorials'
},
'playlist_mincount': 9
}
]
def _fetch_page(self, base_url, query_params, display_id, page):
params = {
'sortColumn': 1,
'getFolderData': True,
'includePlaylists': True,
**query_params,
'page': page,
'maxResults': self._PAGE_SIZE,
}
response = self._call_api(
base_url, '/Services/Data.svc/GetSessions', f'{display_id} page {page+1}',
data={'queryParameters': params}, fatal=False)
for result in get_first(response, 'Results', default=[]):
# This could be a video, playlist (or maybe something else)
item_id = result.get('DeliveryID')
yield {
'_type': 'url',
'id': item_id,
'title': result.get('SessionName'),
'url': traverse_obj(result, 'ViewerUrl', 'EmbedUrl', get_all=False) or (base_url + f'/Pages/Viewer.aspx?id={item_id}'),
'duration': result.get('Duration'),
'channel': result.get('FolderName'),
'channel_id': result.get('FolderID'),
}
for folder in get_first(response, 'Subfolders', default=[]):
folder_id = folder.get('ID')
yield self.url_result(
base_url + f'/Pages/Sessions/List.aspx#folderID="{folder_id}"',
ie_key=PanoptoListIE.ie_key(), video_id=folder_id, title=folder.get('Name'))
def _extract_folder_metadata(self, base_url, folder_id):
response = self._call_api(
base_url, '/Services/Data.svc/GetFolderInfo', folder_id,
data={'folderID': folder_id}, fatal=False)
return {
'title': get_first(response, 'Name', default=[])
}
def _real_extract(self, url):
mobj = self._match_valid_url(url)
base_url = mobj.group('base_url')
query_params = self._parse_fragment(url)
folder_id, display_id = query_params.get('folderID'), 'panopto_list'
if query_params.get('isSubscriptionsPage'):
display_id = 'subscriptions'
if not query_params.get('subscribableTypes'):
query_params['subscribableTypes'] = [0, 1, 2]
elif query_params.get('isSharedWithMe'):
display_id = 'sharedwithme'
elif folder_id:
display_id = folder_id
query = query_params.get('query')
if query:
display_id += f': query "{query}"'
info = {
'_type': 'playlist',
'id': display_id,
'title': display_id,
}
if folder_id:
info.update(self._extract_folder_metadata(base_url, folder_id))
info['entries'] = OnDemandPagedList(
functools.partial(self._fetch_page, base_url, query_params, display_id), self._PAGE_SIZE)
return info
|
StarcoderdataPython
|
6419495
|
<reponame>kahbenya/k8-test-api
from flask import Flask , request
import random
import requests
import json
app = Flask(__name__)
@app.route("/")
def index():
return "Testing 1 1 2 3\n"
@app.route("/create" , methods=['POST'])
def create():
message = request.json['message']
if not message:
return "need a message"
# create with k8 api
# get the template from the file
hellorc_tmpl = open('hellorc.json').read()
# generate an id
rid = random.randint(100000,2000000000)
hellorc_tmpl = hellorc_tmpl.replace("{% ID %}",str(rid))
hellorc_tmpl = hellorc_tmpl.replace("{% USER MSG %}", "App: hello-%d | Message: %s" % (rid,message))
# now use the k8 api to create the rc
# should we pull the api info from env vars ?
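# NOTE: http://localhost:8001 assumes the Kubernetes API server is exposed
# locally, presumably via `kubectl proxy` (which listens on port 8001 by default)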
headers = {'Content-Type' : 'application/json'}
#create rc
rc_req = requests.post("http://localhost:8001/api/v1/namespaces/default/replicationcontrollers", data=hellorc_tmpl, headers=headers)
# create the corresponding service
nodeport = random.randint(30200,30300)
hellosvc_tmpl = open('hellosvc.json').read()
hellosvc_tmpl = hellosvc_tmpl.replace("{% ID %}",str(rid))
hellosvc_tmpl = hellosvc_tmpl.replace("{% NODEPORT %}",str(nodeport))
svc_req = requests.post("http://localhost:8001/api/v1/namespaces/default/services", data=hellosvc_tmpl,headers=headers)
if svc_req.status_code != 200:
return svc_req.text, svc_req.status_code
#print svc_req.status_code
#print svc_req.text
# create service
return "done"
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0',port=5000)
|
StarcoderdataPython
|
4991758
|
<filename>util/mongoback.py
# -*- coding: utf-8 -*-
import os
from config import mongo_back_path
import time
def back_mongo():
path = mongo_back_path+"/"+str(time.time())
command = "mkdir " + path
back_mongo_command = "/usr/bin/mongodump -d ss -o "+path
try:
os.system(command)
os.system(back_mongo_command)
return True
except Exception as e:
pass
return False
# if __name__ == "__main__":
# back_mongo()
|
StarcoderdataPython
|
3519628
|
# -*- coding: utf-8 -*-
__author__ = 'minhtule'
from unittest import TestCase
from gap.parameter import *
class TestParameter(TestCase):
def test_value(self):
self.assertTrue(Parameter("key1", "right format", "text"))
self.assertTrue(Parameter("key2", -54.234, "currency"))
self.assertTrue(Parameter("key3", True, "boolean"))
self.assertTrue(Parameter("key4", 343, "integer"))
with self.assertRaises(ValidateException):
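# note: only the first call below is actually exercised; once it raises,
# the remaining statements in this block are never reached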
Parameter("key5", 2.34, Parameter.VALUE_TYPE_TEXT)
Parameter("key6", "wrong format", Parameter.VALUE_TYPE_CURRENCY)
Parameter("key7", 4, Parameter.VALYE_TYPE_BOOLEAN)
Parameter("key8", True, Parameter.VALUE_TYPE_INTEGER)
def test_url_format_basic(self):
param_text = Parameter("key1", " 2 ^ 3 * a", "text")
self.assertEqual(param_text.url_format(), u"key1=%202%20%5E%203%20%2A%20a")
param_currency = Parameter("key%2", -22.343, "currency")
self.assertEqual(param_currency.url_format(), u"key%252=-22.343")
param_boolean = Parameter("key/3", True, "boolean")
self.assertEqual(param_boolean.url_format(), u"key%2F3=1")
param_integer = Parameter("key_4", 8343, "integer")
self.assertEqual(param_integer.url_format(), u"key_4=8343")
def test_url_format_advance(self):
param1 = Parameter("dp", "/my page €", "text")
self.assertEqual(param1.url_format(), u"dp=%2Fmy%20page%20%E2%82%AC")
def test_protocol_version(self):
protocol_version = ProtocolVersion()
self.assertEqual(protocol_version.value, "1")
def test_tracking_id(self):
self.assertTrue(TrackingID("UA-42620910-11"))
with self.assertRaises(ValidateException):
TrackingID("UUA-42620910-11")
TrackingID("UA-4262091a-11")
TrackingID("UA-42620910-1a")
def test_anonymize_ip(self):
self.assertTrue(AnonymizeIP())
with self.assertRaises(ValidateException):
AnonymizeIP(1)
AnonymizeIP("1")
|
StarcoderdataPython
|
125156
|
<filename>btn_cache/api_types.py
# Copyright (c) 2021 AllSeeingEyeTolledEweSew
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import enum
from typing import Any
from typing import Dict
from typing import Sequence
from typing_extensions import TypedDict
class TorrentEntry(TypedDict):
Category: str
Codec: str
Container: str
DownloadURL: str
GroupID: str
GroupName: str
ImdbID: str
InfoHash: str
Leechers: str
Origin: str
ReleaseName: str
Resolution: str
Seeders: str
Series: str
SeriesBanner: str
SeriesID: str
SeriesPoster: str
Size: str
Snatched: str
Source: str
Time: str
TorrentID: str
TvdbID: str
TvrageID: str
YoutubeTrailer: str
TORRENT_ENTRY_KEYS = {
"Category",
"Codec",
"Container",
"DownloadURL",
"GroupID",
"GroupName",
"ImdbID",
"InfoHash",
"Leechers",
"Origin",
"ReleaseName",
"Resolution",
"Seeders",
"Series",
"SeriesBanner",
"SeriesID",
"SeriesPoster",
"Size",
"Snatched",
"Source",
"Time",
"TorrentID",
"TvdbID",
"TvrageID",
"YoutubeTrailer",
}
class GetTorrentsResult(TypedDict):
results: str
torrents: Dict[str, TorrentEntry]
class SnatchEntryTorrentInfo(TypedDict):
GroupName: str
Series: str
Year: str
Source: str
Container: str
Codec: str
Resolution: str
class SnatchEntry(TypedDict):
Downloaded: str
IsSeeding: str
Ratio: str
Seedtime: str
SnatchTime: str
TorrentID: str
TorrentInfo: SnatchEntryTorrentInfo
Uploaded: str
SNATCH_ENTRY_KEYS = {
"Downloaded",
"IsSeeding",
"Ratio",
"Seedtime",
"SnatchTime",
"TorrentID",
"TorrentInfo",
"Uploaded",
}
class GetUserSnatchlistResult(TypedDict):
results: str
torrents: Dict[str, SnatchEntry]
class Request(TypedDict):
jsonrpc: str
id: Any
method: str
params: Sequence[Any]
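# Illustrative shape of a request matching the TypedDict above (the method
# name and params here are hypothetical placeholders, not the exact BTN API):
# {"jsonrpc": "2.0", "id": 1, "method": "getTorrents", "params": [...]}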
class ErrorCode(enum.IntEnum):
INVALID_API_KEY = -32001
CALL_LIMIT_EXCEEDED = -32002
class Error(TypedDict):
message: str
code: ErrorCode
class Response(TypedDict, total=False):
id: Any
result: Any
error: Error
|
StarcoderdataPython
|
9715956
|
# tests.test_classifier.test_classification_report
# Tests for the classification report visualizer
#
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# Created: Sun Mar 18 16:57:27 2018 -0400
#
# ID: test_classification_report.py [] <EMAIL> $
"""
Tests for the classification report visualizer
"""
##########################################################################
## Imports
##########################################################################
import pytest
import yellowbrick as yb
import matplotlib.pyplot as plt
from yellowbrick.classifier.classification_report import *
from tests.base import VisualTestCase
from tests.dataset import DatasetMixin
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split as tts
from sklearn.linear_model import LassoCV, LogisticRegression
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Test for Classification Report
##########################################################################
@pytest.mark.usefixtures("binary", "multiclass")
class ClassificationReportTests(VisualTestCase, DatasetMixin):
"""
ClassificationReport visualizer tests
"""
def test_binary_class_report(self):
"""
Correctly generates a report for binary classification with LinearSVC
"""
_, ax = plt.subplots()
viz = ClassificationReport(LinearSVC(), ax=ax)
viz.fit(self.binary.X.train, self.binary.y.train)
viz.score(self.binary.X.test, self.binary.y.test)
self.assert_images_similar(viz)
assert viz.scores_ == {
'precision': {0: 0.7446808510638298, 1: 0.8490566037735849},
'recall': {0: 0.813953488372093, 1: 0.7894736842105263},
'f1': {0: 0.7777777777777778, 1: 0.8181818181818182}
}
def test_multiclass_class_report(self):
"""
Correctly generates report for multi-class with LogisticRegression
"""
_, ax = plt.subplots()
viz = ClassificationReport(LogisticRegression(random_state=12), ax=ax)
viz.fit(self.multiclass.X.train, self.multiclass.y.train)
viz.score(self.multiclass.X.test, self.multiclass.y.test)
self.assert_images_similar(viz)
assert viz.scores_ == {
'precision': {
0: 0.5333333333333333, 1: 0.5, 2: 0.45,
3: 0.4, 4: 0.4, 5: 0.5882352941176471
}, 'recall': {
0: 0.42105263157894735, 1: 0.5625, 2: 0.6428571428571429,
3: 0.3157894736842105, 4: 0.375, 5: 0.625
}, 'f1': {
0: 0.47058823529411764, 1: 0.5294117647058824,
2: 0.5294117647058824, 3: 0.35294117647058826,
4: 0.38709677419354843, 5: 0.6060606060606061
}}
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration(self):
"""
Test with Pandas DataFrame and Series input
"""
_, ax = plt.subplots()
# Load the occupancy dataset from fixtures
data = self.load_data('occupancy')
target = 'occupancy'
features = [
"temperature", "relative_humidity", "light", "C02", "humidity"
]
# Create instances and target
X = pd.DataFrame(data[features])
y = pd.Series(data[target].astype(int))
# Create train/test splits
splits = tts(X, y, test_size=0.2, random_state=4512)
X_train, X_test, y_train, y_test = splits
classes = ['unoccupied', 'occupied']
# Create classification report
model = GaussianNB()
viz = ClassificationReport(model, ax=ax, classes=classes)
viz.fit(X_train, y_train)
viz.score(X_test, y_test)
self.assert_images_similar(viz, tol=0.1)
# Ensure correct classification scores under the hood
assert viz.scores_ == {
'precision': {
'unoccupied': 0.999347471451876,
'occupied': 0.8825214899713467
}, 'recall': {
'unoccupied': 0.9613935969868174,
'occupied': 0.9978401727861771
}, 'f1': {
'unoccupied': 0.9800031994880819,
'occupied': 0.9366447034972124
}}
@pytest.mark.skip(reason="requires random state in quick method")
def test_quick_method(self):
"""
Test the quick method with a random dataset
"""
X, y = make_classification(
n_samples=400, n_features=20, n_informative=8, n_redundant=8,
n_classes=2, n_clusters_per_class=4, random_state=27
)
_, ax = plt.subplots()
classification_report(DecisionTreeClassifier(), X, y, ax=ax)
self.assert_images_similar(ax=ax)
def test_isclassifier(self):
"""
Assert that only classifiers can be used with the visualizer.
"""
message = (
'This estimator is not a classifier; '
'try a regression or clustering score visualizer instead!'
)
with pytest.raises(yb.exceptions.YellowbrickError, match=message):
ClassificationReport(LassoCV())
|
StarcoderdataPython
|
6521910
|
<reponame>JasonTheDeveloper/Custom-Vision-Autotrainer
from enum import Enum
class Platform(Enum):
DOCKER = "DockerFile"
CORE_ML = "CoreML"
TENSORFLOW = "TensorFlow"
ONNX = "ONNX"
def __str__(self):
return self.value
class Flavour(Enum):
Linux = "Linux"
Windows = "Windows"
ONNX10 = "ONNX10"
ONNX12 = "ONNX12"
def __str__(self):
return self.value
|
StarcoderdataPython
|
1928667
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Dirichlet distribution class."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import softmax_centered as softmax_centered_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import gamma as gamma_lib
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'Dirichlet',
]
_dirichlet_sample_note = """Note: `value` must be a non-negative tensor with
dtype `self.dtype` and be in the `(self.event_shape() - 1)`-simplex, i.e.,
`tf.reduce_sum(value, -1) = 1`. It must have a shape compatible with
`self.batch_shape() + self.event_shape()`."""
class Dirichlet(distribution.AutoCompositeTensorDistribution):
"""Dirichlet distribution.
The Dirichlet distribution is defined over the
[`(k-1)`-simplex](https://en.wikipedia.org/wiki/Simplex) using a positive,
length-`k` vector `concentration` (`k > 1`). The Dirichlet is identically the
Beta distribution when `k = 2`.
#### Mathematical Details
The Dirichlet is a distribution over the open `(k-1)`-simplex, i.e.,
```none
S^{k-1} = { (x_0, ..., x_{k-1}) in R^k : sum_j x_j = 1 and all_j x_j > 0 }.
```
The probability density function (pdf) is,
```none
pdf(x; alpha) = prod_j x_j**(alpha_j - 1) / Z
Z = prod_j Gamma(alpha_j) / Gamma(sum_j alpha_j)
```
where:
* `x in S^{k-1}`, i.e., the `(k-1)`-simplex,
* `concentration = alpha = [alpha_0, ..., alpha_{k-1}]`, `alpha_j > 0`,
* `Z` is the normalization constant aka the [multivariate beta function](
https://en.wikipedia.org/wiki/Beta_function#Multivariate_beta_function),
and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The `concentration` represents mean total counts of class occurrence, i.e.,
```none
concentration = alpha = mean * total_concentration
```
where `mean` in `S^{k-1}` and `total_concentration` is a positive real number
representing a mean total count.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: Some components of the samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[<NAME>, <NAME>, <NAME>.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create a single trivariate Dirichlet, with the 3rd class being three times
# more frequent than the first. I.e., batch_shape=[], event_shape=[3].
alpha = [1., 2, 3]
dist = tfd.Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 3]
# x has one sample, one batch, three classes:
x = [.2, .3, .5] # shape: [3]
dist.prob(x) # shape: []
# x has two samples from one batch:
x = [[.1, .4, .5],
[.2, .3, .5]]
dist.prob(x) # shape: [2]
# alpha will be broadcast to shape [5, 7, 3] to match x.
x = [[...]] # shape: [5, 7, 3]
dist.prob(x) # shape: [5, 7]
```
```python
# Create batch_shape=[2], event_shape=[3]:
alpha = [[1., 2, 3],
[4, 5, 6]] # shape: [2, 3]
dist = tfd.Dirichlet(alpha)
dist.sample([4, 5]) # shape: [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # shape: [2]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant([1.0, 2.0, 3.0])
dist = tfd.Dirichlet(alpha)
samples = dist.sample(5) # Shape [5, 3]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, alpha)
```
"""
def __init__(self,
concentration,
validate_args=False,
allow_nan_stats=True,
force_probs_to_zero_outside_support=False,
name='Dirichlet'):
"""Initialize a batch of Dirichlet distributions.
Args:
concentration: Positive floating-point `Tensor` indicating mean number
of class occurrences; aka "alpha". Implies `self.dtype`, and
`self.batch_shape`, `self.event_shape`, i.e., if
`concentration.shape = [N1, N2, ..., Nm, k]` then
`batch_shape = [N1, N2, ..., Nm]` and
`event_shape = [k]`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
force_probs_to_zero_outside_support: If `True`, force `prob(x) == 0` and
`log_prob(x) == -inf` for values of x outside the distribution support.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
self._force_probs_to_zero_outside_support = (
force_probs_to_zero_outside_support)
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([concentration], dtype_hint=tf.float32)
self._concentration = tensor_util.convert_nonref_to_tensor(
concentration, dtype=dtype, name='concentration')
super(Dirichlet, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# pylint: disable=g-long-lambda
return dict(
concentration=parameter_properties.ParameterProperties(
event_ndims=1,
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
# pylint: enable=g-long-lambda
@property
def concentration(self):
"""Concentration parameter; expected counts for that coordinate."""
return self._concentration
@property
def force_probs_to_zero_outside_support(self):
return self._force_probs_to_zero_outside_support
def _event_shape_tensor(self):
# NOTE: In TF1, tf.shape(x) can call `tf.convert_to_tensor(x)` **twice**,
# so we pre-emptively convert-to-tensor.
concentration = tf.convert_to_tensor(self.concentration)
return ps.shape(concentration)[-1:]
def _event_shape(self):
return tensorshape_util.with_rank(self.concentration.shape[-1:], rank=1)
def _sample_n(self, n, seed=None):
# We use the log-space gamma sampler to avoid the bump-up-from-0 correction,
# and to apply the concentration < 1 recurrence in log-space. This improves
# accuracy for small concentrations.
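# If G_i ~ Gamma(concentration_i, 1) independently, then
# (G_1, ..., G_k) / sum_i G_i ~ Dirichlet(concentration); the normalization
# below is carried out in log space via logsumexp for numerical stability.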
log_gamma_sample = gamma_lib.random_gamma(
shape=[n], concentration=self.concentration, seed=seed, log_space=True)
return tf.math.exp(
log_gamma_sample -
tf.math.reduce_logsumexp(log_gamma_sample, axis=-1, keepdims=True))
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _log_prob(self, x):
concentration = tf.convert_to_tensor(self.concentration)
lp = (tf.reduce_sum(tf.math.xlogy(concentration - 1., x), axis=-1) -
tf.math.lbeta(concentration))
if self._force_probs_to_zero_outside_support:
eps = np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps
in_support = (
tf.reduce_all(x >= 0, axis=-1) &
# Reusing the logic of tf.debugging.assert_near, 10 * np.finfo.eps
(tf.math.abs(tf.reduce_sum(x, axis=-1) - 1.) < 10 * eps))
return tf.where(in_support, lp, -float('inf'))
return lp
@distribution_util.AppendDocstring(_dirichlet_sample_note)
def _prob(self, x):
return tf.exp(self._log_prob(x))
def _entropy(self):
concentration = tf.convert_to_tensor(self.concentration)
k = tf.cast(tf.shape(concentration)[-1], self.dtype)
total_concentration = tf.reduce_sum(concentration, axis=-1)
return (tf.math.lbeta(concentration) +
((total_concentration - k) * tf.math.digamma(total_concentration)) -
tf.reduce_sum((concentration - 1.) * tf.math.digamma(concentration),
axis=-1))
def _mean(self):
concentration = tf.convert_to_tensor(self.concentration)
total_concentration = tf.reduce_sum(concentration, axis=-1, keepdims=True)
return concentration / total_concentration
def _covariance(self):
concentration = tf.convert_to_tensor(self.concentration)
total_concentration = tf.reduce_sum(concentration, axis=-1, keepdims=True)
mean = concentration / total_concentration
scale = tf.math.rsqrt(1. + total_concentration)
x = scale * mean
variance = x * (scale - x)
return tf.linalg.set_diag(
tf.matmul(-x[..., tf.newaxis], x[..., tf.newaxis, :]),
variance)
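# Closed forms, with total concentration a0 = sum_i alpha_i:
#   Var(X_i) = alpha_i * (a0 - alpha_i) / (a0**2 * (a0 + 1))
#   Cov(X_i, X_j) = -alpha_i * alpha_j / (a0**2 * (a0 + 1)) for i != j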
def _variance(self):
concentration = tf.convert_to_tensor(self.concentration)
total_concentration = tf.reduce_sum(concentration, axis=-1, keepdims=True)
mean = concentration / total_concentration
scale = tf.math.rsqrt(1. + total_concentration)
x = scale * mean
return x * (scale - x)
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when any `concentration <= 1`. If
`self.allow_nan_stats` is `True`, `NaN` is used for undefined modes. If
`self.allow_nan_stats` is `False` an exception is raised when one or more
modes are undefined.""")
def _mode(self):
concentration = tf.convert_to_tensor(self.concentration)
k = tf.cast(tf.shape(concentration)[-1], self.dtype)
total_concentration = tf.reduce_sum(concentration, axis=-1)
mode = (concentration - 1.) / (total_concentration[..., tf.newaxis] - k)
if self.allow_nan_stats:
return tf.where(
tf.reduce_all(concentration > 1., axis=-1, keepdims=True),
mode,
dtype_util.as_numpy_dtype(self.dtype)(np.nan))
assertions = [
assert_util.assert_less(
tf.ones([], self.dtype),
concentration,
message='Mode undefined when any concentration <= 1')
]
with tf.control_dependencies(assertions):
return tf.identity(mode)
def _default_event_space_bijector(self):
# TODO(b/145620027) Finalize choice of bijector.
return softmax_centered_bijector.SoftmaxCentered(
validate_args=self.validate_args)
def _sample_control_dependencies(self, x):
"""Checks the validity of a sample."""
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_non_negative(
x, message='Samples must be non-negative.'))
assertions.append(assert_util.assert_near(
tf.ones([], dtype=self.dtype),
tf.reduce_sum(x, axis=-1),
message='Sample last-dimension must sum to `1`.'))
return assertions
def _parameter_control_dependencies(self, is_init):
"""Checks the validity of the concentration parameter."""
assertions = []
# In init, we can always build shape and dtype checks because
# we assume shape doesn't change for Variable backed args.
if is_init:
if not dtype_util.is_floating(self.concentration.dtype):
raise TypeError('Argument `concentration` must be float type.')
msg = 'Argument `concentration` must have rank at least 1.'
ndims = tensorshape_util.rank(self.concentration.shape)
if ndims is not None:
if ndims < 1:
raise ValueError(msg)
elif self.validate_args:
assertions.append(assert_util.assert_rank_at_least(
self.concentration, 1, message=msg))
msg = 'Argument `concentration` must have `event_size` at least 2.'
event_size = tf.compat.dimension_value(self.concentration.shape[-1])
if event_size is not None:
if event_size < 2:
raise ValueError(msg)
elif self.validate_args:
assertions.append(assert_util.assert_less(
1,
tf.shape(self.concentration)[-1],
message=msg))
if not self.validate_args:
assert not assertions # Should never happen.
return []
if is_init != tensor_util.is_ref(self.concentration):
assertions.append(assert_util.assert_positive(
self.concentration,
message='Argument `concentration` must be positive.'))
return assertions
@kullback_leibler.RegisterKL(Dirichlet, Dirichlet)
def _kl_dirichlet_dirichlet(d1, d2, name=None):
"""Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.
Args:
d1: instance of a Dirichlet distribution object.
d2: instance of a Dirichlet distribution object.
name: Python `str` name to use for created operations.
Default value: `None` (i.e., `'kl_dirichlet_dirichlet'`).
Returns:
kl_div: Batchwise KL(d1 || d2)
"""
with tf.name_scope(name or 'kl_dirichlet_dirichlet'):
# The KL between Dirichlet distributions can be derived as follows. We have
#
# Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)]
#
# where B(a) is the multivariate Beta function:
#
# B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n])
#
# The KL is
#
# KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))}
#
# so we'll need to know the log density of the Dirichlet. This is
#
# log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a)
#
# The only term that matters for the expectations is the log(x[i]). To
# compute the expectation of this term over the Dirichlet density, we can
# use the following facts about the Dirichlet in exponential family form:
# 1. log(x[i]) is a sufficient statistic
# 2. expected sufficient statistics (of any exp family distribution) are
# equal to derivatives of the log normalizer with respect to
# corresponding natural parameters: E{T[i](x)} = dA/d(eta[i])
#
# To proceed, we can rewrite the Dirichlet density in exponential family
# form as follows:
#
# Dir(x; a) = exp{eta(a) . T(x) - A(a)}
#
# where '.' is the dot product of vectors eta and T, and A is a scalar:
#
# eta[i](a) = a[i] - 1
# T[i](x) = log(x[i])
# A(a) = log B(a)
#
# Now, we can use fact (2) above to write
#
# E_Dir(x; a)[log(x[i])]
# = dA(a) / da[i]
# = d/da[i] log B(a)
# = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j])
# = digamma(a[i])) - digamma(sum_j a[j])
#
# Putting it all together, we have
#
# KL[Dir(x; a) || Dir(x; b)]
# = E_Dir(x; a){log(Dir(x; a) / Dir(x; b)}
# = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])} - (lbeta(a) - lbeta(b))
# = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b)
# = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))]
# - lbeta(a) + lbeta(b))
concentration1 = tf.convert_to_tensor(d1.concentration)
concentration2 = tf.convert_to_tensor(d2.concentration)
digamma_sum_d1 = tf.math.digamma(
tf.reduce_sum(concentration1, axis=-1, keepdims=True))
digamma_diff = tf.math.digamma(concentration1) - digamma_sum_d1
concentration_diff = concentration1 - concentration2
return (
tf.reduce_sum(concentration_diff * digamma_diff, axis=-1) -
tf.math.lbeta(concentration1) + tf.math.lbeta(concentration2))
|
StarcoderdataPython
|
8079614
|
<gh_stars>1-10
"""Author: <NAME>, Copyright 2019"""
from best_first.caption_utils import Insertion
from abc import ABC, abstractmethod
import numpy as np
class Ordering(ABC):
def __init__(
self,
max_violations=0
):
self.max_violations = max_violations
def get_orderings(
self,
split_words,
split_tags,
candidate_words,
candidate_tags,
candidate_slots,
order_violations,
closed
):
orderings = self.expand(
split_words,
split_tags,
left_words=candidate_words,
left_tags=candidate_tags,
slots=candidate_slots,
violations=order_violations,
closed=closed) if split_words.size > 0 else []
insertion_index = np.argmax(self.score(candidate_words, candidate_tags))
return orderings + [Insertion(
words=np.concatenate([[2], split_words, [3]]),
tags=np.concatenate([[1], split_tags, [1]]),
next_word=candidate_words[insertion_index],
next_tag=candidate_tags[insertion_index],
slot=candidate_slots[insertion_index],
violations=order_violations)]
def expand(
self,
words,
tags,
left_words=np.zeros([0], dtype=np.int32),
left_tags=np.zeros([0], dtype=np.int32),
slots=np.zeros([0], dtype=np.int32),
violations=0,
closed=None
):
orderings = []
closed = closed if closed is not None else set()
removal_index = np.argmin(self.score(words, tags))
for i, (word, tag) in enumerate(zip(words, tags)):
order_violations = violations if i == removal_index else violations + 1
if order_violations <= self.max_violations:
split_words = np.append(words[:i], words[i + 1:])
if split_words.tostring() not in closed:
closed.add(split_words.tostring())
split = 0
while split < slots.size and slots[split] <= i:
split += 1
orderings.extend(self.get_orderings(
split_words,
np.append(tags[:i], tags[i + 1:]),
np.concatenate([left_words[:split], [word], left_words[split:]]),
np.concatenate([left_tags[:split], [tag], left_tags[split:]]),
np.concatenate([slots[:split], [i], slots[split:] - 1]),
order_violations,
closed))
return orderings
@abstractmethod
def score(
self,
words,
tags,
):
return NotImplemented
|
StarcoderdataPython
|
1826642
|
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy
from matplotlib.figure import Figure
from tifffile import imread
from hylfm.plot.plotting import turbo_colormap
def plot_img_projections(img: numpy.ndarray, b=0) -> Figure:
assert len(img.shape) == 5 # bczyx
img = img[b]
assert img.shape[0] == 1 # singleton channel axis
img = img[0]
z_len, y_len, x_len = img.shape
fig = plt.figure(constrained_layout=False)
gs = fig.add_gridspec(2, 2, width_ratios=[x_len, z_len], height_ratios=[z_len, y_len])
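# Maximum-intensity projections: ax1 shows the x-y view (max over z), ax2 the
# z-y view (max over x, transposed so it shares y with ax1), and ax3 the x-z
# view (max over y, sharing x with ax1).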
# ax1
ax1 = fig.add_subplot(gs[1, 0], anchor="E")
ax1.set_xlabel("x")
ax1.set_ylabel("y")
# ax2
ax2 = fig.add_subplot(gs[1, 1], sharey=ax1, anchor="W")
# ax2.set_ylabel('y')
ax2.set_xlabel("z")
plt.setp(ax2.get_yticklabels(), visible=False)
# ax3
ax3 = fig.add_subplot(gs[0, 0], sharex=ax1, anchor="E")
# ax3.set_xlabel("x")
ax3.set_ylabel("z")
plt.setp(ax3.get_xticklabels(), visible=False)
ax1.imshow(img.max(0), cmap=turbo_colormap)
ax2.imshow(img.max(2).transpose(), cmap=turbo_colormap)
ax3.imshow(img.max(1), cmap=turbo_colormap)
plt.tight_layout()
fig.subplots_adjust(wspace=0, hspace=0)
return fig
def plot_img_projections_with_beads(
img: numpy.ndarray,
bead_pos: List[numpy.ndarray],
other_bead_pos: Optional[List[numpy.ndarray]] = None,
b=0,
z_min=None,
z_max=None,
) -> Figure:
assert len(img.shape) == 5 # bczyx
img = img[b]
assert img.shape[0] == 1 # singleton channel axis
img = img[0]
z_len, y_len, x_len = img.shape
bead_pos = bead_pos[b]
assert len(bead_pos.shape) == 2
if z_min is not None:
bead_pos = bead_pos[z_min <= bead_pos[:, 0]]
if z_max is not None:
bead_pos = bead_pos[z_max >= bead_pos[:, 0]]
fig = plt.figure(constrained_layout=False)
gs = fig.add_gridspec(2, 2, width_ratios=[x_len, z_len], height_ratios=[z_len, y_len])
# ax1
ax1 = fig.add_subplot(gs[1, 0], anchor="E")
ax1.set_xlabel("x")
ax1.set_ylabel("y")
# ax2
ax2 = fig.add_subplot(gs[1, 1], sharey=ax1, anchor="W")
# ax2.set_ylabel('y')
ax2.set_xlabel("z")
plt.setp(ax2.get_yticklabels(), visible=False)
# ax3
ax3 = fig.add_subplot(gs[0, 0], sharex=ax1, anchor="E")
# ax3.set_xlabel('x')
ax3.set_ylabel("z")
plt.setp(ax3.get_xticklabels(), visible=False)
ax1.imshow(img.max(0), cmap=turbo_colormap)
ax2.imshow(img.max(2).transpose(), cmap=turbo_colormap)
ax3.imshow(img.max(1), cmap=turbo_colormap)
ax1.scatter(bead_pos[:, 2], bead_pos[:, 1], c=abs(bead_pos[:, 0] - 25), marker="1")
ax2.scatter(bead_pos[:, 0], bead_pos[:, 1], c=abs(bead_pos[:, 0] - 25), marker="1")
ax3.scatter(bead_pos[:, 2], bead_pos[:, 0], c=abs(bead_pos[:, 0] - 25), marker="1")
if other_bead_pos is not None:
other_bead_pos = other_bead_pos[b]
assert len(other_bead_pos.shape) == 2
ax1.scatter(other_bead_pos[:, 2], other_bead_pos[:, 1], c=abs(other_bead_pos[:, 0] - 25), marker="2")
ax2.scatter(other_bead_pos[:, 0], other_bead_pos[:, 1], c=abs(other_bead_pos[:, 0] - 25), marker="2")
ax3.scatter(other_bead_pos[:, 2], other_bead_pos[:, 0], c=abs(other_bead_pos[:, 0] - 25), marker="2")
plt.tight_layout()
fig.subplots_adjust(wspace=0, hspace=0)
return fig
if __name__ == "__main__":
tgt = (
imread("K:/beuttenm/repos/lnet/logs/beads/19-08-23_18-32_c307a5a_aux1_/result/test/target/0000.tif")[None, ...]
/ numpy.iinfo(numpy.uint16).max
)
pred = (
imread("K:/beuttenm/repos/lnet/logs/beads/19-08-23_18-32_c307a5a_aux1_/result/test/prediction/0000.tif")[
None, ...
]
/ numpy.iinfo(numpy.uint16).max
)
plot_img_projections(tgt)
plt.show()
plot_img_projections(pred)
plt.show()
|
StarcoderdataPython
|
5180849
|
<gh_stars>100-1000
# coding: utf-8
import os
import warnings
from pathlib import Path
from functools import reduce
from itertools import chain
from collections import Counter
import numpy as np
import pandas as pd
import gender_guesser.detector as gender
from sklearn.preprocessing import MultiLabelBinarizer
warnings.filterwarnings("ignore")
DATA_PATH = Path("data/airbnb")
fname = "listings.csv.gz"
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH)
df_original = pd.read_csv(DATA_PATH / fname)
print(df_original.shape)
df_original.head()
# this is just subjective. One can choose some other columns
keep_cols = [
"id",
"host_id",
"description",
"house_rules",
"host_name",
"host_listings_count",
"host_identity_verified",
"neighbourhood_cleansed",
"latitude",
"longitude",
"is_location_exact",
"property_type",
"room_type",
"accommodates",
"bathrooms",
"bedrooms",
"beds",
"amenities",
"price",
"security_deposit",
"cleaning_fee",
"guests_included",
"extra_people",
"minimum_nights",
"instant_bookable",
"cancellation_policy",
"reviews_per_month",
]
df = df_original[keep_cols]
df = df[~df.reviews_per_month.isna()]
df = df[~df.description.isna()]
df = df[~df.host_listings_count.isna()]
print(df.shape)
# This is a preprocessing stage before preparing the data to be passed to WideDeep
# Let's go "column by column"
# house rules
#
# I will simply include a binary column with 1/0 if the property has/has not
# house rules.
df["has_house_rules"] = df["house_rules"]
df.has_house_rules.fillna(0, inplace=True)
df["has_house_rules"][df.has_house_rules != 0] = 1
df.drop("house_rules", axis=1, inplace=True)
# host_name
#
# I will use names to infer gender using `gender_guesser`
host_name = df.host_name.tolist()
d = gender.Detector()
host_gender = [d.get_gender(n) for n in host_name]
replace_dict = {"mostly_male": "male", "mostly_female": "female", "andy": "unknown"}
host_gender = [replace_dict.get(item, item) for item in host_gender]
Counter(host_gender)
df["host_gender"] = host_gender
df.drop("host_name", axis=1, inplace=True)
df.head()
# property_type, room_type, accommodates, bathrooms, bedrooms, beds and
# guests_included, host_listings_count, minimum_nights
#
# Here is some standard pre-processing
df.property_type.value_counts()
replace_prop_type = [
val
for val in df.property_type.unique().tolist()
if val not in ["Apartment", "House"]
]
replace_prop_type = {k: "other" for k in replace_prop_type}
df.property_type.replace(replace_prop_type, inplace=True)
df["property_type"] = df.property_type.apply(lambda x: "_".join(x.split(" ")).lower())
df.room_type.value_counts()
df["room_type"] = df.room_type.apply(lambda x: "_".join(x.split(" ")).lower())
df["bathrooms"][(df.bathrooms.isna()) & (df.room_type == "private_room")] = 0
df["bathrooms"][(df.bathrooms.isna()) & (df.room_type == "entire_home/apt")] = 1
df.bedrooms.fillna(1, inplace=True)
df.beds.fillna(1, inplace=True)
# Encode some as categorical
categorical_cut = [
("accommodates", 3),
("guests_included", 3),
("minimum_nights", 3),
("host_listings_count", 3),
("bathrooms", 1.5),
("bedrooms", 3),
("beds", 3),
]
for col, cut in categorical_cut:
new_colname = col + "_catg"
df[new_colname] = df[col].apply(lambda x: cut if x >= cut else x)
df[new_colname] = df[new_colname].round().astype(int)
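# e.g. with ("accommodates", 3), a listing that accommodates 5 gets
# accommodates_catg = 3, while one that accommodates 2 keeps the value 2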
# Amenities
#
# I will just add a number of dummy columns with 1/0 if the property has/has
# not that particular amenity
amenity_repls = (
('"', ""),
("{", ""),
("}", ""),
(" / ", "_"),
("/", "_"),
(" ", "_"),
("(s)", ""),
)
amenities_raw = df.amenities.str.lower().tolist()
amenities = [
reduce(lambda a, kv: a.replace(*kv), amenity_repls, s).split(",")
for s in amenities_raw
]
all_amenities = list(chain(*amenities))
all_amenities_count = Counter(all_amenities)
all_amenities_count
# looking at the list, we see that one amenity is empty and two are
# "translation missing:..."
keep_amenities = []
for k, v in all_amenities_count.items():
if k and "missing" not in k:
keep_amenities.append(k)
final_amenities = [
[amenity for amenity in house_amenities if amenity in keep_amenities]
for house_amenities in amenities
]
# some properties apparently have no amenities
final_amenities = [
["no amenities"] if not amenity else amenity for amenity in final_amenities
]
final_amenities = [
["amenity_" + amenity for amenity in amenities] for amenities in final_amenities
]
# let's build the dummy df
df_list_of_amenities = pd.DataFrame({"groups": final_amenities}, columns=["groups"])
s = df_list_of_amenities["groups"]
mlb = MultiLabelBinarizer()
df_amenities = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=df.index)
df.drop("amenities", axis=1, inplace=True)
df = pd.concat([df, df_amenities], axis=1)
df.head()
# Price, security_deposit, cleaning_fee, extra_people
money_columns = ["price", "security_deposit", "cleaning_fee", "extra_people"]
tmp_money_df = df[money_columns].fillna("$0")
money_repls = (("$", ""), (",", ""))
for col in money_columns:
val_str = tmp_money_df[col].tolist()
val_num = [
float(st)
for st in [
reduce(lambda a, kv: a.replace(*kv), money_repls, s) for s in val_str
]
]
tmp_money_df[col] = val_num
high_price, high_deposit, high_cleaning_fee, high_extra_people = 1000, 2000, 200, 100
high_price_count = (tmp_money_df.price >= high_price).sum()
high_deposit_count = (tmp_money_df.security_deposit >= high_deposit).sum()
high_cleaning_fee_count = (tmp_money_df.cleaning_fee >= high_cleaning_fee).sum()
high_extra_people_count = (tmp_money_df.extra_people >= high_extra_people).sum()
print("properties with very high price: {}".format(high_price_count))
print("properties with very high security deposit: {}".format(high_deposit_count))
print("properties with very high cleaning fee: {}".format(high_cleaning_fee_count))
print("properties with very high extra people cost: {}".format(high_extra_people_count))
# We will now just concat and drop the high values later on
df.drop(money_columns, axis=1, inplace=True)
df = pd.concat([df, tmp_money_df], axis=1)
df = df[
(df.price < high_price)
& (df.price != 0)
& (df.security_deposit < high_deposit)
& (df.cleaning_fee < high_cleaning_fee)
& (df.extra_people < high_extra_people)
]
df.head()
print(df.shape)
# let's make sure there are no nan left
has_nan = df.isnull().any(axis=0)
has_nan = [df.columns[i] for i in np.where(has_nan)[0]]
if not has_nan:
print("no NaN, all OK")
# Computing a proxy for yield
# Yield is defined as price * occupancy rate. Occupancy rate can be calculated
# by multiplying ((reviews / review rate) * average length of stay), where
# review rate and average length of stay are normally taken as factors based
# on some model. For example, in the San Francisco model a review rate of 0.5
# is used to convert reviews to estimated bookings (i.e. we assume that only
# half of the guests will leave a review). An average length of stay of 3
# nights multiplied by the estimated bookings over a period gives the
# occupancy rate. Therefore, in the expression I have used below, if you want
# to turn my implementation of 'yield' into a "proper" one under the San
# Francisco model assumptions simply multiply my yield by 6 (3 * (1/0.5)) or
# by 72 (3 * 2 * 12) if you prefer per year.
df["yield"] = (df["price"] + df["cleaning_fee"]) * (df["reviews_per_month"])
df.drop(["price", "cleaning_fee", "reviews_per_month"], axis=1, inplace=True)
# we will focus on cases with yield below 600 (we lose ~3% of the data).
# No real reason for this, simply removing some "outliers"
df = df[df["yield"] <= 600]
df.to_csv(DATA_PATH / "listings_processed.csv", index=False)
print("data preprocessed finished. Final shape: {}".format(df.shape))
|
StarcoderdataPython
|
1618215
|
<reponame>schlauch/DataFinder
#
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Handles encoding/decoding of Python objects to a custom string format.
This module is intended to be replaced. Make sure to use L{JSON Format
<datafinder.persistence.metadata.value_mapping.json_format>} for new
implementations.
This module can handle the following data types:
- Numerics (float, int, C{decimal.Decimal})
- Booleans
- Strings (unicode, str)
- Date time (C{datetime.datetime}). Decoding works from: ISO8601,
  RFC822, time stamp. Encoding converts from local time to UTC and decoding vice versa.
- Lists (CANNOT be nested)
- Dictionaries (Already uses the new JSON format)
"""
import datetime
import decimal
import re
import sys
from datafinder.persistence.common import datetime_util
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.metadata.value_mapping import json_format
__version__ = "$Revision-Id$"
_LIST_SEPARATOR = ";"
_ESCAPED_LIST_SEPARATOR = "\\" + _LIST_SEPARATOR
_NONE_PERSISTENCE_REPRESENTATION = ""
_EMPTY_LIST_REPRESENTATION = "____EMPTY____LIST____"
_ESCAPED_EMPTY_LIST = "\\" + _EMPTY_LIST_REPRESENTATION
class MetadataValue(object):
""" Wrapper around a meta data value which represents a restored value. """
def __init__(self, persistedValue, expectedType=None):
"""
@param persistedValue: The persistence representation of a property value.
@type persistedValue: C{basestring}
@param expectedType: Type to which the value should be converted. Type
comparison is performed using built-in C{type} function.
@type expectedType: C{object}
@raise PersistenceError: The persistence value is no string.
"""
if not isinstance(persistedValue, basestring):
raise PersistenceError("No valid persistence string has been provided. %s" % str(persistedValue))
self._expectedType = expectedType
self._persistedValue = persistedValue
self._conversionFunctions = list()
self._conversionFunctions.append(self._convertToBool)
self._conversionFunctions.append(self._convertToDecimal)
self._conversionFunctions.append(self._convertToDatetime)
self._conversionFunctions.append(self._convertToList)
self._conversionFunctions.append(self._convertToDict)
self._conversionFunctions.append(self._convertToUnicode)
@property
def persistedValue(self):
""" Value represented by its persistence format. """
return self._persistedValue
@property
def value(self):
""" Returns the most probable value. """
representations = self.guessRepresentation()
if not self._expectedType is None:
for representation in representations:
if type(representation) == self._expectedType:
return representation
return representations[0]
def guessRepresentation(self):
"""
Tries to convert the retrieved value to the expected type.
If the conversion fails an empty list is returned.
@return: List of possible value representations.
@rtype: C{list} of C{object}
"""
result = list()
if _NONE_PERSISTENCE_REPRESENTATION == self._persistedValue:
result.append(None)
else:
convertedValue = None
for conversionFunction in self._conversionFunctions:
convertedValue = conversionFunction(self._persistedValue)
if not convertedValue is None:
result.append(convertedValue)
return result
def _convertToList(self, value):
result = None
stringList = re.split("(?<!\\\\);", value) # does not split at: \;
if len(stringList) > 0 and stringList[0] != value:
typedList = list()
for item in stringList:
if item == _NONE_PERSISTENCE_REPRESENTATION:
convertedValue = None
else:
for conversionFunction in self._conversionFunctions:
convertedValue = conversionFunction(item)
if not convertedValue is None:
break
typedList.append(convertedValue)
result = typedList
elif value == _EMPTY_LIST_REPRESENTATION:
result = list()
return result
@staticmethod
def _convertToDict(value):
try:
dictValue = json_format.convertFromPersistenceFormat(value)
except PersistenceError:
dictValue = None
else:
if not isinstance(dictValue, dict):
dictValue = None
return dictValue
@staticmethod
def _convertToUnicode(value):
if _ESCAPED_LIST_SEPARATOR in value:
value = value.replace(_ESCAPED_LIST_SEPARATOR, _LIST_SEPARATOR)
elif value == _ESCAPED_EMPTY_LIST:
value = _EMPTY_LIST_REPRESENTATION
return value
@staticmethod
def _convertToBool(value):
try:
intValue = int(value)
except (ValueError, TypeError):
return None
else:
if intValue in [0, 1]:
return bool(intValue)
@staticmethod
def _convertToDecimal(value):
try:
value = decimal.Decimal(value)
except (decimal.InvalidOperation, ValueError, TypeError):
value = None
return value
@staticmethod
def _convertToDatetime(value):
return datetime_util.convertToDatetime(value)
def __cmp__(self, other):
return cmp(self.persistedValue, other)
def __hash__(self):
return hash(self.persistedValue)
def __repr__(self):
return repr(self.value)
def getPersistenceRepresentation(value):
"""
Converts the given value to the persistence string format.
@param value: Value to persist.
@type value: C{object}
@return: Persistence representation.
@rtype: C{basestring}
@raise PersistenceError: Indicating unsupported value type or
problems during conversion.
"""
if value is None:
return _NONE_PERSISTENCE_REPRESENTATION
else:
typeConversionFunctionMap = {str: _convertFromUnicode,
unicode: _convertFromUnicode,
int: _convertFromDecimal,
float: _convertFromDecimal,
bool: _convertFromBool,
decimal.Decimal: _convertFromDecimal,
list: _convertFromList,
datetime.datetime: _convertFromDatetime,
dict: _convertFromDict}
valueType = type(value)
if valueType in typeConversionFunctionMap:
return typeConversionFunctionMap[valueType](value)
else:
raise PersistenceError("Persistence support for values of type " \
+ "'%s' is not available." % str(valueType))
def _convertFromDatetime(value):
return datetime_util.convertToIso8601(value)
def _convertFromDecimal(value):
return unicode(value)
def _convertFromBool(value):
return unicode(int(value))
def _convertFromList(value):
listAsString = ""
for item in value:
convertedItem = getPersistenceRepresentation(item)
listAsString += convertedItem + _LIST_SEPARATOR
if len(listAsString) == 0:
listAsString = _EMPTY_LIST_REPRESENTATION
return listAsString
def _convertFromDict(value):
return json_format.convertToPersistenceFormat(value)
def _convertFromUnicode(value):
if not isinstance(value, unicode):
encoding = sys.getdefaultencoding() or "ascii"
try:
value = unicode(value, encoding)
except UnicodeError, error:
errorMessage = "Problem during string conversion: '%s'" \
% str(error)
raise PersistenceError(errorMessage)
value = _escapeSpecialSequence(value)
return value
def _escapeSpecialSequence(value):
if _LIST_SEPARATOR in value:
value = value.replace(_LIST_SEPARATOR, _ESCAPED_LIST_SEPARATOR)
elif value == _EMPTY_LIST_REPRESENTATION:
value = _ESCAPED_EMPTY_LIST
return value
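# Illustrative round trip (added sketch; assumes a Python 2 interpreter, since
# this module relies on unicode/basestring):
# persisted = getPersistenceRepresentation([1, True, u"a;b"])
# # -> u"1;1;a\;b;" (ints/bools are encoded as "1", the ";" inside "a;b" is escaped)
# candidates = MetadataValue(persisted).guessRepresentation()
# # candidates holds the possible typed representations of the stored string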
|
StarcoderdataPython
|
3550637
|
import os
resultsDir = "./results/"
os.chdir("..")
for filename in os.listdir(resultsDir):
if ".out" in filename:
print(filename)
falsePositives = 0
falseNegatives = 0
trueNegatives = 0
truePositives = 0
tokens = filename.split(".")
labelFileName = tokens[0] + ".lbl"
labelFile = open(resultsDir + labelFileName)
outputFile = open(resultsDir + filename)
labelData = labelFile.readline()
outputData = outputFile.readline()
labelDataLength = len(labelData)
outputDataLength = len(outputData)
shorterLength = min([labelDataLength, outputDataLength])
first_fire_frame_truth = 0
first_fire_frame_predicted = 0
for i in range(0, shorterLength):
labelDataValue = labelData[i]
outputDataValue = outputData[i]
if labelDataValue == "1" and first_fire_frame_truth == 0:
first_fire_frame_truth = i
if outputDataValue == "1" and first_fire_frame_predicted == 0 and first_fire_frame_truth != 0:
first_fire_frame_predicted = i
if labelDataValue == "0" and outputDataValue == "0":
trueNegatives += 1
elif labelDataValue == "1" and outputDataValue == "0":
falseNegatives += 1
elif labelDataValue == "0" and outputDataValue == "1":
falsePositives += 1
elif labelDataValue == "1" and outputDataValue == "1":
truePositives += 1
analFile = open(resultsDir + tokens[0] + ".anl", "w")
analFile.write("tpos: {}\ntneg: {}\nfpos: {}\nfneg: {}\n"
.format(truePositives, trueNegatives, falsePositives, falseNegatives))
analFile.write("frames_to_detect: {}".format(first_fire_frame_predicted - first_fire_frame_truth))
print("tpos: {} tneg: {} fpos: {} fneg: {}"
.format(truePositives, trueNegatives, falsePositives, falseNegatives))
|
StarcoderdataPython
|
8040811
|
<gh_stars>0
from ..lib import OpenIncludedFile
import threading
import time
import sys
import os
import webview # pywebview
class WindowManager:
api = None
windows = None
def __init__(self, api):
self.windows = {}
self.api = api
def LaunchWindow(self, window_id, window_title, html_path, parent_window_id = None):
# test if window exists
if window_id in self.windows.keys():
raise Exception(f'Window with window_id {window_id} already exists. Call ActivateWindow() instead.')
self.CreateWindow( window_id, window_title, html_path, parent_window_id)
def ActivateWindow(self, window_id):
# test if window exists
if window_id not in self.windows.keys():
raise Exception(f'Window with window_id {window_id} does not exist. Call LaunchWindow() instead.')
# [todo] figure out how to raise the window on top
def DestroyWindow(self, window_id):
# test if window exists
if window_id not in self.windows.keys():
raise Exception(f'Window with window_id {window_id} does not exist. Cannot destroy.')
# destroy window object
self.windows[window_id]['window'].destroy()
# activate parent window (necessary?)
self.ActivateWindow(self.windows[window_id]['parent_id'])
# remove window record from the window manager
        self.windows.pop(window_id, None)
def CreateWindow(self, window_id, window_title, html_path, parent_window_id = None):
# fetch html
html = OpenIncludedFile('installer/dist/' + html_path)
# template html
html = html.replace('{{window_id}}', window_id)
# create record for window
self.windows[window_id] = {
'parent_id': parent_window_id,
'window_title': window_title,
'html_path': html_path,
'window': None
}
# create window
self.windows[window_id]['window'] = webview.create_window(window_title, html=html, js_api=self.api)
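# Illustrative usage sketch (added; the api object and 'index.html' page are
# assumptions, not part of this module):
# api = InstallerApi()                                # hypothetical js_api implementation
# wm = WindowManager(api)
# wm.LaunchWindow('main', 'Installer', 'index.html')
# webview.start()                                     # pywebview shows windows once start() runs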
|
StarcoderdataPython
|
5063612
|
<reponame>invenio-toaster/invenio-pidrelations
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Minimal Flask application example.
First install Invenio-PIDRelations, setup the application and load
fixture data by running:
.. code-block:: console
$ pip install -e .[all]
$ cd examples
$ ./app-setup.sh
$ ./app-fixtures.sh
Next, start the development server:
.. code-block:: console
$ export FLASK_APP=app.py FLASK_DEBUG=1
$ flask run
and open the example application in your browser:
.. code-block:: console
$ open http://127.0.0.1:5000/
To reset the example application run:
.. code-block:: console
$ ./app-teardown.sh
"""
from __future__ import absolute_import, print_function
from flask import Flask, redirect, render_template, request, url_for
from invenio_db import InvenioDB, db
from invenio_indexer import InvenioIndexer
from invenio_indexer.signals import before_record_index
from invenio_pidstore import InvenioPIDStore
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_pidstore.providers.recordid import RecordIdProvider
from invenio_pidstore.resolver import Resolver
from invenio_records import InvenioRecords
from invenio_records.api import Record
from invenio_records_ui import InvenioRecordsUI
from marshmallow import Schema, fields
from invenio_pidrelations import InvenioPIDRelations
from invenio_pidrelations.contrib.versioning import PIDNodeVersioning, \
versioning_blueprint
from invenio_pidrelations.indexers import index_relations
from invenio_pidrelations.models import PIDRelation
from invenio_pidrelations.utils import resolve_relation_type_config
# Create Flask application
app = Flask(__name__, template_folder='.')
app.config.update(dict(
TEMPLATES_AUTO_RELOAD=True,
CELERY_ALWAYS_EAGER=True,
CELERY_RESULT_BACKEND='cache',
CELERY_CACHE_BACKEND='memory'))
InvenioDB(app)
InvenioPIDStore(app)
InvenioPIDRelations(app)
app.register_blueprint(versioning_blueprint)
InvenioIndexer(app)
InvenioRecords(app)
InvenioRecordsUI(app)
before_record_index.connect(index_relations, sender=app)
record_resolver = Resolver(
pid_type='recid', object_type='rec', getter=Record.get_record
)
class SimpleRecordSchema(Schema):
"""Tiny schema for our simple record."""
recid = fields.Str()
title = fields.Str()
body = fields.Str()
@app.route('/')
def index():
relation_id = resolve_relation_type_config('version').id
heads = (
PersistentIdentifier.query
.join(
PIDRelation,
PIDRelation.parent_id == PersistentIdentifier.id)
.filter(
PIDRelation.relation_type == relation_id)
.distinct())
return render_template('index.html', heads=heads)
@app.route('/create', methods=['POST'])
def create():
create_simple_record(request.form)
return redirect(url_for('index'))
@app.template_filter()
def to_record(pid):
schema = SimpleRecordSchema()
schema.context = dict(pid=pid)
rec = schema.dump(record_resolver.resolve(pid.pid_value)[1])
return rec.data
def create_simple_record(data):
# Create the record and mint a PID
metadata, errors = SimpleRecordSchema().load(data)
parent = data.get('parent')
if parent != 'new':
metadata['conceptrecid'] = parent
rec = Record.create(metadata)
record_minter(rec.id, rec)
db.session.commit()
def record_minter(record_uuid, data):
parent = data.get('conceptrecid')
if not parent:
parent_pid = RecordIdProvider.create(object_type='rec',
object_uuid=None,
status=PIDStatus.REGISTERED).pid
data['conceptrecid'] = parent_pid.pid_value
else:
parent_pid = PersistentIdentifier.get(
pid_type=RecordIdProvider.pid_type, pid_value=parent)
provider = RecordIdProvider.create('rec', record_uuid)
data['recid'] = provider.pid.pid_value
versioning = PIDNodeVersioning(pid=parent_pid)
versioning.insert_child(child_pid=provider.pid)
return provider.pid
|
StarcoderdataPython
|
4951604
|
<gh_stars>0
from model.contact import Contact
from model.group import Group
from fixture.orm import ORMFixture
import random
orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
def test_del_contact_from_group(app, db):
    # add a group and a contact if none exist
if len(db.get_contact_list()) == 0:
app.contact.create_contact(Contact(firstname="Kseniya", email="<EMAIL>", homephone="234234234"))
if len(db.get_group_list()) == 0:
app.group.create_group(Group(name="147", header="555", footer="666"))
    # add a contact to a group if no group contains one yet
if len(db.groups_with_contacts()) == 0:
group_free = random.choice(db.get_group_list())
contact_free = random.choice(db.get_contact_list())
app.contact.add_contact_to_group(contact_free.id, group_free.id)
group = db.groups_with_contacts()[0]
contact = orm.get_contacts_in_group(Group(id='%s' % group.id))[0]
    # build the list of contacts in the group
old_group_content = orm.get_contacts_in_group(Group(id='%s' % group.id))
    # remove the contact from the group
app.contact.del_contact_from_group(contact.id, group.id)
    # update the expected group contents and fetch the new list of contacts
old_group_content.remove(contact)
new_group_content = app.contact.see_group_content(group.id)
    # compare the lists
assert sorted(old_group_content, key=Contact.id_or_max) == sorted(new_group_content, key=Contact.id_or_max)
|
StarcoderdataPython
|
3372414
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import modules.update_data
import modules.update_domaine
|
StarcoderdataPython
|
237565
|
# Smart Instant Pot Evaluation Script
# This will process a CSV file with input images and their expected digit
# detection. Each image will be processed by the detection code and the actual
# result compared to the expected to generate statistics on how well the
# algorithm is working.
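#
# A hypothetical input CSV row (the exact digit format is an assumption):
#   test_images/pot_0001.jpg,0030
# i.e. one column with the image path (relative to the CSV file) and one with
# the digit string expected on the control panel display.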
import argparse
import csv
import os
import cv2
import smart_instant_pot.detector as detector
import smart_instant_pot.digit_reader as digit_reader
# Setup command line arguments.
parser = argparse.ArgumentParser(description='Smart Instant Pot Evaluation')
parser.add_argument('input_csv',
help='input CSV with columns for input image and expected result')
parser.add_argument('result_csv',
nargs='?',
help='optional output CSV with columns for input image, expected, actual pot detected (boolean), actual digit value (string)')
parser.add_argument('--panel_image',
metavar='FILENAME',
default='/home/jovyan/work/test_images/pi_control_panel.jpg',
help='control panel template for detection (default is an image from Pi camera with wide angle lens)')
args = parser.parse_args()
# Check input file exists and load all the input image filenames and expected
# results from the CSV.
if not os.path.isfile(args.input_csv):
raise RuntimeError('Failed to find input CSV file!')
with open(args.input_csv, 'r') as infile:
reader = csv.reader(infile)
input_data = [row for row in reader]
print('Processing {0} images...'.format(len(input_data)), end='')
# Change to the directory of the CSV so images are loaded relative to its location.
start_dir = os.getcwd()
os.chdir(os.path.dirname(args.input_csv))
# Setup detector and digit reader.
if not os.path.isfile(args.panel_image):
raise RuntimeError('Failed to find control panel template image!')
pot_detector = detector.Detector(cv2.imread(args.panel_image))
pot_digit_reader = digit_reader.DigitReader()
# Loop through the input images and run detection logic to compute actual result.
results = []
found_pot = 0
correct_digits = 0
for i, row in enumerate(input_data):
# Print a dot every 10 images to show the script is still running.
if i % 10 == 0:
print('.', end='')
image_file, expected = row
# Look for the pot control panel in the input image.
panel_image = pot_detector.detect_panel(cv2.imread(image_file))
if panel_image is None:
results.append((image_file, expected, False, None))
continue
# Found a control panel, now detect the digits and save result.
found_pot += 1
digits = pot_digit_reader.read_digits(panel_image)
if digits == expected:
correct_digits += 1
results.append((image_file, expected, True, digits))
print()
# Print stats of the detection results.
total_input = len(input_data)
found_pot_percent = found_pot / total_input * 100.0
correct_digits_percent = correct_digits / total_input * 100.0
print('Found the pot in {0} images: {1:0.2f}%'.format(found_pot, found_pot_percent))
print('Correctly detected digits in {0} images: {1:0.2f}%'.format(correct_digits,
correct_digits_percent))
# Write output results if a file was specified.
if args.result_csv is not None:
# Change back to the directory the script was run to create output relative to it.
os.chdir(start_dir)
with open(args.result_csv, 'w') as outfile:
writer = csv.writer(outfile)
writer.writerows(results)
print('Wrote results to: {0}'.format(args.result_csv))
|
StarcoderdataPython
|
8080848
|
<filename>tests/unit/utils/test_request_handler.py
# Standard lib imports
from http import client
import unittest
# Third party imports
import requests_mock
# Project level imports
from pywebhooks.utils.request_handler import RequestHandler
def suite():
test_suite = unittest.TestSuite()
test_suite.addTest(WhenTestingRequestHandler())
return test_suite
class WhenTestingRequestHandler(unittest.TestCase):
def setUp(self):
pass
def test_get(self):
with requests_mock.Mocker() as mocker:
mocker.register_uri('GET', 'http://localhost?test=123',
json={'test': 'value'},
status_code=200)
request_handler = RequestHandler()
data, status = request_handler.get(
'http://localhost',
params={'test': 123},
api_key='12345',
username='johndoe'
)
self.assertEqual(status, client.OK)
self.assertEqual({'test': 'value'}, data)
self.assertEqual(request_handler.headers['username'], 'johndoe')
self.assertEqual(request_handler.headers['api-key'], '12345')
self.assertEqual(
request_handler.headers['Content-Type'], 'application/json')
self.assertEqual(
request_handler.headers['Accept'], 'application/json')
def test_put(self):
with requests_mock.Mocker() as mocker:
mocker.register_uri('PUT', 'http://localhost',
json={'test': 'value'},
status_code=200)
request_handler = RequestHandler()
data, status = request_handler.put(
'http://localhost',
json_payload={'hello': 'world'},
api_key='555',
username='janedoe'
)
self.assertEqual(status, client.OK)
self.assertEqual({'test': 'value'}, data)
self.assertEqual(request_handler.headers['username'], 'janedoe')
self.assertEqual(request_handler.headers['api-key'], '555')
self.assertEqual(
request_handler.headers['Content-Type'], 'application/json')
self.assertEqual(
request_handler.headers['Accept'], 'application/json')
def test_post(self):
with requests_mock.Mocker() as mocker:
mocker.register_uri('POST', 'http://localhost',
json={'test': 'value'},
status_code=201)
request_handler = RequestHandler()
data, status = request_handler.post(
'http://localhost',
json_payload={'hello': 'world'},
api_key='8900',
username='samjones',
event='myevent',
signature='mysignature'
)
self.assertEqual(status, client.CREATED)
self.assertEqual({'test': 'value'}, data)
self.assertEqual(request_handler.headers['username'], 'samjones')
self.assertEqual(request_handler.headers['api-key'], '8900')
self.assertEqual(request_handler.headers['event'], 'myevent')
self.assertEqual(
request_handler.headers['pywebhooks-signature'], 'mysignature')
self.assertEqual(
request_handler.headers['Content-Type'], 'application/json')
self.assertEqual(
request_handler.headers['Accept'], 'application/json')
def test_patch(self):
with requests_mock.Mocker() as mocker:
mocker.register_uri('PATCH', 'http://localhost',
json={'test': 'value'},
status_code=200)
request_handler = RequestHandler()
data, status = request_handler.patch(
'http://localhost',
json_payload={'hello': 'world'},
api_key='01245',
username='natml'
)
self.assertEqual(status, client.OK)
self.assertEqual({'test': 'value'}, data)
self.assertEqual(request_handler.headers['username'], 'natml')
self.assertEqual(request_handler.headers['api-key'], '01245')
self.assertEqual(
request_handler.headers['Content-Type'], 'application/json')
self.assertEqual(
request_handler.headers['Accept'], 'application/json')
def test_delete(self):
with requests_mock.Mocker() as mocker:
mocker.register_uri('DELETE', 'http://localhost/45678',
json={'test': 'value'},
status_code=200)
request_handler = RequestHandler()
data, status = request_handler.delete(
'http://localhost/45678',
api_key='765434',
username='birk'
)
self.assertEqual(status, client.OK)
self.assertEqual({'test': 'value'}, data)
self.assertEqual(request_handler.headers['username'], 'birk')
self.assertEqual(request_handler.headers['api-key'], '765434')
self.assertEqual(
request_handler.headers['Content-Type'], 'application/json')
self.assertEqual(
request_handler.headers['Accept'], 'application/json')
|
StarcoderdataPython
|
9686513
|
print('-=' * 10, '10 terms of an arithmetic progression', '-=' * 10)
p = int(input('First term: '))
r = int(input('Common difference: '))
d = p + (10 - 1) * r
for n in range(p, d + r, r):
print(n, end=' ')
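# Example (illustrative): with first term 2 and common difference 3, this prints
# the 10 terms: 2 5 8 11 14 17 20 23 26 29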
|
StarcoderdataPython
|
6654345
|
<reponame>timclipsham/lmdirect
"""lmdirect connection class."""
import asyncio
import logging
from datetime import datetime, timedelta
from functools import partial
from authlib.integrations.base_client.errors import OAuthError
from authlib.integrations.httpx_client import AsyncOAuth2Client
from .aescipher import AESCipher
from .const import *
from .msgs import (
AUTO_BITFIELD,
AUTO_BITFIELD_MAP,
AUTO_SCHED_MAP,
CURRENT_PULSE_COUNT,
DAYS_SINCE_BUILT,
DIVIDE_KEYS,
DRINK_OFFSET_MAP,
FIRMWARE_VER,
FRONT_PANEL_DISPLAY,
GATEWAY_DRINK_MAP,
HEATING_STATE,
HEATING_VALUES,
HOUR,
KEY_ACTIVE,
MIN,
MSGS,
OFF,
ON,
SERIAL_NUMBERS,
TIME,
TOTAL_COFFEE,
TOTAL_COFFEE_ACTIVATIONS,
TOTAL_FLUSHING,
UPDATE_AVAILABLE,
Elem,
Msg,
)
_LOGGER = logging.getLogger(__name__)
class Connection:
def __init__(self, machine_info):
"""Init LMDirect."""
self._reader = None
self._writer = None
self._read_response_task = None
self._read_reaper_task = None
self._current_status = {}
self._responses_waiting = []
self._run = True
self._callback_list = []
self._raw_callback_list = []
self._cipher = None
self._machine_info = machine_info
self._start_time = None
self._connected = False
self._lock = asyncio.Lock()
self._first_time = True
self._update_available = None
self._initialized_machine_info = False
"""Maintain temporary states for device states that take a while to update"""
self._temp_state = {}
def _get_key(self, k):
"""Construct tag name if needed."""
if isinstance(k, tuple):
k = "_".join(k)
return k
async def retrieve_machine_info(self, machine_info):
"""Retrieve the machine info from the cloud APIs."""
_LOGGER.debug(f"Retrieving machine info")
async with AsyncOAuth2Client(
client_id=machine_info[CLIENT_ID],
client_secret=machine_info[CLIENT_SECRET],
token_endpoint=TOKEN_URL,
) as client:
headers = {
"client_id": machine_info[CLIENT_ID],
"client_secret": machine_info[CLIENT_SECRET],
}
try:
await client.fetch_token(
url=TOKEN_URL,
username=machine_info[USERNAME],
password=machine_info[PASSWORD],
headers=headers,
)
except OAuthError as err:
raise AuthFail("Authorization failure") from err
except Exception as err:
raise AuthFail(f"Caught: {type(err)}, {err}") from err
"""Only retrieve info if we're missing something."""
if any(
x not in machine_info
for x in [KEY, SERIAL_NUMBER, MACHINE_NAME, MODEL_NAME]
):
cust_info = await client.get(CUSTOMER_URL)
if cust_info:
fleet = cust_info.json()["data"]["fleet"][0]
machine_info[KEY] = fleet["communicationKey"]
machine_info[SERIAL_NUMBER] = fleet["machine"]["serialNumber"]
machine_info[MACHINE_NAME] = fleet["name"]
machine_info[MODEL_NAME] = fleet["machine"]["model"]["name"]
"""Add the machine and model names to the dict so that they're avaialble for attributes"""
self._current_status.update({MACHINE_NAME: machine_info[MACHINE_NAME]})
self._current_status.update({MODEL_NAME: machine_info[MODEL_NAME]})
if any(
self._get_key(x) not in self._current_status
for x in DRINK_OFFSET_MAP.values()
):
drink_info = await client.get(
DRINK_COUNTER_URL.format(serial_number=machine_info[SERIAL_NUMBER])
)
if drink_info:
data = drink_info.json().get("data")
if data:
self._current_status.update(
{
self._get_key(GATEWAY_DRINK_MAP[x["coffeeType"]]): x["count"]
for x in data
}
)
if UPDATE_AVAILABLE not in self._current_status:
update_info = await client.get(UPDATE_URL)
if update_info:
data = update_info.json().get("data")
self._update_available = "Yes" if data else "No"
self._initialized_machine_info = True
_LOGGER.debug(f"Finished machine info")
return machine_info
async def _connect(self):
"""Conmnect to espresso machine."""
if self._connected:
return self._machine_info
_LOGGER.debug(f"Connecting")
if not self._initialized_machine_info:
try:
self._machine_info = await self.retrieve_machine_info(
self._machine_info
)
except Exception as err:
raise ConnectionFail(
f"Exception retrieving machine info: {err}"
) from err
if not self._cipher:
self._cipher = AESCipher(self._machine_info[KEY])
"""Connect to the machine."""
try:
self._reader, self._writer = await asyncio.wait_for(
asyncio.open_connection(
self._machine_info[HOST], self._machine_info[PORT]
),
timeout=3,
)
except asyncio.TimeoutError:
_LOGGER.warning("Connection Timeout, skipping")
return None
except Exception as err:
raise ConnectionFail(f"Cannot connect to machine: {err}") from err
"""Start listening for responses."""
await self.start_read_task()
self._connected = True
_LOGGER.debug("Finished Connecting")
return self._machine_info
async def start_read_task(self):
"""Start listening for responses."""
loop = asyncio.get_event_loop()
self._read_response_task = loop.create_task(
self.read_response_task(), name="Response Task"
)
"""Reap the results and any any exceptions"""
self._read_reaper_task = loop.create_task(
self.read_reaper(), name="Read Reaper"
)
async def _close(self):
"""Close the connection to the machine."""
if not self._connected:
return
if self._writer is not None:
self._writer.close()
if self._read_response_task:
self._read_response_task.cancel()
self._reader = self._writer = None
self._connected = False
_LOGGER.debug("Finished closing")
async def read_reaper(self):
_LOGGER.debug("Starting read reaper")
try:
await asyncio.gather(self._read_response_task)
except Exception as err:
_LOGGER.error(f"Exception in read_response_task: {err}")
await self._close()
self._read_response_task = None
self._first_time = False
_LOGGER.debug("Finished reaping read task")
def _call_callbacks(self, **kwargs):
"""Call the callbacks."""
if self._callback_list is not None:
[
elem(
current_status=self._current_status,
**kwargs,
)
for elem in self._callback_list
]
async def read_response_task(self):
"""Start thread to receive responses."""
BUFFER_SIZE = 1000
handle = None
_LOGGER.debug("Starting read task")
if self._start_time is None:
self._start_time = datetime.now()
while self._run:
encoded_data = await self._reader.readuntil(separator=b"%")
if encoded_data is not None:
loop = asyncio.get_event_loop()
fn = partial(self._cipher.decrypt, encoded_data[1:-1])
plaintext = await loop.run_in_executor(None, fn)
if not plaintext:
continue
await self.process_data(plaintext)
if not self._first_time:
if handle:
handle.cancel()
handle = None
"""Coalesce callbacks within a 5s window."""
handle = loop.call_later(5, self._call_callbacks)
else:
self._call_callbacks()
"""Exit if we've been reading longer than 10s since the last command."""
if datetime.now() > self._start_time + timedelta(seconds=10):
_LOGGER.debug(f"Exiting loop: {self._responses_waiting}")
"""Flush the wait list."""
self._responses_waiting = []
break
async def process_data(self, plaintext):
"""Process incoming packet."""
"""Separate the mesg from the data."""
msg_type = plaintext[0]
raw_data = plaintext[1:]
msg = raw_data[:8]
retval = True
"""Chop off the message and check byte."""
data = raw_data[8:-2]
finished = not len(self._responses_waiting)
_LOGGER.debug(f"Message={msg}, Data={data}")
msg_id = None
if msg_type == Msg.WRITE:
if Msg.RESPONSE_GOOD not in data:
_LOGGER.error(f"Command Failed: {msg}: {data}")
retval = False
else:
_LOGGER.debug(f"Command Succeeded: {msg}: {data}")
else:
"""Find the matching item or returns None."""
msg_id = next(
(
x
for x in MSGS
if MSGS[x].msg == msg and MSGS[x].msg_type == msg_type
),
None,
)
if msg_id is not None:
cur_msg = MSGS[msg_id]
"""Notify any listeners for this message."""
[
await x[1]((msg_id, x[1]), data)
for x in self._raw_callback_list
if MSGS[x[0]].msg == msg
]
if cur_msg.map is not None:
await self._populate_items(data, cur_msg)
else:
_LOGGER.error(f"Unexpected response: {plaintext}")
retval = False
if msg in self._responses_waiting:
self._responses_waiting.remove(msg)
finished = not len(self._responses_waiting)
if finished:
_LOGGER.debug("Received all responses")
return retval
def calculate_auto_sched_times(self, key):
time_on_key = self._get_key((key, ON, TIME))
hour_on_key = self._get_key((key, ON, HOUR))
min_on_key = self._get_key((key, ON, MIN))
time_off_key = self._get_key((key, OFF, TIME))
hour_off_key = self._get_key((key, OFF, HOUR))
min_off_key = self._get_key((key, OFF, MIN))
"""Set human-readable "on" time"""
self._current_status[
time_on_key
] = f"{'%02d' % self._current_status[hour_on_key]}:{'%02d' % self._current_status[min_on_key]}"
"""Set human-readable "off" time"""
self._current_status[
time_off_key
] = f"{'%02d' % self._current_status[hour_off_key]}:{'%02d' % self._current_status[min_off_key]}"
async def _populate_items(self, data, cur_msg):
def handle_cached_value(element, value):
"""See if we've stored a temporary value that may take a while to update on the machine"""
if element in self._temp_state:
if value == self._temp_state[element]:
"""Value has updated, so remove the temp value"""
_LOGGER.debug(
f"Element {element} has updated to {value}, pop the cached value"
)
self._temp_state.pop(element, None)
else:
"""Value hasn't updated yet, so use the cached value"""
_LOGGER.debug(
f"Element {element} hasn't updated yet, so use the cached value {value}"
)
value = self._temp_state[element]
return value
"""Process all the fields and populate shared dict."""
map = cur_msg.map
for elem in map:
value = None
"""Don't decode a value if we just plan to calculate it"""
if elem.index != CALCULATED_VALUE:
"""The strings are ASCII-encoded hex, so each value takes 2 bytes."""
index = elem.index * 2
size = elem.size * 2
"""Extract value for this field."""
value = data[index : index + size]
if elem.type == Elem.INT:
"""Convert from ascii-encoded hex."""
value = int(value, 16)
raw_key = map[elem]
"""Construct key name if needed."""
key = self._get_key(raw_key)
if any(x in key for x in DIVIDE_KEYS):
value = value / 10
elif key == FIRMWARE_VER:
value = "%0.2f" % (value / 100)
elif key in SERIAL_NUMBERS:
value = "".join(
[chr(int(value[i : i + 2], 16)) for i in range(0, len(value), 2)]
)
"""Chop off any trailing nulls."""
value = value.partition("\0")[0]
elif key == AUTO_BITFIELD:
bitfield = value
for item in AUTO_BITFIELD_MAP:
setting = ENABLED if bitfield & 0x01 else DISABLED
processed_key = self._get_key(AUTO_BITFIELD_MAP[item])
self._current_status[processed_key] = handle_cached_value(
processed_key, setting
)
bitfield = bitfield >> 1
elif raw_key in DRINK_OFFSET_MAP:
if key == TOTAL_FLUSHING:
value = (
self._current_status[TOTAL_COFFEE_ACTIVATIONS]
- self._current_status[TOTAL_COFFEE]
)
offset_key = self._get_key(DRINK_OFFSET_MAP[raw_key])
if key not in self._current_status:
"""If we haven't seen the value before, calculate the offset."""
self._current_status.update(
{offset_key: value - self._current_status.get(offset_key, 0)}
)
"""Apply the offset to the value."""
value = value - self._current_status.get(offset_key, 0)
elif key == DAYS_SINCE_BUILT:
"""Convert hours to days."""
value = round(value / 24)
elif key == HEATING_STATE:
value = [x for x in HEATING_VALUES if HEATING_VALUES[x] & value]
"""Don't add attribute and remove it if machine isn't currently running."""
if not value:
self._current_status.pop(key, None)
continue
elif key in [KEY_ACTIVE, CURRENT_PULSE_COUNT]:
"""Don't add attributes and remove them if machine isn't currently running."""
if not value:
self._current_status.pop(key, None)
continue
elif key == FRONT_PANEL_DISPLAY:
value = (
bytes.fromhex(value)
.decode("latin-1")
.replace("\xdf", "\u00b0") # Degree symbol
.replace(
"\xdb", "\u25A1"
) # turn a block into an outline block (heating element off)
.replace(
"\xff", "\u25A0"
) # turn \xff into a solid block (heating element on)
)
elif key in AUTO_SCHED_MAP.values() and elem.index == CALCULATED_VALUE:
self.calculate_auto_sched_times(key)
continue
self._current_status[key] = handle_cached_value(key, value)
async def _send_msg(self, msg_id, data=None, base=None):
"""Send command to the espresso machine."""
msg = MSGS[msg_id]
_LOGGER.debug(f"Sending {msg.msg} with {data} {base}")
await self._send_raw_msg(msg.msg, msg.msg_type, data, base)
async def _send_raw_msg(self, msg, msg_type, data=None, base=None):
def checksum(buffer):
"""Compute check byte."""
buffer = bytes(buffer, "utf-8")
return "%0.2X" % (sum(buffer) % 256)
"""Prevent race conditions - can be called from different tasks."""
async with self._lock:
"""Connect if we don't have an active connection."""
result = await self._connect()
if not result:
raise ConnectionFail("Connection failed.")
if not self._writer:
raise ConnectionFail(f"self._writer={self._writer}")
"""If a key was provided, replace the second byte of the message."""
msg_to_send = msg if not base else msg[:2] + base + msg[4:]
plaintext = msg_type + msg_to_send
if data is not None:
plaintext += data
"""Add the check byte."""
plaintext += checksum(plaintext)
loop = asyncio.get_event_loop()
fn = partial(self._cipher.encrypt, plaintext)
ciphertext = (
"@" + (await loop.run_in_executor(None, fn)).decode("utf-8") + "%"
)
self._writer.write(bytes(ciphertext, "utf-8"))
await self._writer.drain()
"""Remember that we're waiting for a response."""
self._responses_waiting.append(msg_to_send)
"""Note when the command was sent."""
self._start_time = datetime.now()
class AuthFail(Exception):
"""Error to indicate there is invalid auth info."""
def __init__(self, msg):
super().__init__(msg)
class ConnectionFail(Exception):
"""Error to indicate there is no connection."""
def __init__(self, msg):
super().__init__(msg)
|
StarcoderdataPython
|
3449069
|
"""
Hello World example
"""
import quasargui
from quasargui import *
def run_program():
layout.notify('Hello, {name}!'.format(
name=input_name.value
))
input_name = QInput(
# # uncomment these lines if you want to display notification message on change:
# value='',
# events={'change': run_program}
)
btn_submit = QButton(
'Submit',
classes='text-primary',
props={'unelevated': True, 'size': 'lg'},
events={'click': run_program})
layout = Div(
styles={
'max-width': '30em',
'margin-left': 'auto',
'margin-right': 'auto',
},
classes='q-mt-xl text-center',
children=[
"What's your name?",
input_name,
btn_submit])
quasargui.run(layout, size=(500, 300))
|
StarcoderdataPython
|
34413
|
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlVerlichtingstoestelModelnaam(KeuzelijstField):
"""De modelnaam van het verlichtingstoestel."""
naam = 'KlVerlichtingstoestelModelnaam'
label = 'Verlichtingstoestel modelnaam'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#KlVerlichtingstoestelModelnaam'
definition = 'De modelnaam van het verlichtingstoestel.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVerlichtingstoestelModelnaam'
options = {
'ARC': KeuzelijstWaarde(invulwaarde='ARC',
label='ARC',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/ARC'),
'Belgica': KeuzelijstWaarde(invulwaarde='Belgica',
label='Belgica',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Belgica'),
'Calypso': KeuzelijstWaarde(invulwaarde='Calypso',
label='Calypso',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Calypso'),
'Corus': KeuzelijstWaarde(invulwaarde='Corus',
label='Corus',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Corus'),
'DTN': KeuzelijstWaarde(invulwaarde='DTN',
label='DTN',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/DTN'),
'Evolo': KeuzelijstWaarde(invulwaarde='Evolo',
label='Evolo',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Evolo'),
'Focal': KeuzelijstWaarde(invulwaarde='Focal',
label='Focal',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Focal'),
'GSM': KeuzelijstWaarde(invulwaarde='GSM',
label='GSM',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/GSM'),
'GTMB': KeuzelijstWaarde(invulwaarde='GTMB',
label='GTMB',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/GTMB'),
'GTNB': KeuzelijstWaarde(invulwaarde='GTNB',
label='GTNB',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/GTNB'),
'GZM': KeuzelijstWaarde(invulwaarde='GZM',
label='GZM',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/GZM'),
'Gema': KeuzelijstWaarde(invulwaarde='Gema',
label='Gema',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Gema'),
'HCI-TS': KeuzelijstWaarde(invulwaarde='HCI-TS',
label='HCI-TS',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/HCI-TS'),
'Iridium': KeuzelijstWaarde(invulwaarde='Iridium',
label='Iridium',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Iridium'),
'MNF300': KeuzelijstWaarde(invulwaarde='MNF300',
label='MNF300',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/MNF300'),
'MWF230': KeuzelijstWaarde(invulwaarde='MWF230',
label='MWF230',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/MWF230'),
'MY11': KeuzelijstWaarde(invulwaarde='MY11',
label='MY11',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/MY11'),
'Neos': KeuzelijstWaarde(invulwaarde='Neos',
label='Neos',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Neos'),
'Onyx': KeuzelijstWaarde(invulwaarde='Onyx',
label='Onyx',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Onyx'),
'RT3NB': KeuzelijstWaarde(invulwaarde='RT3NB',
label='RT3NB',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/RT3NB'),
'RT3SB': KeuzelijstWaarde(invulwaarde='RT3SB',
label='RT3SB',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/RT3SB'),
'RXN': KeuzelijstWaarde(invulwaarde='RXN',
label='RXN',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/RXN'),
'RXS': KeuzelijstWaarde(invulwaarde='RXS',
label='RXS',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/RXS'),
'Radial': KeuzelijstWaarde(invulwaarde='Radial',
label='Radial',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Radial'),
'SRS201': KeuzelijstWaarde(invulwaarde='SRS201',
label='SRS201',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/SRS201'),
'Safir': KeuzelijstWaarde(invulwaarde='Safir',
label='Safir',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Safir'),
'Saturnus': KeuzelijstWaarde(invulwaarde='Saturnus',
label='Saturnus',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Saturnus'),
'Squalo': KeuzelijstWaarde(invulwaarde='Squalo',
label='Squalo',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Squalo'),
'Syntra': KeuzelijstWaarde(invulwaarde='Syntra',
label='Syntra',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Syntra'),
'VTP': KeuzelijstWaarde(invulwaarde='VTP',
label='VTP',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/VTP'),
'Z18': KeuzelijstWaarde(invulwaarde='Z18',
label='Z18',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Z18'),
'Z2': KeuzelijstWaarde(invulwaarde='Z2',
label='Z2',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Z2'),
'Z21': KeuzelijstWaarde(invulwaarde='Z21',
label='Z21',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/Z21'),
'ampera': KeuzelijstWaarde(invulwaarde='ampera',
label='Ampera',
definitie='Ampera',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/ampera'),
'andere': KeuzelijstWaarde(invulwaarde='andere',
label='andere',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/andere'),
'brugleuning': KeuzelijstWaarde(invulwaarde='brugleuning',
label='brugleuning',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/brugleuning'),
'clear-field': KeuzelijstWaarde(invulwaarde='clear-field',
label='ClearField',
definitie='ClearField',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/clear-field'),
'digi-street': KeuzelijstWaarde(invulwaarde='digi-street',
label='DigiStreet',
definitie='DigiStreet',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/digi-street'),
'izylum': KeuzelijstWaarde(invulwaarde='izylum',
label='Izylum',
definitie='Izylum',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/izylum'),
'luma': KeuzelijstWaarde(invulwaarde='luma',
label='Luma',
definitie='Luma',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/luma'),
'lumi-street': KeuzelijstWaarde(invulwaarde='lumi-street',
label='LumiStreet',
definitie='LumiStreet',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/lumi-street'),
'projector': KeuzelijstWaarde(invulwaarde='projector',
label='projector',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/projector'),
'teceo': KeuzelijstWaarde(invulwaarde='teceo',
label='Teceo',
definitie='Teceo',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerlichtingstoestelModelnaam/teceo')
}
|
StarcoderdataPython
|
129805
|
<gh_stars>0
from turtle import *
from random import randrange
from freegames import square, vector
food = vector(0, 0)
snake = [vector(10, 0)]
aim = vector(0, -10)
def change(x, y):
"Change snake direction."
aim.x = x
aim.y = y
def inside(head):
"Return True if head inside boundaries."
return -200 < head.x < 190 and -200 < head.y < 190
def move():
"Move snake forward one segment."
head = snake[-1].copy()
head.move(aim)
if not inside(head) or head in snake:
square(head.x, head.y, 9, 'red')
update()
return
snake.append(head)
if head == food:
print('Snake:', len(snake))
food.x = randrange(-15, 15) * 10
food.y = randrange(-15, 15) * 10
else:
snake.pop(0)
clear()
for body in snake:
square(body.x, body.y, 9, 'green')
square(food.x, food.y, 9, 'red')
update()
ontimer(move, 100)
hideturtle()
tracer(False)
listen()
onkey(lambda: change(10, 0), 'Right')
onkey(lambda: change(-10, 0), 'Left')
onkey(lambda: change(0, 10), 'Up')
onkey(lambda: change(0, -10), 'Down')
move()
done()
|
StarcoderdataPython
|
1743844
|
#!/usr/bin/env python2
import re
import pprint
import subprocess
descs = {
"double": "Double",
"galvanic_mock": "Galvanic-mock",
"mock_derive": "Mock_Derive",
"mock_it": "Mock-it",
"mockall": "Mockall",
"mockers": "Mockers",
"mockiato": "Mockiato",
"mocktopus": "Mocktopus",
"pseudo": "Pseudo",
"simulacrum": "Simulacrum",
"associated_types": "Associated types",
"checkpoint": "Checkpoints",
"closures": "Closures",
"reference_parameters": "Reference parameters",
"consume_parameters": "Consume parameters",
"consume_self": "Consume self",
"doctest": "Doctest",
"external_trait": "External traits",
"foreign": "Foreign",
"generic_method": "Generic methods",
"generic_method_with_lifetime": "Generic methods with lifetime parameters",
"generic_return": "Generic return",
"generic_struct": "Generic structs",
"generic_trait": "Generic traits",
"impl_trait": "Impl Trait",
"inherited_trait": "Inherited traits",
"match_method": "Match function",
"mock_struct": "Structs",
"mock_trait": "Traits",
"multi_trait": "Multiple traits",
"return_call_with_args": "Return call with args",
"return_reference": "Return reference",
"return_mutable_reference": "Return mutable reference",
"return_owned": "Return owned",
"return_parameters": "Return parameters",
"send": "Send",
"sequence": "Sequence",
"static_method": "Static methods",
"times_range": "Times range",
"where_clause": "Where clauses",
"derive": "Derive",
"fallback": "Fallback",
"match_combo": "Match combinations",
"match_constant": "Match constant",
"match_operator": "Match operator",
"match_pattern": "Match pattern",
"match_range": "Match range",
"match_wildcard": "Match wildcard",
"modules": "Mock modules",
"return_constant": "Return a constant",
"return_default": "Return default",
"return_panic": "Return panic",
"times_once": "Times once",
"times_any": "Times any",
"times_n": "Times n",
"times_never": "Times never",
"many_args": "Maximum arguments",
"rustc": "Rustc",
"first_release": "First release",
"version": "Tested version",
"link": "Current version",
}
def format_cell(s):
words = s.split(" ")
result = words[-1]
text = " ".join(words[0:-1])
if '<img ' in text:
bg = "white"
elif result == "ok":
if re.match("^0\.[0-9]+\.[0-9]+", text):
bg = "#fe7d37"
else:
bg = "#ADEBAD"
elif result == "warn":
bg = "#FFEF99"
elif result == "-":
bg = "white"
else:
bg = "#EB9999"
if not text:
text = {"error": "no", "ok": "yes", "FAILED": "no"}[result]
return "{background:%s}.%s" % (bg, text)
def print_row(feature, results):
result_details = "|".join([format_cell(results[l][feature])
for l in libnames])
print "|%21s|%s|" % (descs[feature], result_details)
# First, run the tests and collect results
results = {}
p1 = subprocess.Popen(["cargo", "+nightly", "test", "-v",
"--no-fail-fast", "--", "--nocapture", "--test-threads=1"],
stdout=subprocess.PIPE)
output = p1.communicate()[0]
for line in output.splitlines():
match = re.match("^test t_(\w+)::(?:mod_t::)?t::(\w+) \.\.\. (.+)$", line)
if not match:
match = re.match(
"^test src/t_(\w+)\.rs - \w+::(doctest) \(line \d+\) \.\.\. (\w+)", line)
if match:
lib = match.group(1)
feature = match.group(2)
result = match.group(3)
if not results.has_key(lib):
results[lib] = {}
results[lib][feature] = result
# Manually add a few more data
results['double']['rustc'] = "stable ok"
results['galvanic_mock']['rustc'] = "nightly warn"
# results['mock_derive']['rustc'] = "nightly < 1.28.0 error"
results['mockall']['rustc'] = "stable ok"
results['mockers']['rustc'] = "stable ok"
results['mockiato']['rustc'] = "stable ok"
results['mocktopus']['rustc'] = "nightly warn"
results['pseudo']['rustc'] = "stable ok"
results['simulacrum']['rustc'] = "stable ok"
results['mock_it']['rustc'] = "stable ok"
results['double']['first_release'] = "Dec-12-2017 -"
results['galvanic_mock']['first_release'] = "Aug-13-2017 -"
# results['mock_derive']['first_release'] = "Jul-16-2017 -"
results['mockall']['first_release'] = "Jul-3-2019 -"
results['mockers']['first_release'] = "Apr-6-2016 -"
results['mockiato']['first_release'] = "Feb-11-2019 -"
results['mocktopus']['first_release'] = "Sep-5-2017 -"
results['pseudo']['first_release'] = "Mar-23-2017 -"
results['simulacrum']['first_release'] = "Dec-17-2017 -"
results['mock_it']['first_release'] = "Mar-11-2018 -"
# Finally, generate the table
libnames = sorted(results.keys())
lib_headers = "|_. ".join([descs[l] for l in libnames])
print "|_. |_.%s|" % lib_headers
essential_features = ["associated_types", "checkpoint", "closures",
"reference_parameters", "consume_parameters", "consume_self", "doctest",
"external_trait", "foreign", "generic_method",
"generic_method_with_lifetime", "generic_return",
"generic_struct", "generic_trait", "inherited_trait", "match_method",
"mock_struct", "mock_trait", "multi_trait", "return_call_with_args",
"return_reference", "return_mutable_reference", "return_owned",
"return_parameters", "send", "sequence", "static_method", "times_range",
"where_clause"]
convenience_features = [ "derive", "fallback", "impl_trait", "match_combo",
"match_constant", "match_operator", "match_pattern", "match_range",
"match_wildcard", "modules", "return_constant", "return_default",
"return_panic", "times_once", "times_any", "times_n", "times_never"]
other_features = [ "many_args", "rustc", "first_release", "version", "link"]
print "|\\10=. Essential Features|"
for feature in essential_features:
print_row(feature, results)
print "|\\10=. Convenience Features|"
for feature in convenience_features:
print_row(feature, results)
print "|\\10=. Other|"
for feature in other_features:
print_row(feature, results)
|
StarcoderdataPython
|
6525502
|
<reponame>Pennsieve/timeseries-processor<gh_stars>0
# -*- coding: utf-8 -*-
"""
Modified:
<NAME> (20180423)
Created on Mon Jan 2 12:49:21 2017
setup.py file for pymef3 library
Ing.,Mgr. (MSc.) <NAME>
Biomedical engineering
International Clinical Research Center
St. Anne's University Hospital in Brno
Czech Republic
&
Mayo systems electrophysiology lab
Mayo Clinic
200 1st St SW
Rochester, MN
United States
"""
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
# the c extension module
mef_file_ext = Extension("pymef.mef_file.pymef3_file", ["pymef/mef_file/pymef3_file.c"])
setup(name = "pymef",
version="0.2.0",
packages = ["pymef","pymef.mef_file"],
cmdclass={'build_ext':build_ext},
ext_modules=[mef_file_ext],
) # This line needed for MSEL (+ the import at the beginning)
|
StarcoderdataPython
|
8014384
|
<reponame>NazaninBayati/SCA<gh_stars>0
import understand
import sys
file_list=[]
final_list=[]
file_dependency=[]
file_dependentby=[]
cls_CallPairs=[]
CallPairs=[]
def included_files(db):
for file in sorted(db.ents("File")):
file_dependency.append(list(file.depends()))
def used_by_file(db):
for file in sorted(db.ents("File")):
file_dependentby.append(list(file.dependsby()))
def file_dependency_list(file):
i = 0
dep_list=[]
while i < len(file_dependency):
j = 1
while j < len(file_dependency[i]):
if file_dependency[i][0]==file:
dep_list.append(str(file_dependency[i][j]))
j = j +1
i = i + 1
return dep_list
def file_dependent_list(file):
i = 0
dep_list=[]
while i < len(file_dependentby):
j = 1
while j < len(file_dependentby[i]):
if file_dependentby[i][0]==file:
dep_list.append(str(file_dependentby[i][j]))
j = j +1
i = i + 1
return dep_list
def Language(pfix):
    # `pfix` is already the extension string (e.g. "py"); do not truncate it to its
    # first character, otherwise the multi-character extensions below can never match.
db_analyze_language = ''
if pfix == 'cpp' or pfix == 'h' or pfix == 'C' or pfix == 'hpp' or pfix == 'hxx' or pfix == 'cxx' or \
pfix == 'H' or pfix == 'inl' or pfix == 'cc' or pfix == 'hh':
db_analyze_language = 'C++'
if pfix == 'c':
db_analyze_language = 'C'
    if pfix == 'a' or pfix == 'ads' or pfix == 'gpr' or pfix == 'ada' or pfix == 'adb':
db_analyze_language = 'Ada'
if pfix == 'cgi' or pfix == 'pl' or pfix == 'pm':
db_analyze_language = 'Perl'
if pfix == 'css':
db_analyze_language = 'CSS'
if pfix == 'dpr' or pfix == 'dfm':
db_analyze_language = 'Delphi'
if pfix == 'f77' or pfix == 'f' or pfix == 'f90' or pfix == 'for' or pfix == 'f03' or \
pfix == 'f95' or pfix == 'ftn':
db_analyze_language = 'Fortran'
if pfix == 'jov' or pfix == 'cpl':
        db_analyze_language = 'Jovial'
if pfix == 'mm':
db_analyze_language = 'Objective-C++'
if pfix == 'py':
db_analyze_language = 'Python'
if pfix == 'sql':
db_analyze_language = 'Sql'
if pfix == 'tsx' or pfix == 'ts':
db_analyze_language = 'TypeScript'
if pfix == 'vb':
db_analyze_language = 'Basic'
if pfix == 'vhdl' or pfix == 'vhd':
db_analyze_language = 'VHDL'
if pfix == 'asm' or pfix == 's':
db_analyze_language = 'Assembly'
if pfix == 'cbl' or pfix == 'cob' or pfix == 'cpy':
db_analyze_language = 'COBOL'
if pfix == 'htm' or pfix == 'html':
db_analyze_language = 'Html'
if pfix == 'js':
db_analyze_language = 'Javascript'
if pfix == 'pas' or pfix == 'sp':
db_analyze_language = 'Pascal'
if pfix == 'plm':
db_analyze_language = 'Plm'
if pfix == 'tcl':
db_analyze_language = 'Tcl'
if pfix == 'txt' or pfix == 'TXT':
db_analyze_language = 'Text'
if pfix == 'vh' or pfix == 'v':
db_analyze_language = 'Verilog'
if pfix == 'xml':
db_analyze_language = 'Xml'
if pfix == 'bat':
db_analyze_language = 'MSDos Batch'
if pfix == 'cs':
db_analyze_language = 'C#'
if pfix == 'java':
db_analyze_language = 'Java'
if pfix == 'm':
db_analyze_language = 'Objective-C'
if pfix == 'php':
db_analyze_language = 'Php'
return db_analyze_language
def fileList(file):
location=[]
last2=[]
file_qname = file.longname()
file_name_Qualified = file_qname.split('/')
location.append(file_qname)
last = file_name_Qualified[file_name_Qualified.__len__() - 1].split('.')
last2.append(file_name_Qualified[0:len(file_name_Qualified)-1])
last2[0].append(last[0])
last2=last2[0]
last = ".".join(last2[1:last2.__len__()])
qlast = []
qlast.append(location[0])
qlast.append(last)
file_name_Qualified = '.'.join(qlast[1:qlast.__len__()-1])
return (qlast[1], location[0])
def cls_printCallPairs(ent):
lineString = ''
defineAref = ent.ref("definein")
if defineAref is not None:
lineString = ent.longname() + ","
lineString += defineAref.file().longname()
return lineString
def printCallPairs(ent):
lineString = ''
defineAref = ent.ref("definein")
if defineAref is not None:
lineString = ent.longname() + ","
lineString += defineAref.file().longname()
return lineString
def class_cont(file):
list_ret = []
for item in cls_CallPairs:
list_cls = item.split(',')
if list_cls[1] == str(file.longname()):
list_ret.append(list_cls[0])
return list_ret
def function_cont(file):
list_ret = []
for item in CallPairs:
list_func = item.split(',')
if list_func[1] == str(file.longname()):
list_ret.append(list_func[0])
return list_ret
def metric(file):
file_metric = []
metrics = file.metric(file.metrics())
for k, v in sorted(metrics.items()):
file_metric.append(v)
return file_metric
if __name__ == '__main__':
# Open Database
args = sys.argv
# db = understand.open(args[1])
db = understand.open("/home/nazanin/cephDB.udb")
included_files(db)
used_by_file(db)
for ent in sorted(db.ents("class,method,struct,procedure"), key=lambda ent: ent.name()):
seen = {}
calls = cls_printCallPairs(ent)
if calls != '':
cls_CallPairs.append(calls)
for ent in sorted(db.ents("function ~unknown ~unresolved"), key=lambda ent: ent.name()):
seen = {}
calls = printCallPairs(ent)
if calls != '':
CallPairs.append(calls)
for file in sorted(db.ents("File")):
if file.library() != "Standard":
file_name=(file.name())
db_analyze_pfix = file_name.split('.')
if len(db_analyze_pfix) > 1:
file_lang = (Language(db_analyze_pfix[1]))
name_qname_loc = fileList(file)
file_metric = metric(file)
contained_classes = class_cont(file)
contained_function = function_cont(file)
dep = (file_dependency_list(file))
depBy = (file_dependent_list(file))
file_list.append(str(file.name()))
file_list.append(file_lang)
file_list.append(name_qname_loc[0])
file_list.append(name_qname_loc[1])
file_list.append(file_metric)
file_list.append(contained_function)
file_list.append(contained_classes)
file_list.append(dep)
file_list.append(depBy)
final_list.append(file_list)
print(final_list)
header = 'Filename,ProgrammingLanguage,FileQualifiedname,Location,Metrics,ContainedFunctions,ContainedClasses,calledFiles,CalledByFiles'
with open('FileLevel Report.txt', 'w') as classhandle:
i = 0
classhandle.write(header)
classhandle.write('\n')
for listitem in file_list:
classhandle.write('%s;' % listitem)
i = i + 1
if i % 9 == 0: classhandle.write('\n')
|
StarcoderdataPython
|
1864098
|
import itertools
import logging
import os
from typing import Collection, List
import click
from exposurescrawler.dbt.exposure import DbtExposure
from exposurescrawler.dbt.manifest import DbtManifest
from exposurescrawler.tableau.graphql_client import (
retrieve_custom_sql,
retrieve_native_sql,
)
from exposurescrawler.tableau.models import WorkbookModelsMapping
from exposurescrawler.tableau.rest_client import TableauRestClient
from exposurescrawler.utils.logger import logger
from exposurescrawler.utils.query_parsing import search_model_in_query
def _should_ignore_workbook(workbook, projects_to_ignore: Collection[str]) -> bool:
return workbook.project_name in projects_to_ignore
def _parse_tables_from_sql(workbooks_sqls: WorkbookModelsMapping, models) -> WorkbookModelsMapping:
"""
    Receives a map of workbook (references) and their respective SQLs (list), and looks
    for occurrences of `models` in the SQLs.
:param workbooks_sqls: map of workbook (references) to SQLs
:param models: the node dict coming from the manifest.json
:return: another map, but instead of workbooks to SQLs, it
has workbooks to models
"""
logger().info('⚙️ Parsing SQL: looking for references to models')
output: WorkbookModelsMapping = {}
for workbook_reference, custom_sqls in workbooks_sqls.items():
# a list of dbt model represented as their original dicts from the manifest
all_found: List[dict] = []
for custom_sql in custom_sqls:
if models_found_query := search_model_in_query(custom_sql, models):
all_found.extend(models_found_query.values())
if all_found:
logger().debug(
' ✅ {}: found models {}'.format(
workbook_reference.name,
[model['materialized_name'] for model in all_found],
)
)
output[workbook_reference] = all_found
else:
logger().debug(f' ❌ {workbook_reference.name}: found no models')
logger().info(f'⚙️ Found {len(output.keys())} workbooks with linked models')
return output
def tableau_crawler(
manifest_path: str,
dbt_package_name: str,
tableau_projects_to_ignore: Collection[str],
verbose: bool,
) -> None:
# Enable verbose logging
if verbose:
logger().setLevel(logging.DEBUG)
# Parse arguments
manifest_path = os.path.expandvars(manifest_path)
manifest_path = os.path.expanduser(manifest_path)
# Parse the dbt manifest JSON file
manifest: DbtManifest = DbtManifest.from_file(manifest_path)
# Retrieve all models
models = manifest.retrieve_models_and_sources()
# Configure the Tableau REST client
tableau_client = TableauRestClient(
os.environ['TABLEAU_URL'],
os.environ['TABLEAU_USERNAME'],
os.environ['TABLEAU_PASSWORD'],
)
# Retrieve custom SQLs and find model references
workbooks_custom_sqls = retrieve_custom_sql(tableau_client, 'snowflake')
workbooks_custom_sql_models = _parse_tables_from_sql(workbooks_custom_sqls, models)
# Retrieve native SQLs and find model references
workbooks_native_sqls = retrieve_native_sql(tableau_client, 'snowflake')
workbooks_native_sql_models = _parse_tables_from_sql(workbooks_native_sqls, models)
# Merge the results by chaining the iterables
# Here it is fine to have duplicates on the list
# Duplicates will be handled in the DbtExposure class
workbooks_models: WorkbookModelsMapping = {}
for workbook_reference, found in itertools.chain(
workbooks_custom_sql_models.items(), workbooks_native_sql_models.items()
):
workbooks_models.setdefault(workbook_reference, []).extend(found)
logger().info('')
logger().info(
'💡 Results merged: {} + {} = {} workbooks'.format(
len(workbooks_custom_sql_models),
len(workbooks_native_sql_models),
len(workbooks_models),
)
)
logger().info('')
logger().info('🌏 Retrieving workbooks and authors metadata from the Tableau REST API')
# For every workbook and the models found, create exposures and add
# to the manifest (in-memory)
for workbook_reference, found in workbooks_models.items():
workbook = tableau_client.retrieve_workbook(workbook_reference.id)
owner = tableau_client.retrieve_user(workbook.owner_id)
if _should_ignore_workbook(workbook, tableau_projects_to_ignore):
logger().debug(
f'⏩ Skipping workbook: {workbook.name} ({workbook.project_name} is ignored)'
)
continue
exposure = DbtExposure.from_tableau_workbook(dbt_package_name, workbook, owner, found)
manifest.add_exposure(exposure, found)
# Terminate the Tableau client
tableau_client.sign_out()
# Persist the modified manifest
logger().info('')
logger().info(f'💾 Writing results to file: {manifest_path}')
manifest.save(manifest_path)
@click.command()
@click.option(
'--manifest-path',
required=True,
metavar='PATH',
help='The path to the dbt manifest artifact',
)
@click.option(
'--dbt-package-name',
required=True,
metavar='PROJECT_NAME',
    help='The name of the dbt package where the exposures should be added. If in doubt, check the '
'name of your dbt project on dbt_project.yml',
)
@click.option(
'--tableau-ignore-projects',
'tableau_projects_to_ignore',
default=[],
help='The name of Tableau projects (folders) to ignore',
)
@click.option('-v', '--verbose', is_flag=True, default=False, help='Enable verbose logging')
def tableau_crawler_command(
manifest_path: str,
dbt_package_name: str,
tableau_projects_to_ignore: Collection[str],
verbose: bool,
):
tableau_crawler(manifest_path, dbt_package_name, tableau_projects_to_ignore, verbose)
if __name__ == '__main__':
tableau_crawler_command()
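# Illustrative programmatic invocation (the paths and names below are placeholders,
# not taken from the repository); the Tableau credentials come from the TABLEAU_URL,
# TABLEAU_USERNAME and TABLEAU_PASSWORD environment variables read above:
#
#   tableau_crawler(
#       manifest_path='~/dbt-project/target/manifest.json',
#       dbt_package_name='my_dbt_project',
#       tableau_projects_to_ignore=['Archive'],
#       verbose=True,
#   )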
|
StarcoderdataPython
|
60716
|
TARGET_URL = '/{tail:.*}'
EXCLUDED_HEADERS = {
# 'Accept-CH',
# 'Accept-CH-Lifetime',
# 'Cache-Control',
# 'Content-Encoding',
# 'Content-Security-Policy',
# 'Content-Type',
# 'Date',
# 'Expires',
# 'Last-Modified',
# 'P3P',
# 'Set-Cookie',
'Transfer-Encoding',
'X-Target-Url',
'Content-Length',
# 'Host',
# 'X-Content-Type-Options',
# 'X-Frame-Options'
}
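# A minimal sketch of how this exclusion set is typically applied when forwarding an
# upstream response (the `upstream_headers` dict below is hypothetical):
#
#   forwarded = {k: v for k, v in upstream_headers.items() if k not in EXCLUDED_HEADERS}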
|
StarcoderdataPython
|
4929907
|
<filename>students/k3343/practical_works/Kozyreva_Alena/simple_django_web_project/django_project_kozyreva/project_first_app/migrations/0003_delete_geeksmodel.py
# Generated by Django 3.0.5 on 2020-04-15 16:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0002_auto_20200415_1843'),
]
operations = [
migrations.DeleteModel(
name='GeeksModel',
),
]
|
StarcoderdataPython
|
9747679
|
<filename>src_assignemnt_2/src2.py
import math
def f():
total = 0
for x in range(1000000):
total += math.sin(x)
return total
print(f())
|
StarcoderdataPython
|
1952155
|
def inside_or_outside(number, lower, upper):
    if lower <= number < upper:
print("Inside")
else:
print("Outside")
|
StarcoderdataPython
|
3545124
|
Import("env")
import gzip
import os
import sys
'''
try:
from css_html_js_minify import process_single_html_file, process_single_js_file, process_single_css_file, html_minify, js_minify, css_minify
except ImportError:
env.Execute("$PYTHONEXE -m pip install -vvv css_html_js_minify")
from css_html_js_minify import process_single_html_file, process_single_js_file, process_single_css_file, html_minify, js_minify, css_minify
def minify(filename, outfilename):
_ , file_extension = os.path.splitext(filename)
file_extension = file_extension.lower()
if file_extension == '.html' or file_extension == '.htm':
process_single_html_file(filename,output_path=outfilename)
elif file_extension == '.css':
process_single_css_file(filename,output_path=outfilename)
elif file_extension == '.js':
process_single_js_file(filename,output_path=outfilename)
'''
def compress(filename, outfilename):
with open(filename, 'rb') as src, gzip.open(outfilename, 'wb') as dst:
dst.writelines(src)
def bin2c(filename, outfilename):
varname = os.path.basename(filename)
varname=varname.replace('.','_')
varname_size = varname+'_size'
with open(outfilename,'wb') as result_file:
result_file.write(b"//Don't edit this file, it's automatically generated\n")
result_file.write(b'const long int %s = %d;\n' % (varname_size.encode('utf-8'), os.stat(filename).st_size))
result_file.write(b'const unsigned char %s[] = {\n' % varname.encode('utf-8'))
counter = 0
first = True
for b in open(filename, 'rb').read():
counter = counter + 1
if counter == 16:
result_file.write(b'\n')
counter = 0
if first:
first = False
else:
result_file.write(b',')
result_file.write(b'0x%02X' % b)
result_file.write(b'\n};')
with os.scandir('./main/html') as it :
for entry in it:
        if entry.is_file() and (entry.name.endswith('.html') or entry.name.endswith('.css') or entry.name.endswith('.js')):
'''
min_file = entry.path+'.min'
minify(entry.path,min_file)
'''
gz_file=entry.path+'.gz'
compress(entry.path, gz_file)
h_file = gz_file+'.h'
bin2c(gz_file,h_file)
'''os.remove(min_file)'''
os.remove(gz_file)
|
StarcoderdataPython
|
11233611
|
<reponame>RafayelGardishyan/Memorears
from django.db import models
# Create your models here.
class Theme(models.Model):
name = models.CharField(max_length=255)
description = models.TextField(max_length=1000)
class Image(models.Model):
image = models.ImageField(upload_to="images")
theme = models.ForeignKey(Theme, on_delete=models.CASCADE)
|
StarcoderdataPython
|
1828055
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 2018
@author: kfinc
"""
import pandas as pd
import numpy as np
from sklearn import preprocessing
def motion_24_friston(dataframe):
"""Simple function that calculates 24 motion parameters from pandas dataframe.
Parameters
----------
dataframe: pandas dataframe including 6 movement parameters with headers
Returns
-------
motion_24_friston: pandas dataframe including 24 motion parameters
- the first 6 are the motion parameters
- the next 6 are the temporal difference of motion parameters ('_td' suffix)
- the next 12 are the square of the motion parameters and the differenced values ('_sqrt' suffix)
"""
motion_24_friston = dataframe
for col in dataframe.columns:
temp_diff = np.roll(dataframe[col], 1, axis = 0)
temp_diff[0] = 0
temp_diff = pd.DataFrame(temp_diff)
motion_24_friston[col + '_td'] = temp_diff
for col in motion_24_friston.columns:
sqrt = motion_24_friston[col] ** 2
motion_24_friston[col + '_sqrt'] = sqrt
return motion_24_friston
def scrubbing(fd, thr = 0.5, before = True, after = True):
    """Function that calculates motion outliers (frames with motion above threshold).
Parameters
----------
fd: pandas dataframe including frame-wise displacement (FD)
thr: threshold (default: 0.5)
before: marks frames before outlier datapoint (default: True)
after: marks frames after outlier datapoint (default: True)
Returns
-------
    scrubbing: pandas dataframe marking all outlier datapoints
"""
scrubbing = pd.DataFrame()
fd.loc[0] = 0
fd = fd.astype(float)
scrub1 = fd > thr
scrub1 = scrub1.astype(int)
scrubbing['scrubbing'] = scrub1
if before == True:
scrub2 = np.roll(scrubbing['scrubbing'], -1, axis = 0)
scrub2[0] = 0
scrubbing['scrubbing_bef'] = scrub2
if after == True:
scrub3 = np.roll(scrubbing['scrubbing'], 1, axis = 0)
scrub3[0] = 0
scrubbing['scrubbing_aft'] = scrub3
return scrubbing
def standardize(dataframe):
"""
Normalizes each column and returns values set to unit variance.
Parameters
----------
dataframe: pandas dataframe including columns of interest
Returns
-------
dataframe_stand: pandas dataframe with standarized values
"""
dataframe_stand = pd.DataFrame()
val = dataframe.values
standardize = preprocessing.StandardScaler()
val_scaled = standardize.fit_transform(val)
dataframe_stand = pd.DataFrame(val_scaled, columns = dataframe.columns)
return dataframe_stand
|
StarcoderdataPython
|
3568590
|
from .run import run_pipeline
from .data import get_pricing
|
StarcoderdataPython
|
6700321
|
class Logger:
def __init__(self):
pass
def log(self, msg):
pass
|
StarcoderdataPython
|
11317549
|
<reponame>RDFLib/PyRDFa
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from xml.dom.minidom import parse, parseString
from datetime import date
def one_entry(term) :
return "\t'%s'\t\t\t: 'http://www.w3.org/1999/xhtml/vocab#%s'," % (term,term)
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def manage_dom(dom) :
for record in dom.getElementsByTagName("record") :
term = ""
desc = ""
for child in record.childNodes :
if child.nodeType == child.ELEMENT_NODE and child.nodeName == "value" :
term = getText(child.childNodes)
if term != "describedby" :
print one_entry(term.encode('utf-8'))
#############################
Header = """initial_context["http://www.w3.org/2011/rdfa-context/html-rdfa-1.1"].terms = {"""
Footer = """
'p3pv1' : 'http://www.w3.org/1999/xhtml/vocab#p3pv1',
'transformation' : 'http://www.w3.org/2003/g/data-view#transformation',
'itsRules' : 'http://www.w3.org/1999/xhtml/vocab#itsRules',
'role' : 'http://www.w3.org/1999/xhtml/vocab#role',
}
"""
if __name__ == '__main__':
print Header
dom = parse('/Users/ivan/W3C/WWW/2011/rdfa-context/link-relations.xml')
manage_dom(dom)
print Footer
|
StarcoderdataPython
|
5091655
|
class PhysicalCoordinate(object):
def __init__(self, header):
phys_coord = ""
# check if physical coordinate is defined. FIXME!
for C in ["P", "L"]:
try:
if (header["WCSTY1" + C].strip() == "PHYSICAL") \
and (header["WCSTY2" + C].strip() == "PHYSICAL"):
phys_coord = C
except KeyError:
pass
try:
if (header["CTYPE1" + C].strip() == "X") \
and (header["CTYPE2" + C].strip() == "Y"):
phys_coord = C
except KeyError:
pass
if phys_coord:
C = phys_coord
            cv1, cr1, cd1 = header["CRVAL1" + C], header["CRPIX1" + C], header["CDELT1" + C]
            cv2, cr2, cd2 = header["CRVAL2" + C], header["CRPIX2" + C], header["CDELT2" + C]
self._physical_coord_not_defined = False
self.cv1_cr1_cd1 = cv1, cr1, cd1
self.cv2_cr2_cd2 = cv2, cr2, cd2
self.cdelt = (cd1 * cd2) ** .5
else:
self._physical_coord_not_defined = True
self.cv1_cr1_cd1 = 0, 0, 1
self.cv2_cr2_cd2 = 0, 0, 1
self.cdelt = 1
def to_physical(self, imx, imy):
if self._physical_coord_not_defined:
return imx, imy
cv1, cr1, cd1 = self.cv1_cr1_cd1
cv2, cr2, cd2 = self.cv2_cr2_cd2
phyx = cv1 + (imx - cr1) * cd1
phyy = cv2 + (imy - cr2) * cd2
return phyx, phyy
def to_image(self, phyx, phyy):
if self._physical_coord_not_defined:
return phyx, phyy
cv1, cr1, cd1 = self.cv1_cr1_cd1
cv2, cr2, cd2 = self.cv2_cr2_cd2
imx = cr1 + (phyx - cv1) / cd1
imy = cr2 + (phyy - cv2) / cd2
return imx, imy
def to_physical_distance(self, im_distance):
if self._physical_coord_not_defined:
return im_distance
return im_distance * self.cdelt
def to_image_distance(self, im_physical):
if self._physical_coord_not_defined:
return im_physical
return im_physical / self.cdelt
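# A hedged usage sketch; the dict below is a hand-made stand-in for a FITS header
# (e.g. an astropy.io.fits header), with illustrative physical-WCS keywords.
if __name__ == "__main__":
    header = {
        "WCSTY1P": "PHYSICAL", "WCSTY2P": "PHYSICAL",
        "CRVAL1P": 3584.5, "CRPIX1P": 0.5, "CDELT1P": 2.0,
        "CRVAL2P": 3584.5, "CRPIX2P": 0.5, "CDELT2P": 2.0,
    }
    pc = PhysicalCoordinate(header)
    phyx, phyy = pc.to_physical(10.0, 20.0)      # image -> physical coordinates
    print(phyx, phyy)
    print(pc.to_image(phyx, phyy))               # round-trips back to (10.0, 20.0)
    print(pc.to_physical_distance(5.0))          # scaled by sqrt(CDELT1P * CDELT2P)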
|
StarcoderdataPython
|
8021302
|
<gh_stars>1-10
'''
Core types for the connectivity model of an articulated mechanism.
'''
import logging, enum
import networkx as nx
import io
import kgprim.core as gr
class Joint:
'''
A placeholder for a joint of a mechanism.
A joint only has a name and a kind, the latter being one of the values
defined in `JointKind`.
A joint always connects two and only two `Link`s.
'''
def __init__(self, name, kind):
self.name = name
self.kind = kind
def __eq__(self, rhs):
return isinstance(rhs, Joint) and\
self.name==rhs.name and self.kind==rhs.kind
def __hash__(self):
return 31 * hash(self.name) + 97 * hash(self.kind)
def __str__(self):
return self.name
def __repr__(self):
return self.name
class JointKind(enum.Enum):
prismatic="prismatic",
revolute ="revolute"
class Link(gr.RigidBody):
'''
A placeholder for a rigid link of a mechanism.
A `Link` is just a named instance of `kgprim.gr.RigidBody`.
'''
def __init__(self, name):
super().__init__(name)
def __eq__(self, rhs):
return isinstance(rhs, Link) and self.name==rhs.name
def __hash__(self):
return 47 * hash(self.name)
def __str__(self):
return self.name
class KPair:
'''
A Kinematic-Pair, that is a pair of links connected by a joint
'''
def __init__(self, joint, link1, link2):
self.joint = joint
self.link1 = link1
self.link2 = link2
class Robot:
'''
The connectivity model of an articulated robot.
'''
def __init__(self, name, links, joints, pairs):
self.log = logging.getLogger('robot')
self._name = name
self.links = links # by-name map
self.joints= joints # by-name map
# A map from joint to links in the pair
self.pairs = {kp.joint: (kp.link1, kp.link2) for kp in pairs}
self.nB = len(self.links) # number of bodies
self.nJ = len(self.joints) # number of joints
self.nLoopJ = self.nJ - (self.nB - 1) # number of loop joints
# The connectivity graph.
        # Note that it can be used as a map from link pairs to joints
self.graph = nx.Graph()
self.graph.add_edges_from( [ (p.link1, p.link2, {'joint': p.joint}) for p in pairs ] )
self.loops = nx.cycle_basis(self.graph)
self._checks()
@property
def name(self) : return self._name
def hasLoops(self):
return self.nLoopJ > 0
def linkPairToJoint(self, link1, link2):
'''
The `Joint` connecting the two given links, `None` if the links are not
part of a pair.
'''
if not self.graph.has_edge(link1, link2) : return None
return self.graph[link1][link2]['joint']
def jointToLinkPair(self, joint):
'''
The `KPair` object whose joint is the given joint
'''
return self.pairs[joint]
def path(self, link1, link2):
return nx.shortest_path(self.graph, link1, link2)
def __str__(self):
text = 'Robot ' + self.name + '\n'
for l in self.links.values():
text += ' ' + l.name + '\n'
text += '\n'
for ed in self.graph.edges(data=True):
text += ed[2]['joint'].name + ' between ' + ed[0].name + ' and ' + ed[1].name + '\n'
return text
def _checks(self):
if not nx.connected.is_connected( self.graph ) :
self.log.error('The robot graph is not connected')
else :
self.log.debug("OK, the robot graph is connected")
def fromDict(data):
'''
Create a connectivity model from input data in a dictionary.
The dictionary is expected to have the following format:
- a key 'name' with the robot name
- a key 'links' which is a list of robot link names
- a key 'joints' which is a list of dictionaries, each having the entries
'name' and 'kind' which are both strings
- a key 'pairs' which is a list of dictionaries, each having three entries:
'joint', 'link1', 'link2', each having a name as the value
    This format is the same as the YAML connectivity model format used in this
    project. See the sample models in the repository.
'''
rname = data['name']
links = { name:Link(name) for name in data['links'] }
joints= { j['name']:Joint(j['name'], JointKind[j['kind']]) for j in data['joints'] }
pairs = [ KPair( joints[ p['joint'] ],
links [ p['link1'] ],
links [ p['link2'] ] )
for p in data['pairs'] ]
return Robot(rname, links, joints, pairs)
def graphToString(graph):
text = ''
for link in graph.nodes() :
text += link.name + ' '
return text
|
StarcoderdataPython
|
6610157
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
"""
Some notations:
d = number of features including W_0
n = number of observations
"""
def prepare_X(X:np.ndarray, degree:int = 1)-> np.ndarray:
'''
    Expands X up to the given polynomial degree and appends a column of ones at the beginning
    Input:
    X: (n*1) Input matrix
    degree: the polynomial degree to expand X to
Returns:
X_new : (n * d) matrix
'''
assert X.ndim == 2
n = X.shape[0]
X_new = X.copy()
if degree>1:
for d in range(2,degree+1):
X_new = np.hstack((X_new, X**d))
    # append a column of ones
X_new = np.hstack((np.ones([n,1]), X_new))
return X_new
def normaliz_data(X):
'''
    Z-normalizes the data and returns the arrays of means and stds needed to normalize validation data
Input:
X: (n*d) matrix
Returns:
X_norm : n*d , z-normalized data
mean = np array of means of all columns
std = np array of std of all columns
'''
mean = X.mean(axis=0)
std = X.std(axis=0)
    mean[0] = 0 # the first column is the column of ones.
    std[0] = 1  # the first column is the column of ones.
X_norm = (X - mean)/ std
return X_norm, mean, std
def w_closedForm(X,y):
'''
Finds the optimal w using closed form solution
Input:
X = 2D array, N*(D+1) data matrix
    y = 1D array, N-length vector of y values
    Output:
    w = 1D array, (D+1)-length vector
'''
w = np.dot(np.linalg.pinv(X),y)
return w
def give_squared_loss_grad(X, y, w, overPred_penalty=1, underPred_penalty=1):
'''
    Gives squared loss and gradient given X, w and other parameters
Input:
X = 2D array; n*d input matrix
y = 1D array; (n,) output array
w = 1D array; (), weights array
    overPred_penalty = [-Inf, Inf] penalty for over-prediction
    underPred_penalty = [-Inf, Inf] penalty for under-prediction
Returns:
loss: float = squared loss
grad: (d*1) array of gradients
'''
n = X.shape[0]
# errors
e = np.dot(X,w) - y
    # Penalty for over-/under-prediction
    penalty_vect = (e > 0.).astype(float)
    penalty_vect[penalty_vect == 1] = overPred_penalty
    penalty_vect[penalty_vect == 0] = underPred_penalty
    # Asymmetric loss
    asym_e = np.multiply(penalty_vect, e)
# Normalised Squared Loss
loss = np.dot(np.transpose(asym_e), asym_e) /(2*n)
# Gradient
grad = (np.dot(X.T, asym_e)) / n
return loss, grad
def GradDescent_LinReg(X, y, overPred_penalty=1, underPred_penalty=1, lr=0.1 , maxIt = 10000, verbose=False):
'''
Finds the optimal w using Gradient Descent method
Input:
X = 2D array; n*d input matrix
y = 1D array; (n,) output array
w = 1D array; (), weights array
    overPred_penalty = [-Inf, Inf] penalty for over-prediction
    underPred_penalty = [-Inf, Inf] penalty for under-prediction
    lr = learning rate
maxIt = Maximum Iterations
Returns:
w = (d*1) array of weights
'''
n,d = X.shape
if verbose:
itr_data = []
# initialize W randomly
w = np.random.rand(d,1)
for i in range(maxIt):
loss, grad = give_squared_loss_grad(X, y, w, overPred_penalty, underPred_penalty)
if verbose:
itr_data.append(loss[0][0])
w = w - (lr*grad)
if verbose:
return itr_data, w
return w
def find_best_model_plot_results(X_train, y_train, X_val, y_val,
method:str, overPred_penalty=1, underPred_penalty=1, lr=0.1 , maxIt = 10000 ):
# checking for degrees till 5
degrees = [i for i in range(1,6)]
# storing the best model
min_loss = np.Inf
best_model = {}
all_model = {}
# for plotting
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(11, 7)
plt.plot(X_train, y_train, 'k.')
    x_axis_data = np.linspace(min(X_train)-0.1, max(X_train)+0.1, num=100)
# below loop: for each polynomial degree finds the w*, finds the val loss and plots the fitted curve
for d in degrees:
X = prepare_X(X_train, degree=d)
X_norm, X_mean, X_std = normaliz_data(X)
if method == 'ClosedForm':
w = w_closedForm(X_norm, y_train)
if method == "GradientDescent":
w = GradDescent_LinReg(X_norm, y_train, overPred_penalty, underPred_penalty, lr, maxIt)
train_loss = give_squared_loss_grad(X_norm, y_train, w)[0]
# for validation loss
X_val_prep = prepare_X(X_val, degree=d)
X_val_norm = (X_val_prep - X_mean) / X_std
val_loss = give_squared_loss_grad(X_val_norm, y_val, w)[0]
all_model['degree:'+str(d)] = {'train_loss':train_loss, 'val_loss':val_loss, 'w':w}
if val_loss < min_loss:
min_loss = val_loss
best_model['degree'] = d
best_model['w'] = w
best_model['train_loss'] = train_loss
best_model['val_loss'] = val_loss
if method == "GradientDescent":
best_model['OverPred Penalty'] = overPred_penalty
best_model['UnderPred Penalty'] = underPred_penalty
best_model['learning_rate'] = lr
                best_model['maxIt'] = maxIt
# to plot the line
X_graph = ((prepare_X(x_axis_data, degree=d))-X_mean) / X_std
y_axis_data = np.dot(X_graph,w)
color = {1:'b', 2:'g', 3:'r', 4:'y', 5:'c'}[d]
plt.plot(x_axis_data, y_axis_data, color, label = "deg:"+str(d))
plt.legend()
return best_model
|
StarcoderdataPython
|
9638303
|
"""
You have to write the perc_train function that trains the feature weights using the perceptron algorithm for the CoNLL 2000 chunking task.
Each element of train_data is a (labeled_list, feat_list) pair.
Inside the perceptron training loop:
- Call perc_test to get the tagging based on the current feat_vec and compare it with the true output from the labeled_list
- If the output is incorrect then we have to update feat_vec (the weight vector)
- In the notation used in the paper we have w = w_0, w_1, ..., w_n corresponding to \phi_0(x,y), \phi_1(x,y), ..., \phi_n(x,y)
- Instead of indexing each feature with an integer we index each feature using a string we called feature_id
- The feature_id is constructed using the elements of feat_list (which correspond to x above) combined with the output tag (which correspond to y above)
- The function perc_test shows how the feature_id is constructed for each word in the input, including the bigram feature "B:" which is a special case
- feat_vec[feature_id] is the weight associated with feature_id
- This dictionary lookup lets us implement a sparse vector dot product where any feature_id not used in a particular example does not participate in the dot product
- To save space and time make sure you do not store zero values in the feat_vec dictionary which can happen if \phi(x_i,y_i) - \phi(x_i,y_{perc_test}) results in a zero value
- If you are going word by word to check if the predicted tag is equal to the true tag, there is a corner case where the bigram 'T_{i-1} T_i' is incorrect even though T_i is correct.
"""
import perc
import sys, optparse, os
from collections import defaultdict
def train_tags(train):
output = []
i = 0
while(i < len(train)):
x = train[i].split()
output.append(x[2])
i = i + 1
return output
def word_list(train):
output = []
i = 0
while(i < len(train)):
x = train[i].split()
output.append(x[0])
i = i + 1
return output
def pos_list(train):
output = []
i = 0
while(i < len(train)):
x = train[i].split()
output.append(x[1])
i = i + 1
return output
def add_one_feat(feat_vec,key_z,key_true):
if key_z != None:
if feat_vec[key_z] != None:
feat_vec[key_z] -= 1
#if feat_vec[key_z] <= 0:
# feat_vec.pop(key_z)
if key_true != None:
if feat_vec[key_true] == None:
feat_vec[key_true] = 1
else:
feat_vec[key_true] += 1
return
def strip(feat_vec):
items_to_pop = []
for i in feat_vec:
if feat_vec[i] <= 0:
items_to_pop.append(i)
for i in range(0,len(items_to_pop)):
feat_vec.pop(items_to_pop[i])
#Every Feat function uses the feature that ranges from feat_00 to feat_22
def feat_00(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 2) < 0):
return
if(tag_list[position-2] != z_list[position-2]):
key_z = ("U00:" + word_list[position-2], z_list[position-2])
key_true = ("U00:" + word_list[position-2], tag_list[position-2])
add_one_feat(feat_vec,key_z,key_true)
def feat_01(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 1) < 0):
return
if(tag_list[position-1] != z_list[position-1]):
key_z = ("U01:" + word_list[position-1], z_list[position-1])
key_true = ("U01:" + word_list[position-1], tag_list[position-1])
add_one_feat(feat_vec,key_z,key_true)
def feat_02(feat_vec,word_list,pos_list,tag_list,z_list,position):
if(tag_list[position] != z_list[position]):
key_z = ("U02:" + word_list[position], z_list[position])
key_true = ("U02:" + word_list[position], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
return
def feat_03(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 1) > len(z_list) - 1):
return
if(tag_list[position+1] != z_list[position+1]):
key_z = ("U03:" + word_list[position+1], z_list[position+1])
key_true = ("U03:" + word_list[position+1], tag_list[position+1])
add_one_feat(feat_vec,key_z,key_true)
def feat_04(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 2) > len(z_list) - 1):
return
if(tag_list[position+2] != z_list[position+2]):
key_z = ("U04:" + word_list[position+2], z_list[position+2])
key_true = ("U04:" + word_list[position+2], tag_list[position+2])
add_one_feat(feat_vec,key_z,key_true)
def feat_05(feat_vec,word_list,pos_list,tag_list,z_list,position):
offset = 1
if ((position - offset) < 0):
return
if(tag_list[position -offset] == tag_list[position]):
if(z_list[position -offset] == z_list[position]):
if(z_list[position] != tag_list[position]):
key_z = ("U05:" + word_list[position-1] + "/" + word_list[position], z_list[position])
key_true = ("U05:" + word_list[position-1] + "/" + word_list[position], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U05:" + word_list[position-1] + "/" + word_list[position], z_list[position-1])
key_z2 = ("U05:" + word_list[position-1] + "/" + word_list[position], z_list[position])
key_true = ("U05:" + word_list[position-1] + "/" + word_list[position], tag_list[position])
add_one_feat(feat_vec,key_z1,key_true)
add_one_feat(feat_vec,key_z2,key_true)
add_one_feat(feat_vec,None,key_true)
def feat_06(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 1) > len(z_list) - 1):
return
if(tag_list[position +1] == tag_list[position]):
if(z_list[position +1] == z_list[position]):
if(z_list[position] != tag_list[position]):
key_z = ("U06:" + word_list[position] + "/" + word_list[position+1], z_list[position])
key_true = ("U06:" + word_list[position] + "/" + word_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U06:" + word_list[position] + "/" + word_list[position+1], z_list[position])
key_z2 = ("U06:" + word_list[position] + "/" + word_list[position+1], z_list[position+1])
key_true = ("U06:" + word_list[position] + "/" + word_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z1,key_true)
add_one_feat(feat_vec,key_z2,key_true)
add_one_feat(feat_vec,None,key_true)
def feat_10(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position -2) < 0):
return
if(tag_list[position-2] != z_list[position-2]):
key_z = ("U10:" + pos_list[position-2], z_list[position-2])
key_true = ("U10:" + pos_list[position-2], tag_list[position-2])
add_one_feat(feat_vec,key_z,key_true)
def feat_11(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position -1) < 0):
return
if(tag_list[position-1] != z_list[position-1]):
key_z = ("U11:" + pos_list[position-1], z_list[position-1])
key_true = ("U11:" + pos_list[position-1], tag_list[position-1])
add_one_feat(feat_vec,key_z,key_true)
def feat_12(feat_vec,word_list,pos_list,tag_list,z_list,position):
if(tag_list[position] != z_list[position]):
key_z = ("U12:" + pos_list[position] + "q", z_list[position])
key_true = ("U12:" + pos_list[position] + "q", tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
def feat_13(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position +1) > len(z_list) - 1):
return
if(tag_list[position+1] != z_list[position+1]):
key_z = ("U13:" + pos_list[position+1], z_list[position+1])
key_true = ("U13:" + pos_list[position+1], tag_list[position+1])
add_one_feat(feat_vec,key_z,key_true)
def feat_14(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position +2) > len(z_list) - 1):
return
if(tag_list[position+2] != z_list[position+2]):
key_z = ("U14:" + pos_list[position+2], z_list[position+2])
key_true = ("U14:" + pos_list[position+2], tag_list[position+2])
add_one_feat(feat_vec,key_z,key_true)
def feat_15(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 2) < 0):
return
if(tag_list[position -2] == tag_list[position-1]):
if(z_list[position -2] == z_list[position-1]):
if(z_list[position-1] != tag_list[position-1]):
key_z = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], z_list[position-1])
key_true = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], tag_list[position-1])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], z_list[position-2])
key_z2 = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], z_list[position-1])
key_true = ("U15:" + pos_list[position-2] + "/" + pos_list[position-1], tag_list[position-1])
add_one_feat(feat_vec,None,key_true)
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
def feat_16(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 1) < 0):
return
if(tag_list[position -1] == tag_list[position]):
if(z_list[position -1] == z_list[position]):
if(z_list[position] != tag_list[position]):
key_z = ("U16:" + pos_list[position-1] + "/" + pos_list[position], z_list[position])
key_true = ("U16:" + pos_list[position-1] + "/" + pos_list[position], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U16:" + pos_list[position-1] + "/" + pos_list[position], z_list[position-1])
key_z2 = ("U16:" + pos_list[position-1] + "/" + pos_list[position], z_list[position])
key_true = ("U16:" + pos_list[position-1] + "/" + pos_list[position], tag_list[position])
add_one_feat(feat_vec,None,key_true)
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
def feat_17(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 1) > len(z_list) - 1):
return
if(tag_list[position] == tag_list[position+1]):
if(z_list[position] == z_list[position+1]):
if(z_list[position] != tag_list[position]):
key_z = ("U17:" + pos_list[position] + "/" + pos_list[position+1], z_list[position])
key_true = ("U17:" + pos_list[position] + "/" + pos_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U17:" + pos_list[position] + "/" + pos_list[position+1], z_list[position])
key_z2 = ("U17:" + pos_list[position] + "/" + pos_list[position+1], z_list[position+1])
key_true = ("U17:" + pos_list[position] + "/" + pos_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,None,key_true)
def feat_18(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position + 2) > len(z_list) - 1):
return
if(tag_list[position+1] == tag_list[position+2]):
if(z_list[position+1] == z_list[position+2]):
if(z_list[position+1] != tag_list[position+1]):
key_z = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+1])
key_true = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], tag_list[position+1])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+1])
key_z2 = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+2])
key_true = ("U18:" + pos_list[position+1] + "/" + pos_list[position+2], tag_list[position+1])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,None,key_true)
def feat_20(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position -2 ) < 0):
return
if((tag_list[position-2] == tag_list[position-1]) and (tag_list[position-1] == tag_list[position])):
if((z_list[position-2] == z_list[position-1]) and (z_list[position-1] == z_list[position])):
if(z_list[position] != tag_list[position]):
key_z = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], z_list[position])
key_true = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], z_list[position-2])
key_z2 = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], z_list[position-1])
key_z3 = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], z_list[position])
key_true = ("U20:" + pos_list[position-2] + "/" + pos_list[position-1] + "/" + pos_list[position], tag_list[position])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,key_z3,None)
add_one_feat(feat_vec,None,key_true)
def feat_21(feat_vec,word_list,pos_list,tag_list,z_list,position):
if ((position - 1) < 0):
return
if ((position + 1) > len(z_list) - 1):
return
if((tag_list[position-1] == tag_list[position]) and (tag_list[position] == tag_list[position+1])):
if((z_list[position-1] == z_list[position]) and (z_list[position] == z_list[position+1])):
if(z_list[position] != tag_list[position]):
key_z = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], z_list[position])
key_true = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], z_list[position-1])
key_z2 = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], z_list[position])
key_z3 = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], z_list[position+1])
key_true = ("U21:" + pos_list[position-1] + "/" + pos_list[position] + "/" + pos_list[position+1], tag_list[position])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,key_z3,None)
add_one_feat(feat_vec,None,key_true)
def feat_22(feat_vec,word_list,pos_list,tag_list,z_list,position):
if((position +2 ) > len(z_list) - 1):
return
if((tag_list[position] == tag_list[position+1]) and (tag_list[position+2] == tag_list[position])):
if((z_list[position] == z_list[position+1]) and (z_list[position+2] == z_list[position])):
if(z_list[position] != tag_list[position]):
key_z = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position])
key_true = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], tag_list[position])
add_one_feat(feat_vec,key_z,key_true)
else:
key_z1 = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position])
key_z2 = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+1])
key_z3 = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], z_list[position+2])
key_true = ("U22:" + pos_list[position] + "/" + pos_list[position+1] + "/" + pos_list[position+2], tag_list[position])
add_one_feat(feat_vec,key_z1,None)
add_one_feat(feat_vec,key_z2,None)
add_one_feat(feat_vec,key_z3,None)
add_one_feat(feat_vec,None,key_true)
# The check_and_change function calls every feature template above, each of which may
# add to or subtract from the corresponding feature weights.
def check_and_change(feat_vec,word_list,pos_list,tag_list,z_list,position):
feat_00(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_01(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_02(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_03(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_04(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_05(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_06(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_10(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_11(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_12(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_13(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_14(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_15(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_16(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_17(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_18(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_20(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_21(feat_vec,word_list,pos_list,tag_list,z_list,position)
feat_22(feat_vec,word_list,pos_list,tag_list,z_list,position)
strip(feat_vec)
#perc train calls the viterbi algorithm then calls the check and change function to manipulate the weights
def perc_train(train_data,tagset,numepochs):
feat_vec = defaultdict(int)
tags = {}
for i in range(0,numepochs):
for j in range(0,len(train_data)):
label_list = train_data[j][0]
feat_list = train_data[j][1]
z = perc.perc_test(feat_vec,label_list, feat_list,tagset,tagset[0])
for k in range(0,len(z)):
temp = train_tags(label_list)
if(z[k] != temp[k]):
check_and_change(feat_vec,word_list(label_list),pos_list(label_list),train_tags(label_list),z,k)
return feat_vec
if __name__ == '__main__':
optparser = optparse.OptionParser()
optparser.add_option("-t", "--tagsetfile", dest="tagsetfile", default=os.path.join("data", "tagset.txt"), help="tagset that contains all the labels produced in the output, i.e. the y in \phi(x,y)")
optparser.add_option("-i", "--trainfile", dest="trainfile", default=os.path.join("data", "train.txt.gz"), help="input data, i.e. the x in \phi(x,y)")
optparser.add_option("-f", "--featfile", dest="featfile", default=os.path.join("data", "train.feats.gz"), help="precomputed features for the input data, i.e. the values of \phi(x,_) without y")
    optparser.add_option("-e", "--numepochs", dest="numepochs", default=int(10), help="number of epochs of training; in each epoch we iterate over all the training examples")
optparser.add_option("-m", "--modelfile", dest="modelfile", default=os.path.join("data", "default.model"), help="weights for all features stored on disk")
(opts, _) = optparser.parse_args()
# each element in the feat_vec dictionary is:
# key=feature_id value=weight
feat_vec = {}
tagset = []
train_data = []
tagset = perc.read_tagset(opts.tagsetfile)
print >>sys.stderr, "reading data ..."
train_data = perc.read_labeled_data(opts.trainfile, opts.featfile)
print >>sys.stderr, "done."
feat_vec = perc_train(train_data, tagset, int(opts.numepochs))
perc.perc_write_to_file(feat_vec, opts.modelfile)
|
StarcoderdataPython
|
3486890
|
import cv2
import json
from tqdm import tqdm
import pandas as pd
from .bb_polygon import *
def draw_arrow(image, start_point, end_point, color):
start_point = tuple(start_point)
end_point = tuple(end_point)
image = cv2.line(image, start_point, end_point, color, 3)
image = cv2.circle(image, end_point, 8, color, -1)
return image
def draw_start_last_points(ori_im, start_point, last_point, color=(0, 255, 0)):
return draw_arrow(ori_im, start_point, last_point, color)
def draw_one_box(img, box, key=None, value=None, color=None, line_thickness=None):
tl = line_thickness or int(round(0.001 * max(img.shape[0:2]))) # line thickness
coord = [box[0], box[1], box[2], box[3]]
c1, c2 = (int(coord[0]), int(coord[1])), (int(coord[2]), int(coord[3]))
img = cv2.rectangle(img, c1, c2, color, thickness=tl*2)
if key is not None and value is not None:
header = f'{key} || {value}'
tf = max(tl - 2, 1) # font thickness
s_size = cv2.getTextSize(f'| {value}', 0, fontScale=float(tl) / 3, thickness=tf)[0]
t_size = cv2.getTextSize(f'{key} |', 0, fontScale=float(tl) / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0] + s_size[0] + 15, c1[1] - t_size[1] - 3
img = cv2.rectangle(img, c1, c2, color, -1) # filled
img = cv2.putText(img, header, (c1[0], c1[1] - 2), 0, float(tl) / 3, [0, 0, 0],
                          thickness=tf, lineType=cv2.LINE_AA)
return img
def draw_text(
img,
text,
uv_top_left=None,
color=(255, 255, 255),
fontScale=0.75,
thickness=1,
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
outline_color=(0, 0, 0),
line_spacing=1.5,
):
"""
    Draws multiline text with an outline.
"""
assert isinstance(text, str)
lines = text.splitlines()
if uv_top_left is None:
# set the text start position, at the bottom left of video
(w, h), _ = cv2.getTextSize(
text=lines[0],
fontFace=fontFace,
fontScale=fontScale,
thickness=thickness,
)
text_offset_x = 10
text_offset_y = img.shape[0] - h*(len(lines)+3)
uv_top_left = (text_offset_x, text_offset_y)
uv_top_left = np.array(uv_top_left, dtype=float)
assert uv_top_left.shape == (2,)
for line in lines:
(w, h), _ = cv2.getTextSize(
text=line,
fontFace=fontFace,
fontScale=fontScale,
thickness=thickness,
)
uv_bottom_left_i = uv_top_left + [0, h]
org = tuple(uv_bottom_left_i.astype(int))
if outline_color is not None:
cv2.putText(
img,
text=line,
org=org,
fontFace=fontFace,
fontScale=fontScale,
color=outline_color,
thickness=thickness * 3,
lineType=cv2.LINE_AA,
)
cv2.putText(
img,
text=line,
org=org,
fontFace=fontFace,
fontScale=fontScale,
color=color,
thickness=thickness,
lineType=cv2.LINE_AA,
)
uv_top_left += [0, h * line_spacing]
return img
def draw_anno(image, polygon=None, paths=None):
colors = [(0, 0, 255), # red 0
(0, 255, 0), # green 1
(255, 0, 0), # blue 2
(0, 255, 255), # cyan 3
(128, 0, 128), # purple 4
(0, 0, 0), # black 5
(255, 255, 255)] # white 6
if polygon:
polygon = np.array(polygon, np.int32)
polygon = polygon.reshape((-1, 1, 2))
image = cv2.polylines(image, [polygon], True, colors[0], 5)
if paths:
for path, points in paths.items():
points = np.array(points, np.int32)
image = draw_arrow(image, points[0], points[1], colors[5])
image = cv2.putText(image, path, (points[1][0], points[1][1]),
cv2.FONT_HERSHEY_PLAIN, fontScale=1.5, color=colors[5], thickness=3)
return image
def draw_frame_count(img, frame_id):
text = f"Frame:{frame_id}"
text_offset = (10, 25)
return draw_text(img, text, text_offset, color=(0,255,0))
def load_zone_anno(zone_path):
with open(zone_path, 'r') as f:
anno = json.load(f)
directions = {}
zone = anno['shapes'][0]['points']
for i in anno['shapes']:
if i['label'].startswith('direction'):
directions[i['label'][-2:]] = i['points']
return zone, directions
def find_best_match_direction(obj_vector,paths):
"""
paths: dict {key: vector,...}
"""
directions = list(paths.keys())
best_score = 0
best_match = directions[0]
for direction_id in directions:
vector = paths[direction_id]
score = cosin_similarity(obj_vector, vector)
if score > best_score:
best_score = score
best_match = direction_id
return best_match
def save_tracking_to_csv(track_dict, filename):
num_classes = len(track_dict)
obj_dict = {
'track_id': [],
'frame_id': [],
'box': [],
'color': [],
'label': [],
'direction': [],
'fpoint': [],
'lpoint': [],
'fframe': [],
'lframe': []
}
for label_id in range(num_classes):
for track_id in track_dict[label_id].keys():
direction = track_dict[label_id][track_id]['direction']
boxes = track_dict[label_id][track_id]['boxes']
frames = track_dict[label_id][track_id]['frames']
color = track_dict[label_id][track_id]['color']
frame_first = frames[0]
frame_last = frames[-1]
box_first = boxes[0]
box_last = boxes[-1]
center_point_first = ((box_first[2]+box_first[0]) / 2, (box_first[3] + box_first[1])/2)
center_point_last = ((box_last[2]+box_last[0]) / 2, (box_last[3] + box_last[1])/2)
for i in range(len(track_dict[label_id][track_id]['boxes'])):
obj_dict['track_id'].append(track_id)
obj_dict['frame_id'].append(frames[i])
obj_dict['box'].append(boxes[i].tolist())
obj_dict['color'].append(color)
obj_dict['label'].append(label_id)
obj_dict['direction'].append(direction)
obj_dict['fpoint'].append(center_point_first)
obj_dict['lpoint'].append(center_point_last)
obj_dict['fframe'].append(frame_first)
obj_dict['lframe'].append(frame_last)
df = pd.DataFrame(obj_dict)
df.to_csv(filename, index=False)
def convert_frame_dict(track_dict):
"""
return result dict:
{
frame_id: {
'boxes': [],
'colors': [],
'fpoints': [],
'lpoints': []
}
}
"""
result_dict = {}
num_classes = len(track_dict)
for label_id in range(num_classes):
for track_id in track_dict[label_id].keys():
direction = track_dict[label_id][track_id]['direction']
boxes = track_dict[label_id][track_id]['boxes']
frames = track_dict[label_id][track_id]['frames']
color = track_dict[label_id][track_id]['color']
            for i in range(len(boxes)):
frame_id = frames[i]
box = boxes[i]
if frame_id not in result_dict.keys():
result_dict[frame_id] = {
'boxes': [],
'colors': [],
'fpoints': [],
'lpoints': [],
'labels': [],
'directions': []
}
                first_box = boxes[0]
                last_box = boxes[-1]
center_point_first = ((first_box[2]+first_box[0]) / 2, (first_box[3] + first_box[1])/2)
center_point_last = ((last_box[2]+last_box[0]) / 2, (last_box[3] + last_box[1])/2)
result_dict[frame_id]['boxes'].append(box)
result_dict[frame_id]['fpoints'].append(center_point_first)
result_dict[frame_id]['lpoints'].append(center_point_last)
result_dict[frame_id]['directions'].append(direction)
result_dict[frame_id]['colors'].append(color)
result_dict[frame_id]['labels'].append(label_id)
return result_dict
def visualize_one_frame(img, df):
# track_id frame_id box color label direction fpoint lpoint fframe lframe
anns = [
i for i in zip(
df.track_id,
df.box,
df.color,
df.label,
df.fpoint)
]
for (track_id, box, color, label, fpoint) in anns:
box = eval(box)
fpoint = np.array(eval(fpoint)).astype(int)
color = eval(color)
cpoint = np.array([(box[2]+box[0]) / 2, (box[3] + box[1])/2]).astype(int)
img = draw_start_last_points(img, fpoint, cpoint, color)
img = draw_one_box(
img,
box,
key=f'id: {track_id}',
value=f'cls: {label}',
color=color)
return img
def count_frame_directions(df, count_dict):
anns = [
i for i in zip(
df.frame_id,
df.label,
df.direction,
df.lframe)
]
for (frame_id, label, direction, lframe) in anns:
if lframe == frame_id:
count_dict[direction][label] += 1
count_text = []
for dir in count_dict.keys():
tmp_text = f"direction:{dir} || "
for cls_id in count_dict[dir].keys():
tmp_text += f"{cls_id}:{count_dict[dir][cls_id]} | "
count_text.append(tmp_text)
count_text = "\n".join(count_text)
return count_dict, count_text
def visualize_merged(videoloader, csv_path, directions, zones, num_classes, outvid):
df = pd.read_csv(csv_path)
count_dict = {
int(dir): {
label: 0 for label in range(num_classes)
} for dir in directions
}
prev_text = None # Delay direction text by one frame
for batch in tqdm(videoloader):
if batch is None:
continue
imgs = batch['ori_imgs']
frame_ids = batch['frames']
for idx in range(len(imgs)):
frame_id = frame_ids[idx]
img = imgs[idx].copy()
tmp_df = df[df.frame_id.astype(int) == frame_id]
count_dict, text = count_frame_directions(tmp_df, count_dict)
img = draw_anno(img, zones, directions)
if len(tmp_df) > 0:
img = visualize_one_frame(img, tmp_df)
if prev_text:
img = draw_text(img, prev_text)
prev_text=text
img = draw_frame_count(img, frame_id)
outvid.write(img)
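# A hedged usage sketch of the drawing helpers above; the values are illustrative and
# the module is meant to be imported from a script that also has numpy available:
#
#   canvas = np.full((480, 640, 3), 255, dtype=np.uint8)
#   canvas = draw_start_last_points(canvas, (100, 400), (300, 150), color=(255, 0, 0))
#   canvas = draw_one_box(canvas, [280, 120, 360, 200],
#                         key='id: 7', value='cls: 1', color=(0, 128, 255))
#   canvas = draw_text(canvas, "direction:01 || 0:3 | 1:1 |")
#   cv2.imwrite('demo_frame.jpg', canvas)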
|
StarcoderdataPython
|
11322754
|
import numpy as np
from theano import gof, tensor
class Minimal(gof.Op):
# TODO : need description for class
# if the Op has any attributes, consider using them in the eq function.
# If two Apply nodes have the same inputs and the ops compare equal...
# then they will be MERGED so they had better have computed the same thing!
__props__ = ()
def __init__(self):
# If you put things here, think about whether they change the outputs
        # computed by self.perform()
# - If they do, then you should take them into consideration in
# __eq__ and __hash__
# - If they do not, then you should not use them in
# __eq__ and __hash__
super().__init__()
def make_node(self, *args):
# HERE `args` must be THEANO VARIABLES
return gof.Apply(op=self, inputs=args, outputs=[tensor.lscalar()])
def perform(self, node, inputs, out_):
(output,) = out_
# HERE `inputs` are PYTHON OBJECTS
# do what you want here,
# but do not modify any of the arguments [inplace].
print("perform got %i arguments" % len(inputs))
print("Max of input[0] is ", np.max(inputs[0]))
# return some computed value.
# do not return something that is aliased to one of the inputs.
output[0] = np.asarray(0, dtype="int64")
minimal = Minimal()
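# Hedged usage sketch (not part of the original snippet): wraps the Op above in
# a compiled Theano function. Assumes Theano is installed; the variable names
# below are illustrative.
if __name__ == "__main__":
    import theano

    x = tensor.dvector("x")
    y = tensor.dvector("y")
    f = theano.function([x, y], minimal(x, y))

    # perform() prints the number of inputs and the max of the first one,
    # then returns the int64 scalar 0.
    result = f(np.arange(3, dtype="float64"), np.arange(4, dtype="float64"))
    print("result:", result)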
|
StarcoderdataPython
|
3479987
|
import asyncio
import functools
import types
import typing as t
from contextlib import suppress
from discord import Member, NotFound
from discord.ext import commands
from discord.ext.commands import Cog, Context
from bot.constants import Channels, DEBUG_MODE, RedirectOutput
from bot.log import get_logger
from bot.utils import function, scheduling
from bot.utils.checks import ContextCheckFailure, in_whitelist_check
from bot.utils.function import command_wraps
log = get_logger(__name__)
def in_whitelist(
*,
channels: t.Container[int] = (),
categories: t.Container[int] = (),
roles: t.Container[int] = (),
redirect: t.Optional[int] = Channels.bot_commands,
fail_silently: bool = False,
) -> t.Callable:
"""
Check if a command was issued in a whitelisted context.
The whitelists that can be provided are:
- `channels`: a container with channel ids for whitelisted channels
- `categories`: a container with category ids for whitelisted categories
- `roles`: a container with role ids for whitelisted roles
If the command was invoked in a context that was not whitelisted, the member is either
redirected to the `redirect` channel that was passed (default: #bot-commands) or simply
told that they're not allowed to use this particular command (if `None` was passed).
"""
def predicate(ctx: Context) -> bool:
"""Check if command was issued in a whitelisted context."""
return in_whitelist_check(ctx, channels, categories, roles, redirect, fail_silently)
return commands.check(predicate)
class NotInBlacklistCheckFailure(ContextCheckFailure):
"""Raised when the 'not_in_blacklist' check fails."""
def not_in_blacklist(
*,
channels: t.Container[int] = (),
categories: t.Container[int] = (),
roles: t.Container[int] = (),
override_roles: t.Container[int] = (),
redirect: t.Optional[int] = Channels.bot_commands,
fail_silently: bool = False,
) -> t.Callable:
"""
Check if a command was not issued in a blacklisted context.
The blacklists that can be provided are:
- `channels`: a container with channel ids for blacklisted channels
- `categories`: a container with category ids for blacklisted categories
- `roles`: a container with role ids for blacklisted roles
If the command was invoked in a context that was blacklisted, the member is either
redirected to the `redirect` channel that was passed (default: #bot-commands) or simply
told that they're not allowed to use this particular command (if `None` was passed).
The blacklist can be overridden through the roles specified in `override_roles`.
"""
def predicate(ctx: Context) -> bool:
"""Check if command was issued in a blacklisted context."""
not_blacklisted = not in_whitelist_check(ctx, channels, categories, roles, fail_silently=True)
overridden = in_whitelist_check(ctx, roles=override_roles, fail_silently=True)
success = not_blacklisted or overridden
if not success and not fail_silently:
raise NotInBlacklistCheckFailure(redirect)
return success
return commands.check(predicate)
def has_no_roles(*roles: t.Union[str, int]) -> t.Callable:
"""
Returns True if the user does not have any of the roles specified.
`roles` are the names or IDs of the disallowed roles.
"""
async def predicate(ctx: Context) -> bool:
try:
await commands.has_any_role(*roles).predicate(ctx)
except commands.MissingAnyRole:
return True
else:
# This error is never shown to users, so don't bother trying to make it too pretty.
roles_ = ", ".join(f"'{item}'" for item in roles)
raise commands.CheckFailure(f"You have at least one of the disallowed roles: {roles_}")
return commands.check(predicate)
def redirect_output(
destination_channel: int,
bypass_roles: t.Optional[t.Container[int]] = None,
channels: t.Optional[t.Container[int]] = None,
categories: t.Optional[t.Container[int]] = None,
ping_user: bool = True
) -> t.Callable:
"""
Changes the channel in the context of the command to redirect the output to a certain channel.
    Redirect is bypassed if the author has a bypass role, or if the command is invoked in a channel or category that bypasses redirection.
If ping_user is False, it will not send a message in the destination channel.
This decorator must go before (below) the `command` decorator.
"""
def wrap(func: types.FunctionType) -> types.FunctionType:
@command_wraps(func)
async def inner(self: Cog, ctx: Context, *args, **kwargs) -> None:
if ctx.channel.id == destination_channel:
log.trace(f"Command {ctx.command} was invoked in destination_channel, not redirecting")
await func(self, ctx, *args, **kwargs)
return
if bypass_roles and any(role.id in bypass_roles for role in ctx.author.roles):
log.trace(f"{ctx.author} has role to bypass output redirection")
await func(self, ctx, *args, **kwargs)
return
elif channels and ctx.channel.id not in channels:
log.trace(f"{ctx.author} used {ctx.command} in a channel that can bypass output redirection")
await func(self, ctx, *args, **kwargs)
return
elif categories and ctx.channel.category.id not in categories:
log.trace(f"{ctx.author} used {ctx.command} in a category that can bypass output redirection")
await func(self, ctx, *args, **kwargs)
return
redirect_channel = ctx.guild.get_channel(destination_channel)
old_channel = ctx.channel
log.trace(f"Redirecting output of {ctx.author}'s command '{ctx.command.name}' to {redirect_channel.name}")
ctx.channel = redirect_channel
if ping_user:
await ctx.send(f"Here's the output of your command, {ctx.author.mention}")
scheduling.create_task(func(self, ctx, *args, **kwargs))
message = await old_channel.send(
f"Hey, {ctx.author.mention}, you can find the output of your command here: "
f"{redirect_channel.mention}"
)
if RedirectOutput.delete_invocation:
await asyncio.sleep(RedirectOutput.delete_delay)
with suppress(NotFound):
await message.delete()
log.trace("Redirect output: Deleted user redirection message")
with suppress(NotFound):
await ctx.message.delete()
log.trace("Redirect output: Deleted invocation message")
return inner
return wrap
def respect_role_hierarchy(member_arg: function.Argument) -> t.Callable:
"""
Ensure the highest role of the invoking member is greater than that of the target member.
If the condition fails, a warning is sent to the invoking context. A target which is not an
instance of discord.Member will always pass.
`member_arg` is the keyword name or position index of the parameter of the decorated command
whose value is the target member.
This decorator must go before (below) the `command` decorator.
"""
def decorator(func: types.FunctionType) -> types.FunctionType:
@command_wraps(func)
async def wrapper(*args, **kwargs) -> t.Any:
log.trace(f"{func.__name__}: respect role hierarchy decorator called")
bound_args = function.get_bound_args(func, args, kwargs)
target = function.get_arg_value(member_arg, bound_args)
if not isinstance(target, Member):
log.trace("The target is not a discord.Member; skipping role hierarchy check.")
return await func(*args, **kwargs)
ctx = function.get_arg_value(1, bound_args)
cmd = ctx.command.name
actor = ctx.author
if target.top_role >= actor.top_role:
log.info(
f"{actor} ({actor.id}) attempted to {cmd} "
f"{target} ({target.id}), who has an equal or higher top role."
)
await ctx.send(
f":x: {actor.mention}, you may not {cmd} "
"someone with an equal or higher top role."
)
else:
log.trace(f"{func.__name__}: {target.top_role=} < {actor.top_role=}; calling func")
return await func(*args, **kwargs)
return wrapper
return decorator
def mock_in_debug(return_value: t.Any) -> t.Callable:
"""
Short-circuit function execution if in debug mode and return `return_value`.
The original function name, and the incoming args and kwargs are DEBUG level logged
upon each call. This is useful for expensive operations, i.e. media asset uploads
that are prone to rate-limits but need to be tested extensively.
"""
def decorator(func: t.Callable) -> t.Callable:
@functools.wraps(func)
async def wrapped(*args, **kwargs) -> t.Any:
"""Short-circuit and log if in debug mode."""
if DEBUG_MODE:
log.debug(f"Function {func.__name__} called with args: {args}, kwargs: {kwargs}")
return return_value
return await func(*args, **kwargs)
return wrapped
return decorator
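# --- Hedged usage sketch (illustrative only; `ExampleCog` is hypothetical and
# --- not part of the original module) ---
class ExampleCog(Cog):
    """Show how the checks above are typically stacked on a command."""

    @commands.command(name="example")
    @in_whitelist(channels=(Channels.bot_commands,), fail_silently=True)
    async def example_command(self, ctx: Context) -> None:
        """Reply only when invoked from a whitelisted context."""
        await ctx.send("Invoked from a whitelisted channel.")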
|
StarcoderdataPython
|
11285449
|
# coding=utf-8
import argparse
import os
import tensorflow as tf
import tqdm
from feature_extraction.extract_model import ExtractModel
from feature_extraction.data_set import get_testing_set
from model_loader import load_model, save_model
from data_set_args import get_rab_set_args, get_ked_set_args,\
get_bdl_set_args, get_jmk_set_args, get_slt_set_args,\
get_mix2_set_args, get_mix3_set_args
def get_args():
parser = argparse.ArgumentParser(description="GlottalNet")
parser.add_argument("--save_path", type=str, default="./save/mix3/")
parser.add_argument("--log_path", type=str, default="./log/mix3/")
parser.add_argument("--training_epochs", type=int, default=100)
parser.add_argument("--training_batch_size", type=int, default=128)
parser.add_argument("--validation_batch_size", type=int, default=128)
parser.add_argument("--save_per_epochs", type=int, default=10)
parser.add_argument("--validation_per_epochs", type=int, default=1)
return parser.parse_args()
def main():
tf.logging.set_verbosity(tf.logging.INFO)
args = get_args()
data_set_args = get_mix3_set_args()
net = ExtractModel()
graph = tf.Graph()
with graph.as_default():
with tf.variable_scope("data"):
training_set = get_testing_set(key=data_set_args.training_set_name,
epochs=args.training_epochs, batch_size=args.training_batch_size)
validation_set = get_testing_set(key=data_set_args.validation_set_name,
epochs=args.training_epochs // args.validation_per_epochs,
batch_size=args.validation_batch_size)
iterator = training_set.make_one_shot_iterator()
next_element = iterator.get_next()
training_init_op = iterator.make_initializer(training_set)
validation_init_op = iterator.make_initializer(validation_set)
with tf.variable_scope("extract_model"):
tensor_dict = net.build(next_element, training=True)
"""training summary"""
loss_summary = tf.summary.scalar("loss", tensor_dict["loss"])
accuracy_summary = tf.summary.scalar("accuracy", tensor_dict["accuracy"])
recall_summary = tf.summary.scalar("recall", tensor_dict["recall"])
precision_summary = tf.summary.scalar("precision", tensor_dict["precision"])
f1_score_summary = tf.summary.scalar("f1_score", tensor_dict["f1_score"])
"""validation summary"""
validation_loss = tf.placeholder(tf.float32, shape=())
validation_accuracy = tf.placeholder(tf.float32, shape=())
validation_recall = tf.placeholder(tf.float32, shape=())
validation_precision = tf.placeholder(tf.float32, shape=())
validation_f1_score = tf.placeholder(tf.float32, shape=())
validation_loss_summary = tf.summary.scalar("loss", validation_loss)
validation_accuracy_summary = tf.summary.scalar("accuracy", validation_accuracy)
validation_recall_summary = tf.summary.scalar("recall", validation_recall)
validation_precision_summary = tf.summary.scalar("precision", validation_precision)
validation_f1_score_summary = tf.summary.scalar("f1_score", validation_f1_score)
"""training"""
global_step = tf.Variable(0, dtype=tf.int32, name="global_step")
opt = tf.train.AdamOptimizer(1e-3)
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
upd = opt.minimize(tensor_dict["loss"], global_step=global_step)
saver = tf.train.Saver(max_to_keep=50)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(graph=graph, config=config) as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
save_path = os.path.join(args.save_path, net.name)
if not load_model(saver, sess, save_path):
tf.logging.info("Run on an initialized graph.")
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
training_writer = tf.summary.FileWriter(os.path.join(args.log_path, "training"), sess.graph)
validation_writer = tf.summary.FileWriter(os.path.join(args.log_path, "validation"), sess.graph)
global_step_eval = sess.run(global_step)
training_steps = args.training_epochs * data_set_args.training_set_size // args.training_batch_size
save_steps = args.save_per_epochs * data_set_args.training_set_size // args.training_batch_size
validation_steps = args.validation_per_epochs * data_set_args.training_set_size // args.training_batch_size
pbar = tqdm.tqdm(total=training_steps)
pbar.update(global_step_eval)
sess.run(training_init_op)
while global_step_eval < training_steps:
"""validation"""
if global_step_eval % validation_steps == 0:
sess.run(validation_init_op)
total_loss = 0.0
total_accuracy = 0.0
total_recall = 0.0
total_precision = 0.0
                # number of validation batches; kept separate so the outer
                # validation cadence (validation_steps) is not clobbered
                num_validation_batches = data_set_args.validation_set_size // args.validation_batch_size
                for s in range(num_validation_batches):
                    tensor_dict_eval = sess.run(tensor_dict)
                    total_loss += tensor_dict_eval["loss"]
                    total_accuracy += tensor_dict_eval["accuracy"]
                    total_recall += tensor_dict_eval["recall"]
                    total_precision += tensor_dict_eval["precision"]
                total_loss /= num_validation_batches
                total_accuracy /= num_validation_batches
                total_recall /= num_validation_batches
                total_precision /= num_validation_batches
total_f1_score = 2 * total_recall * total_precision / (total_recall + total_precision)
feed_dict = {validation_loss: total_loss, validation_accuracy: total_accuracy, validation_recall: total_recall,
validation_precision: total_precision, validation_f1_score: total_f1_score}
validation_list = [validation_loss_summary, validation_accuracy_summary, validation_recall_summary,
validation_precision_summary, validation_f1_score_summary]
validation_loss_summary_eval, validation_accuracy_summary_eval, validation_recall_summary_eval,\
validation_precision_summary_eval, validation_f1_score_summary_eval = sess.run(validation_list,
feed_dict=feed_dict)
validation_writer.add_summary(validation_loss_summary_eval, global_step=global_step_eval)
validation_writer.add_summary(validation_accuracy_summary_eval, global_step=global_step_eval)
validation_writer.add_summary(validation_recall_summary_eval, global_step=global_step_eval)
validation_writer.add_summary(validation_precision_summary_eval, global_step=global_step_eval)
validation_writer.add_summary(validation_f1_score_summary_eval, global_step=global_step_eval)
tf.logging.info("Validation done.")
sess.run(training_init_op)
"""training"""
training_list = [loss_summary, accuracy_summary, recall_summary, precision_summary,
f1_score_summary, global_step, upd]
training_loss_summary_eval, training_accuracy_summary_eval, training_recall_summary_eval,\
training_precision_summary_eval, training_f1_score_summary_eval, global_step_eval,\
_ = sess.run(training_list)
training_writer.add_summary(training_loss_summary_eval, global_step=global_step_eval)
training_writer.add_summary(training_accuracy_summary_eval, global_step=global_step_eval)
training_writer.add_summary(training_recall_summary_eval, global_step=global_step_eval)
training_writer.add_summary(training_precision_summary_eval, global_step=global_step_eval)
training_writer.add_summary(training_f1_score_summary_eval, global_step=global_step_eval)
"""save model"""
if global_step_eval % save_steps == 0:
if not os.path.exists(args.save_path) or not os.path.isdir(args.save_path):
os.makedirs(args.save_path)
save_model(saver, sess, save_path, global_step_eval)
pbar.update(1)
tf.logging.info("Congratulations!")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8193434
|
<reponame>AronGreen/bpr-uml-shared
from dataclasses import dataclass
from typing import Optional
from .mongo_document_base import MongoDocumentBase
@dataclass
class SomeItem(MongoDocumentBase):
"""
Dummy class for testing.
Represents nothing in the domain.
"""
number: int
text: str
users: list
random: Optional[str] = None
|
StarcoderdataPython
|
6416089
|
<gh_stars>0
import pytest
from glados import GladosPlugin, RouteType, PluginImporter, BotImporter, GladosBot
from glados.errors import GladosPathExistsError
import logging
from pathlib import Path
import os
PC_BASE_PATH = Path("tests", "plugins_config")
PC1 = Path(PC_BASE_PATH, "TestPlugin1.yaml")
PC2 = Path(PC_BASE_PATH, "TestPlugin2.yaml")
PC3 = Path(PC_BASE_PATH, "TestPlugin3.yaml")
@pytest.fixture
def MockGladosBot():
return GladosBot("", "bot", "")
@pytest.fixture
def MockGladosPlugin(MockGladosBot):
return GladosPlugin("mock", MockGladosBot)
def check_only_one_file():
assert PC1.is_file() is True
assert PC2.is_file() is False
assert PC3.is_file() is False
def cleanup():
os.remove(PC2)
os.remove(PC3)
check_only_one_file()
def test_cant_add_existing(MockGladosPlugin):
def mock_function(request):
return "something"
MockGladosPlugin.add_route(RouteType.SendMessage, "send_message", mock_function)
assert len(MockGladosPlugin.routes) == 1
with pytest.raises(GladosPathExistsError):
MockGladosPlugin.add_route(RouteType.SendMessage, "send_message", mock_function)
def test_add_route(MockGladosPlugin):
assert MockGladosPlugin.routes == []
MockGladosPlugin.add_route(
RouteType.SendMessage, "send_message", lambda request: True
)
assert len(MockGladosPlugin.routes) == 1
route = MockGladosPlugin.routes[0]
assert route.route == "bot_send_message"
assert route.function(None)
def test_plugin_importer_discovery():
pi = PluginImporter("tests/plugins", "tests/plugins_config")
pi.discover_plugins()
assert len(pi.config_files) == 3
def test_plugin_importer_load_configs():
check_only_one_file()
pi = PluginImporter("tests/plugins", "tests/plugins_config")
pi.discover_plugins()
pi.load_discovered_plugins_config()
logging.info(pi.plugin_configs)
assert len(pi.plugin_configs.keys()) == 3
assert PC1.is_file() is True
assert PC2.is_file() is True
assert PC3.is_file() is True
cleanup()
def test_plugin_importer_all():
# Import all the bots
check_only_one_file()
bot_importer = BotImporter("tests/bots_config")
bot_importer.import_bots()
bots = bot_importer.bots.copy()
# Now import all the plugins
pi = PluginImporter("tests/plugins", "tests/plugins_config")
pi.discover_plugins()
pi.load_discovered_plugins_config()
pi.import_discovered_plugins(bots)
# Test that the plugin was installed
assert pi.plugins["TestPlugin1"].test_function("TESTING") == "TESTING"
# Make sure that only 1 plugin was installed
# Plugin #2 should have been disabled
assert len(pi.plugins) == 1
assert pi.plugin_configs.get("TestPlugin2").enabled == False
cleanup()
|
StarcoderdataPython
|
11284075
|
<filename>tests/test_utils_dataset.py
"""Test utils_dataset."""
import tempfile
from pathlib import Path
from dash_charts import utils_dataset
def test_db_connect():
"""Test DBConnect."""
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_dir = Path(tmp_dir)
database = utils_dataset.DBConnect(tmp_dir / 'tmp.db')
table = database.db.create_table('test')
csv_filename = tmp_dir / 'tmp.csv'
table.insert({'username': 'username', 'value': 1})
utils_dataset.export_table_as_csv(csv_filename, table)
database.close()
result = csv_filename.read_text()
assert result == 'id,username,value\n1,username,1\n'
|
StarcoderdataPython
|
1827950
|
<filename>python-2-apps/fn_proofpoint_block_list/fn_proofpoint_block_list/components/proofpoint_add_to_block_group.py<gh_stars>0
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
import fn_proofpoint_block_list.util.selftest as selftest
import resilient
import requests
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'proofpoint_add_to_block_group"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("fn_proofpoint_block_list", {})
selftest.selftest_function(opts)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_proofpoint_block_list", {})
@function("proofpoint_add_to_block_group")
def _proofpoint_add_to_block_group_function(self, event, *args, **kwargs):
"""Function: This function runs on the incident and extracts artifacts of type Email Sender and Email Receiver passing the sender email address into the block list of the email receivers address."""
        # - method that retrieves specific attributes needed for the function to run like credentials, etc...
def get_config_option(option_name, optional=False):
"""Given option_name, checks if it is in app.config. Raises ValueError if a mandatory option is missing"""
option = self.options.get(option_name)
if option is None and optional is False:
err = "'{0}' is mandatory and is not set in app.config file. You must set this value to run this function".format(
option_name)
raise ValueError(err)
else:
return option
try:
# Get the function parameters:
incident_id = kwargs.get("incident_id") # number
log = logging.getLogger(__name__)
log.info("incident_id: %s", incident_id)
# Talks to the config file and gets the auth credentials necessary to talk to Resilient REST API
parser = resilient.ArgumentParser(config_file=resilient.get_config_file())
opts = parser.parse_args()
client = resilient.get_client(opts)
headers = {
'Content-Type': 'application/json',
}
# Authentication specific settings
user_name = get_config_option("api_user")
password = get_config_option("api_user_credentials")
host = get_config_option("endpoint")
port = get_config_option("port")
completion = False
# PUT YOUR FUNCTION IMPLEMENTATION CODE HERE
yield StatusMessage("starting...")
uri = "/incidents/{}/artifacts".format(incident_id)
incident_artifacts = client.get(uri)
# Stores the attachments from the incident into a list object
email_senders = list()
email_recipients = list()
# For filtering on the Email Sender Artifact type and adding it to
# the email sender list
for incident_artifact in incident_artifacts:
                if incident_artifact['type'] == 9:
email_senders.append(str(incident_artifact['value']))
# For filtering on the Email Recipient Artifact type and adding it to
# the email recipient list
for incident_artifact in incident_artifacts:
                if incident_artifact['type'] == 20:
email_recipients.append(str(incident_artifact['value']))
# Gets the first item from each list and assigns it to the Proofpoint
# Endpoint call that adds the sender email address to the blocklist of
# the recipient email address account
# TODO: Handle multiple additions to block list per recipient
sender = email_senders[0]
recipient = email_recipients[0]
#data = '{"blocklist" : "%s"}' % email_senders - troubleshooting to isolate for just one element
data = '{"blocklist" : "%s"}' % sender
response = requests.put('https://{}:{}/rest/v1/enduser/{}'.format(host,port,recipient), headers=headers, data=data, verify=False,
auth=(user_name, password))
#log.info(response.headers)
#log.info(response.content)
# TODO: extract either header or body information and write it to value variable
# TODO: expand success in the code more around response
submission_result = response.content
completion = True
yield StatusMessage("done...")
results = {
"success": completion,
"payload": submission_result
}
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception:
yield FunctionError()
|
StarcoderdataPython
|
1668946
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 19:27:35 2019
@author: debasish
"""
def qrs_detection(ecg,Fs):
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
#=============================================================Function Declarations==============================================================#
def sthresh_update(spki,threshi1,npki,threshi2):
spki=0.125*isig[i]+0.875*spki
threshi1=npki+0.25*(spki-npki)
threshi2=0.3*threshi1
return(spki,threshi1,threshi2)
def nthresh_update(spki,threshi1,npki,threshi2):
npki=0.125*isig[i]+0.875*npki
threshi1=npki+0.25*(spki-npki)
threshi2=0.3*threshi1
return(npki,threshi1,threshi2)
def RR_update():
if np.size(RRinterval1)<8:
RRaverage1=np.sum(RRinterval1)/np.size(RRinterval1)
else:
RRaverage1=np.sum(RRinterval1[np.size(RRinterval1)-7:])/8
if np.size(RRinterval2)<8:
RRaverage2=np.sum(RRinterval2)/np.size(RRinterval2)
else:
RRaverage2=np.sum(RRinterval2[np.size(RRinterval2)-7:])/8
RR_low_limit=round(0.92*RRaverage2)
RR_high_limit=round(1.16*RRaverage2)
RR_missed_limit=round(1.66*RRaverage2)
return(RRaverage1,RRaverage2,RR_low_limit,RR_high_limit,RR_missed_limit)
def check_T(i,Fs):
        slope1=np.average(np.diff(isig[i-int(round(0.075*Fs)):i]))
        slope2=np.average(np.diff(isig[beat_loc[-1]-int(round(0.075*Fs)):beat_loc[-1]]))
if slope1<0.5*slope2 and i<=int(0.36*Fs)+beat_loc[-1]:
return(True)
else:
return(False)
def search_back(threshi2):
val=-2
for k in n_loc:
if isig[k]>=threshi2 and isig[k]>val:
val=isig[k]
index=k
return index
def peak_cond(loc,crange):
val=(np.sum([x for x in np.sign(isigd[loc:loc+crange]) if x>0])-np.sum([x for x in np.sign(isigd[loc-crange:loc]) if x<0]))/(2*crange)
if val<=0.15:
return(True)
else:
return(False)
#---------------Band Pass filtering--------------#
b,a=signal.butter(3,[12/Fs,24/Fs],'bandpass')
fsig=signal.filtfilt(b,a,ecg)
# plt.figure(1)
# plt.subplot(411)
# plt.plot(fsig)
#---------------derivative-----------------------#
b=np.array([1,2,0,-2,-1])/8
a=1
dsig=signal.filtfilt(b,a,fsig)
# plt.subplot(412)
# plt.plot(dsig)
#----------------squaring------------------------#
ssig1=np.power(dsig,2)
ssig=ssig1[int(0.30*Fs):] #First 300 ms data is neglected
# plt.subplot(413)
# plt.plot(ssig)
#----------------integration---------------------#
window1= int(0.150*Fs)
isig1=ssig
for i in range(1,window1):
isig1=isig1+np.append(ssig[i:],np.zeros(i))
isig1=isig1/window1
isig1=np.power(isig1,2)
isig1=np.round(isig1/(2*np.max(isig1)),10)
window2= int(0.050*Fs)
isig=isig1
for i in range(1,window2):
isig=isig+np.append(isig1[i:],np.zeros(i))
isig=isig/window2
delay=(window1+window2)/2
# plt.subplot(414)
# plt.plot(isig)
#-----------------Fiducial Point detection------------------#
#------------------training phase 1---------------------------#
#for integrated signal
npki=np.average(isig[:2*Fs])*0.75
spki=0.8*np.amax(isig[:2*Fs])
threshi1=npki+0.25*(spki-npki)
threshi2=0.3*threshi1
#for filtered signal
npkf=np.average(fsig[:2*Fs])*0.75
spkf=0.8*np.amax(fsig[:2*Fs])
threshf1=npkf+0.25*(spkf-npkf)
threshf2=0.3*threshf1
#------------------training phase 2---------------------------#
isigd=np.diff(isig)
beat_loc=[np.argmax(isig[:2*Fs])]
number_beats=1
crange=int(0.01*Fs)
if beat_loc[-1]-int(0.2*Fs)>crange:
i=crange+1
else:
i=beat_loc[-1]+int(0.2*Fs)
RRinterval1=[]
RRinterval2=[]
while (number_beats<2 and i<np.size(isigd)):
if i<beat_loc[-1]:
while i<beat_loc[0]-5:
if isigd[i]<0 and isigd[i-1]>0 and peak_cond(i,crange):
if isig[i]>=threshi1:
i=i+np.argmax(isig[i:i+int(0.2*Fs)])
beat_loc=np.array([i,beat_loc[0]])
number_beats=number_beats+1
#updating threshold parameters
(spki,threshi1,threshi2)=sthresh_update(spki,threshi1,npki,threshi2)
#updating RR interval
RRinterval1=np.append(RRinterval1,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
RRinterval2=np.append(RRinterval2,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
(RRaverage1,RRaverage2,RR_low_limit,RR_high_limit,RR_missed_limit)=RR_update()
i=beat_loc[-1]+int(0.2*Fs)
else:
(npki,threshi1,threshi2)=nthresh_update(spki,threshi1,npki,threshi2)
i=i+1
if i==beat_loc[-1]-5:
i=beat_loc[-1]+int(0.2*Fs)
break
else:
if isigd[i]<0 and isigd[i-1]>0 and peak_cond(i,crange):
if isig[i]>=threshi1:
i=i+np.argmax(isig[i:i+int(0.2*Fs)])
beat_loc=np.append(beat_loc,[i])
number_beats=number_beats+1
#updating threshold parameters
(spki,threshi1,threshi2)=sthresh_update(spki,threshi1,npki,threshi2)
#updating RR interval
RRinterval1=np.append(RRinterval1,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
RRinterval2=np.append(RRinterval2,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
(RRaverage1,RRaverage2,RR_low_limit,RR_high_limit,RR_missed_limit)=RR_update()
i=beat_loc[-1]+int(0.2*Fs)
else:
(npki,threshi1,threshi2)=nthresh_update(spki,threshi1,npki,threshi2)
i=i+1
if np.size(beat_loc)==1:
#print('recheck')
beat_loc1=np.argmax(fsig[:int(1.8*Fs)])
beat_loc2=np.argmax(fsig[(beat_loc1+int(0.2*Fs)):(beat_loc1+int(1.7*Fs))])+beat_loc1+int(0.2*Fs)
i=np.argmax(isig[beat_loc1+int(0.2*Fs):beat_loc2-int(0.2*Fs)])+beat_loc1+int(0.2*Fs)
if isigd[i]<0 and isigd[i-1]>0 and peak_cond(i,crange) and (isig[i]>0.3*(isig[beat_loc1]+isig[beat_loc2])):
beat_loc2=i
number_beats=2
i=beat_loc2+int(0.2*Fs)
beat_loc=np.array([beat_loc1,beat_loc2])
RRinterval1=[]
RRinterval2=[]
(spki,threshi1,threshi2)=sthresh_update(spki,threshi1,npki,threshi2)
#updating RR interval
RRinterval1=np.append(RRinterval1,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
RRinterval2=np.append(RRinterval2,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
(RRaverage1,RRaverage2,RR_low_limit,RR_high_limit,RR_missed_limit)=RR_update()
#-----------------------------------------------------detection phase--------------------------------------------------#
n_loc=[]
#error=0
while (i<np.size(isigd)):
if isigd[i]<0 and isigd[i-1]>0 and peak_cond(i,crange):
            if (isig[i]>=threshi1) and (not check_T(i,Fs)):
i=i+np.argmax(isig[i:i+int(0.2*Fs)])
beat_loc=np.append(beat_loc,[i])
number_beats=number_beats+1
#updating threshold parameters
(spki,threshi1,threshi2)=sthresh_update(spki,threshi1,npki,threshi2)
n_loc=[]
#updating RR interval
RRinterval1=np.append(RRinterval1,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
if (RR_high_limit>=[beat_loc[number_beats-1]-beat_loc[number_beats-2]]>=RR_low_limit):
RRinterval2=np.append(RRinterval2,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
(RRaverage1,RRaverage2,RR_low_limit,RR_high_limit,RR_missed_limit)=RR_update()
i=i+int(0.2*Fs)
else:
n_loc.append(i)
(npki,threshi1,threshi2)=nthresh_update(spki,threshi1,npki,threshi2)
if i>RR_missed_limit+beat_loc[number_beats-1]:
try:
beat_loc=np.append(beat_loc,search_back(threshi2))
number_beats=number_beats+1
#updating threshold parameters
(spki,threshi1,threshi2)=sthresh_update(spki,threshi1,npki,threshi2)
n_loc=[]
#updating RR interval
RRinterval1=np.append(RRinterval1,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
if (RR_high_limit>=[beat_loc[number_beats-1]-beat_loc[number_beats-2]]>=RR_low_limit):
RRinterval2=np.append(RRinterval2,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
(RRaverage1,RRaverage2,RR_low_limit,RR_high_limit,RR_missed_limit)=RR_update()
i=i+int(0.2*Fs)
except :
#error=error+1
#print('Error occured:',error)
if np.size(n_loc)==0:
beat_loc=np.append(beat_loc,int(beat_loc[-1]+int(RRaverage2*1.1)))
else:
beat_loc=np.append(beat_loc,n_loc[np.argmax(isig[n_loc])])
number_beats=number_beats+1
#updating threshold parameters
(spki,threshi1,threshi2)=sthresh_update(spki,threshi1,npki,threshi2)
n_loc=[]
#updating RR interval
RRinterval1=np.append(RRinterval1,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
if (RR_high_limit>=[beat_loc[number_beats-1]-beat_loc[number_beats-2]]>=RR_low_limit):
RRinterval2=np.append(RRinterval2,[beat_loc[number_beats-1]-beat_loc[number_beats-2]])
(RRaverage1,RRaverage2,RR_low_limit,RR_high_limit,RR_missed_limit)=RR_update()
i=i+int(0.2*Fs)
i=i+1
beat_loc=beat_loc+int(delay)+int(0.30*Fs)
return(beat_loc,RRinterval1)
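# Hedged usage sketch (illustrative only): `ecg` would be a 1-D array of raw ECG
# samples and Fs its sampling rate in Hz, e.g.
#   beat_locations, rr_intervals = qrs_detection(ecg, Fs=360)
# beat_locations are sample indices of the detected R-peaks; rr_intervals are the
# successive beat-to-beat distances in samples.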
|
StarcoderdataPython
|
6688002
|
"""
A representation of a ``while`` loop.
"""
from regression_tests.parsers.c_parser.exprs.expression import Expression
from regression_tests.parsers.c_parser.stmts.loop import Loop
from regression_tests.utils import memoize
class WhileLoop(Loop):
"""A representation of a ``while`` loop."""
def is_while_loop(self):
"""Returns ``True``."""
return True
@property
@memoize
def condition(self):
"""Condition of the statement (:class:`.Expression`)."""
cond = next(self._node.get_children())
return Expression._from_clang_node(cond)
def __repr__(self):
return '<{} condition={}>'.format(
self.__class__.__name__,
self.condition
)
def __str__(self):
return 'while ({})'.format(self.condition)
|
StarcoderdataPython
|
8046089
|
#!/usr/bin/env python
import mirheo as mir
dt = 0.001
ranks = (1, 1, 1)
domain = (8, 8, 8)
u = mir.Mirheo(ranks, domain, debug_level=3, log_filename='log')
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=10)
u.registerParticleVector(pv, ic)
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind="DPD", a=10.0, gamma=10.0, kBT=1.0, power=0.5)
u.registerInteraction(dpd)
u.setInteraction(dpd, pv, pv)
vv = mir.Integrators.VelocityVerlet('vv')
u.registerIntegrator(vv)
u.setIntegrator(vv, pv)
u.registerPlugins(mir.Plugins.createStats('stats', every=200, filename="stats.csv"))
u.run(2000, dt=dt)
# nTEST: dump.stats
# cd dump
# mir.run --runargs "-n 2" ./stats.py > /dev/null
# mir.post ../tools/dump_csv.py stats.csv time kBT vx vy vz --header > stats.out.txt
|
StarcoderdataPython
|
1918242
|
<reponame>mh-globus/globus-sdk-python
from .client import GroupsClient
from .errors import GroupsAPIError
__all__ = ("GroupsClient", "GroupsAPIError")
|
StarcoderdataPython
|
326574
|
<reponame>randomchain/randbeacon<filename>extra_apps/verify.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from pprint import pprint
import click
import merkletools
import sloth
import msgpack
from randbeacon import utils
import ujson as json
SERIALIZED_FIELDS = ('COMMIT', 'PROOF')
def verify_sloth(inp, out, witness, bits, iterations):
s = sloth.Sloth(data=inp,
final_hash=out,
witness=witness,
bits=bits,
iterations=iterations)
s.verify()
s.wait()
return s.valid
def build_merkle_tree(leaves, hash_type, expected_root=None):
mt = merkletools.MerkleTools(hash_type=hash_type)
mt.leaves = leaves
mt.make_tree()
if expected_root is not None:
assert mt.merkle_root == expected_root
print("Root matches!")
return mt
def unpack_sequence(json_file):
beacon_sequence = json.load(json_file)
for k, v in beacon_sequence.items():
if k in SERIALIZED_FIELDS:
beacon_sequence[k]['data'] = msgpack.unpackb(bytes.fromhex(v['data']))
return beacon_sequence
@click.command()
@click.argument('json', type=click.File())
@click.option('--inp', type=str, default=None)
@click.option('--verbose', '-v', is_flag=True, default=False)
def main(json, inp, verbose):
b_seq = unpack_sequence(json)
print('\n\n {:=^50}\n'.format(" Sequence Data "))
if verbose:
pprint(b_seq, width=120)
else:
print("Sequence no.", b_seq['seq_no'])
print("Output: {}\n{}".format(b_seq['OUTPUT']['data'], b_seq['OUTPUT']['timestamp']))
commit_data = b_seq['COMMIT']['data']
print('\n\n {:=^50}\n'.format(" Merkle Tree "))
mt = build_merkle_tree(commit_data[b'leaves'], commit_data[b'hash_type'].decode('ascii'), commit_data[b'root'])
print("Merkle Tree:\n\tRoot -> {}\n\tLeaves -> {}".format(mt.merkle_root.hex(), len(mt.leaves)))
if inp:
inp = bytes.fromhex(inp)
try:
idx = mt.leaves.index(inp)
except ValueError:
print("Input '{}' not found in commitment!".format(inp.hex()))
else:
print("Proof")
pprint(mt.get_proof(idx))
print('\n\n {:=^50}\n'.format(" Computation Verification "))
proof_data = b_seq['PROOF']['data']
print("Sloth parameters: {} bits, {} iterations".format(proof_data[b'bits'], proof_data[b'iterations']))
print("Sloth witness: {}".format(proof_data[b'witness'].hex()))
valid = verify_sloth(mt.merkle_root, bytes.fromhex(b_seq['OUTPUT']['data']), proof_data[b'witness'], proof_data[b'bits'], proof_data[b'iterations'])
print("VALID" if valid else "NOT VALID!")
if __name__ == "__main__":
main()
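# Hedged usage sketch (file name and hex input are illustrative only):
#   python verify.py beacon_sequence.json --inp deadbeef -v
# This re-builds the Merkle tree from the committed leaves, checks the sloth
# witness against the published output, and optionally proves membership of --inp.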
|
StarcoderdataPython
|
6447576
|
<reponame>KaranToor/MA450<filename>google-cloud-sdk/.install/.backup/lib/surface/compute/instance_groups/managed/set_autohealing.py
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for setting autohealing policy of managed instance group."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import managed_instance_groups_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.instance_groups import flags as instance_groups_flags
def _AddArgs(parser):
"""Adds args."""
managed_instance_groups_utils.AddAutohealingArgs(parser)
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class SetAutohealing(base_classes.BaseAsyncMutator):
"""Set autohealing policy of instance group manager."""
@staticmethod
def Args(parser):
_AddArgs(parser=parser)
instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG.AddArgument(
parser)
@property
def method(self):
return 'SetAutoHealingPolicies'
@property
def service(self):
return self.compute.instanceGroupManagers
def CreateRequests(self, args):
igm_ref = (instance_groups_flags.MULTISCOPE_INSTANCE_GROUP_MANAGER_ARG.
ResolveAsResource)(
args, self.resources,
default_scope=compute_scope.ScopeEnum.ZONE,
scope_lister=flags.GetDefaultScopeLister(
self.compute_client, self.project))
auto_healing_policies = (
managed_instance_groups_utils.CreateAutohealingPolicies(
self.resources, self.messages, args))
if igm_ref.Collection() == 'compute.instanceGroupManagers':
service = self.compute.instanceGroupManagers
request = (
self.messages.
ComputeInstanceGroupManagersSetAutoHealingPoliciesRequest(
project=self.project,
zone=igm_ref.zone,
instanceGroupManager=igm_ref.Name(),
instanceGroupManagersSetAutoHealingRequest=(
self.messages.InstanceGroupManagersSetAutoHealingRequest(
autoHealingPolicies=auto_healing_policies))))
else:
service = self.compute.regionInstanceGroupManagers
request = (
self.messages.
ComputeRegionInstanceGroupManagersSetAutoHealingPoliciesRequest(
project=self.project,
region=igm_ref.region,
instanceGroupManager=igm_ref.Name(),
regionInstanceGroupManagersSetAutoHealingRequest=(
self.messages.
RegionInstanceGroupManagersSetAutoHealingRequest(
autoHealingPolicies=auto_healing_policies))))
return [(service, self.method, request)]
SetAutohealing.detailed_help = {
'brief': 'Set autohealing policy for managed instance group.',
'DESCRIPTION': """
*{command}* updates the autohealing policy for an existing managed
instance group.
If --http-health-check or --https-health-check is specified, the resulting
autohealing policy will be triggered by the health-check i.e. the autohealing
action (RECREATE) on an instance will be performed if the health-check signals
that the instance is UNHEALTHY. If neither --http-health-check nor
--https-health-check is specified, the resulting autohealing policy will be
triggered by instance's status i.e. the autohealing action (RECREATE) on an
instance will be performed if the instance.status is not RUNNING.
--initial-delay specifies the length of the period during which IGM will
refrain from autohealing the instance even if the instance is reported as not
RUNNING or UNHEALTHY. This value must be from range [0, 3600].
""",
}
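# Hedged usage sketch (group, health-check and zone names are illustrative only):
#   gcloud beta compute instance-groups managed set-autohealing example-igm \
#       --http-health-check example-health-check --initial-delay 300 --zone us-central1-a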
|
StarcoderdataPython
|
1801321
|
from django.http import HttpResponse
from django.shortcuts import render
from django.utils.encoding import smart_str
import os, sys, inspect, StringIO, shutil, time, json, base64
current_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
cardiff_path = os.path.join(os.path.dirname(current_path), "Cardiff")
sys.path.insert(0, cardiff_path)
from Cardiff import Cardiff
cardiff = Cardiff()
cardiff_settings_path = os.path.join(cardiff_path, "settings.json")
temp_path = os.path.join(".", "temp")
def is_set(value):
if value.startswith("<") and value.endswith(">"):
return False
else:
return True
def load():
cardiff.load_settings(cardiff_settings_path)
def save():
cardiff.save_settings(cardiff_settings_path)
def image_to_base64(image):
with open(image, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
image_ext = image.split(".")[-1]
return "data:image/{0};base64,{1}".format(image_ext, encoded_string)
load()
os.environ["SILENT_MODE"] = "1"
def default(request):
context = {}
context["vcs"] = cardiff.settings["vcs"]
if not is_set(cardiff.settings["repo"]["current"]):
context["repo_set"] = False
if not is_set(cardiff.settings["user.name"]):
context["name_set"] = False
else:
context["signed"] = cardiff.settings["user.name"]
context["name_set"] = cardiff.settings["user.name"]
if not is_set(cardiff.settings["user.email"]):
context["email_set"] = False
else:
context["email_set"] = cardiff.settings["user.email"]
return render(request, "prepare.html", context)
else:
return repo(request)
def login(request):
if "username" in request.GET:
cardiff.settings["user.name"] = request.GET["username"]
if "useremail" in request.GET:
cardiff.settings["user.email"] = request.GET["useremail"]
save()
return default(request)
def logout(request):
cardiff.settings["user.name"] = "<username>"
cardiff.settings["user.email"] = "<useremail>"
save()
return default(request)
def about(request):
context = cardiff.settings["information"]
if is_set(cardiff.settings["user.name"]):
context["signed"] = cardiff.settings["user.name"]
return render(request, "about.html", context)
def explore(request):
context = {}
if is_set(cardiff.settings["user.name"]):
context["signed"] = cardiff.settings["user.name"]
current_repo = cardiff.settings["repo"]["current"].split("/")[-1]
other_repos = list( o_repo.split("/")[-1] for o_repo in cardiff.settings["repo"]["others"])
repo_list = []
repo_list.append(current_repo)
repo_list.extend(other_repos)
new_repo_list = zip(repo_list[::2], repo_list[1::2])
if len(repo_list) % 2:
new_repo_list.append((repo_list[-1], None))
context["repo_list"] = new_repo_list
return render(request, "explore.html", context)
def search(request):
context = {}
if is_set(cardiff.settings["user.name"]):
context["signed"] = cardiff.settings["user.name"]
if "keyword" not in request.GET or request.GET["keyword"] == "":
return default(request)
keywords = request.GET["keyword"].split()
context["keywords"] = keywords
return render(request, "search.html", context)
def settings(request):
context = {
"cardiff": cardiff.settings
}
if is_set(cardiff.settings["user.name"]):
context["signed"] = cardiff.settings["user.name"]
if request.method == "POST":
for key in request.POST:
cardiff.settings[key] = request.POST[key]
save()
context["saved"] = True
return render(request, "settings.html", context)
def files(request):
context = {}
if "checkout" in request.GET:
file_to_checkout = request.GET["checkout"]
file_version = request.GET["version"]
cardiff.exec_cmd(["checkout", file_to_checkout, file_version])
file_path = os.path.join(cardiff.settings["repo"]["current"], file_to_checkout)
context["file_path"] = file_path
if "download" in request.GET:
file_to_checkout = request.GET["download"]
file_version = request.GET["version"]
cardiff.exec_cmd(["checkout", file_to_checkout, file_version])
file_path = os.path.join(cardiff.settings["repo"]["current"], file_to_checkout)
context["file_path"] = file_path
response = HttpResponse(file(file_path))
response['Content-Type'] = 'application/force-download'
response['Content-Length'] = os.path.getsize(file_path)
response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(file_to_checkout)
response['Accept-Ranges'] = 'bytes'
return response
if "diff" in request.GET:
file_to_checkout = request.GET["diff"]
file_version = request.GET["before"]
cardiff.exec_cmd(["checkout", file_to_checkout, file_version])
file_path = os.path.join(cardiff.settings["repo"]["current"], file_to_checkout)
file_format = os.path.splitext(os.path.basename(file_path))[-1]
temp_file_name = os.path.basename(file_path).replace(file_format, "")
temp_file_before = os.path.join(temp_path, temp_file_name + str(time.time()) + file_format)
if not os.path.isdir(temp_path):
os.mkdir(temp_path)
shutil.copyfile(file_path, temp_file_before)
context["temp_file"] = os.path.basename(temp_file_before)
version_to_diff = request.GET["after"]
cardiff.exec_cmd(["checkout", file_to_checkout, version_to_diff])
temp_file_after = os.path.join(temp_path, temp_file_name + str(time.time()) + file_format)
shutil.copyfile(file_path, temp_file_after)
context["temp_file_to_diff"] = os.path.basename(temp_file_after)
file_diffs = cardiff.exec_cmd(["diff", file_to_checkout, file_version, version_to_diff])
diff_before_name = os.path.basename(file_diffs[0])
diff_before = os.path.join(temp_path, diff_before_name)
shutil.copyfile(file_diffs[0], diff_before)
context["temp_file_diff_before"] = diff_before_name
diff_after_name = os.path.basename(file_diffs[1])
diff_after = os.path.join(temp_path, diff_after_name)
shutil.copyfile(file_diffs[1], diff_after)
context["temp_file_diff_after"] = diff_after_name
with open(file_diffs[2], "r") as file_diff_parameters:
context["temp_file_diff_parameters"] = json.load(file_diff_parameters)
diff_type = {
"image": "diffview/image.html"
}
if file_format in [".jpg", ".png", ".bmp", ".gif"]:
context = {
"file1": image_to_base64(temp_file_before),
"file2": image_to_base64(temp_file_after)
}
if "changes" in request.GET:
context = {
"file1": image_to_base64(diff_before),
"file2": image_to_base64(diff_after)
}
return render(request, diff_type["image"], context)
return default(request)
def view(request):
context = {
"diff": request.GET["diff"],
"before": request.GET["before"],
"after": request.GET["after"],
"aspect_ratio": "80%",
"diff_view": True
}
return render(request, "view.html", context)
def branch(request):
if "name" in request.GET:
cardiff.exec_cmd(["branch", request.GET["name"]])
return repo(request)
def repo(request):
context = {}
context["vcs"] = cardiff.settings["vcs"]
if not is_set(cardiff.settings["user.name"]):
context["name_set"] = False
else:
context["signed"] = cardiff.settings["user.name"]
context["name_set"] = cardiff.settings["user.name"]
if not is_set(cardiff.settings["user.email"]):
context["email_set"] = False
else:
context["email_set"] = cardiff.settings["user.email"]
if not is_set(cardiff.settings["repo"]["current"]) and request.method != "GET" and "init" not in request.GET:
context["repo_set"] = False
return render(request, "prepare.html", context)
if is_set(cardiff.settings["repo"]["current"]):
vcs = cardiff.setup_vcs()
if request.method == "GET":
if "init" in request.GET:
if "username" in request.GET:
cardiff.settings["user.name"] = request.GET["username"]
if "useremail" in request.GET:
cardiff.settings["user.email"] = request.GET["useremail"]
initial_succeed = cardiff.exec_cmd(["init", request.GET["init"]])
if not initial_succeed:
context["initial_failed"] = True
context["initial_info"] = {
"repo": request.GET["init"],
"username": cardiff.settings["user.name"],
"useremail": cardiff.settings["user.email"]
}
return render(request, "alert.html", context)
save()
if "switch" in request.GET:
repo_to_switch = request.GET["switch"]
cardiff.exec_cmd(["repo", repo_to_switch])
save()
vcs = cardiff.setup_vcs()
if request.method == "POST":
if "commit_file" in request.FILES:
up_file = request.FILES["commit_file"]
up_file_name = up_file.name
save_location = os.path.join(cardiff.settings["repo"]["current"], up_file_name)
with open(save_location, 'wb+') as destination:
for chunk in up_file.chunks():
destination.write(chunk)
context["uploaded"] = up_file_name
context["save_loc"] = save_location
if "commit_msg" in request.POST:
up_file = request.POST["uploaded_file"]
commit_msg = request.POST["commit_msg"]
cardiff.exec_cmd(["commit", up_file, commit_msg])
context["committed"] = up_file
context["current_repo"] = cardiff.settings["repo"]["current"].split("/")[-1]
context["other_repo"] = list( o_repo.split("/")[-1] for o_repo in cardiff.settings["repo"]["others"])
context["current_branch"] = cardiff.vcs_current_branch
context["other_branches"] = cardiff.vcs_branches["other"]
context["commit_logs"] = reversed(cardiff.vcs.get_commits())
return render(request, "repo.html", context)
|
StarcoderdataPython
|
9683136
|
<gh_stars>0
from macala import MancalaGame
mg = MancalaGame()
while not mg.is_over:
print mg
print "it is player %s's turn" % mg.player_turn
mg.make_move(int(raw_input('> ')))
print mg
p0, p1 = mg.score
if p0 < p1:
print "player 1 wins!"
elif p1 < p0:
print "player 0 wins!"
else:
print "it's a tie!"
|
StarcoderdataPython
|
6468577
|
<gh_stars>0
from django.apps import AppConfig
class DataimportConfig(AppConfig):
name = 'microimprocessing'
|
StarcoderdataPython
|
4869243
|
<gh_stars>0
import os
import pydicom
print("Executando... Por favor aguarde.")
arq = open('dicom.xml', 'w')
texto = []
texto.append("<?xml version=\"1.0\"?>\n")
path = os.getcwd() + '\\Exames de imagem\\'
for r, d, f in os.walk(path):
for file in f:
if '.dcm' in file:
path = str(r) + "\\" + str(file)
texto.append("<data src=\"" + path + "\">\n")
ds = pydicom.filereader.dcmread(path)
for key in ds.dir():
data_element = ds.data_element(key)
vr = str(data_element.VR)
if (vr != "OW") and (vr != "LO"):
tag = str(data_element.tag)
vm = str(data_element.VM)
name = str(data_element.name)
value = str(data_element.value)
                    length = str(len(value))
                    texto.append("\t<element vr=\"" + vr + "\" tag=\"" + tag + "\" vm=\"" + vm + "\" name=\"" + name + "\" len=\"" + length + "\">" + value + "</element>\n")
texto.append("</data>\n")
arq.writelines(texto)
arq.close()
|
StarcoderdataPython
|
6411312
|
import scipy, numpy as np
from tick.solver.base import SolverFirstOrderSto as SFOS
from tick.prox.base.prox import Prox as TPROX
from tick.base_model import ModelGeneralizedLinear as TMGL
class DummySolver(SFOS):
def _set_cpp_solver(self, dtype_or_object_with_dtype): return None
def set_epoch_size(self, v): pass
def set_rand_max(self, v): pass
def set_model(self, model): pass
class Solver(DummySolver):
@staticmethod
def CFUNC_RESOLVER(model, s = ""):
X = model.features
C = "s" if isinstance(X, scipy.sparse.csr.csr_matrix) else "d"
T = "d" if X.dtype == np.dtype('float64') else "s"
return model._MANGLING + s + C + T
def __init__(self, **kwargs):
object.__setattr__(self, "log_every_n_epochs", 10)
if "log_every_n_epochs" in kwargs:
object.__setattr__(self, "log_every_n_epochs", kwargs["log_every_n_epochs"])
object.__setattr__(self, "_solver", DummySolver())
object.__setattr__(self, "_dao", None)
def set_super_model(self, SUPER, model: TMGL):
import statick.solver.bin.statick_solver as statick_solver
SUPER.set_model(self, model)
func = self._s_name + "_" + Solver.CFUNC_RESOLVER(model, "_dao_")
if self.n_threads > 1:
object.__setattr__(self, "_dao", getattr(statick_solver, func)(model._dao, self.max_iter, self.epoch_size, self.n_threads))
self._dao.history.tol.val = self.tol
else:
object.__setattr__(self, "_dao", getattr(statick_solver, func)(model._dao))
if hasattr(self._dao, 'history'):
self._dao.history.log_every_n_epochs = self.log_every_n_epochs
if hasattr(self._dao, 'step'):
self._dao.step = self.step
return self
def set_prox(self, prox: TPROX):
if self.model is None:
raise ValueError("Set model, then Prox")
object.__setattr__(self, "_prox", prox._set_dao(self.model.features.dtype))
return self
def solve(self):
import statick.solver.bin.statick_solver as statick_solver
f = "solve_" + self._s_name + "_" + Solver.CFUNC_RESOLVER(self.model, "_" + self._prox._MANGLING + "_")
max_iter = self.max_iter
if self.n_threads > 1:
max_iter = 1
for i in range(max_iter):
getattr(statick_solver, f)(self._dao, self.model._dao, self._prox._dao)
if hasattr(self._dao, 'history'):
object.__setattr__(self, "objectives", self._dao.history.objectives)
object.__setattr__(self, "time_history", self._dao.history.time_history)
|
StarcoderdataPython
|
6556884
|
import unittest
from typing import List
class Solution:
@staticmethod
def exist(board: List[List[str]], word: str) -> bool:
"""
Given an m x n grid of characters board and a string word, return true if word exists in the
grid.
The word can be constructed from letters of sequentially adjacent cells, where adjacent
cells are horizontally or vertically neighboring. The same letter cell may not be used more
than once.
Example 1:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCCED"
Output: true
Example 2:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "SEE"
Output: true
Example 3:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCB"
Output: false
Constraints:
m == board.length
n = board[i].length
1 <= m, n <= 6
1 <= word.length <= 15
board and word consists of only lowercase and uppercase English letters.
Follow up: Could you use search pruning to make your solution faster with a larger board?
"""
max_row, max_column = len(board), len(board[0])
for row_index in range(max_row):
for column_index in range(max_column):
visited_set = set()
if Solution.dfs(board=board, row_index=row_index, column_index=column_index,
visited_set=visited_set, word=word, max_row=max_row,
max_column=max_column):
return True
return False
@staticmethod
def dfs(board, row_index, column_index, visited_set, word, max_row, max_column):
current_index = (row_index, column_index)
# If the word is processed fully
if not word:
return True
# If we already visit this index
if current_index in visited_set:
return False
# If we have a matching first character
if board[row_index][column_index] == word[0]:
if len(word) == 1:
return True
visited_set.add(current_index)
changes = [(-1, 0), (0, 1), (1, 0), (0, -1)]
for dy, dx in changes:
new_row, new_column = row_index + dy, column_index + dx
if 0 <= new_row < max_row and 0 <= new_column < max_column:
result = Solution.dfs(board=board, row_index=new_row, column_index=new_column,
visited_set=visited_set, word=word[1:], max_row=max_row,
max_column=max_column)
if result:
return True
visited_set.remove(current_index)
return False
class WordSearch(unittest.TestCase):
def test_case_1(self):
board, word = [['A', 'B', 'C', 'E'], ['S', 'F', 'C', 'S'], ['A', 'D', 'E', 'E']], 'ABCCED'
expected_result = True
self.assertEqual(expected_result, Solution.exist(board=board, word=word))
def test_case_2(self):
board, word = [['A', 'B', 'C', 'E'], ['S', 'F', 'C', 'S'], ['A', 'D', 'E', 'E']], 'SEE'
expected_result = True
self.assertEqual(expected_result, Solution.exist(board=board, word=word))
def test_case_3(self):
board, word = [['A', 'B', 'C', 'E'], ['S', 'F', 'C', 'S'], ['A', 'D', 'E', 'E']], 'ABCB'
expected_result = False
self.assertEqual(expected_result, Solution.exist(board=board, word=word))
def test_case_4(self):
board, word = [['a']], 'a'
expected_result = True
self.assertEqual(expected_result, Solution.exist(board=board, word=word))
|
StarcoderdataPython
|
3529258
|
from gevent import monkey
monkey.patch_all()
import discord
import os
from flask import Flask
from flask_compress import Compress
from gevent.pywsgi import WSGIServer
from threading import Thread
client = discord.Client()
def get_role(guild, name):
return discord.utils.get(guild.roles, name=name)
@client.event
async def on_ready():
await client.change_presence(activity=discord.Activity(name="users changing voice chats", type=discord.ActivityType.listening))
@client.event
async def on_voice_state_update(member, before, after):
guild = member.guild
if after.channel:
channel_name = after.channel.name
if not get_role(guild, channel_name):
await guild.create_role(name=channel_name, mentionable=True)
role = get_role(guild, channel_name)
await member.add_roles(role)
if before.channel:
await member.remove_roles(get_role(guild, before.channel.name))
app = Flask('')
@app.route('/')
def home():
    return "Voice Chat Ping active!"
def run():
WSGIServer(('0.0.0.0', 8080), app).serve_forever()
compress = Compress()
compress.init_app(app)
Thread(target=run).start()
# Start the bot in the main thread once the keep-alive web server is running,
# so requests to "/" return immediately instead of blocking on client.run().
client.run(os.environ["token"])
|
StarcoderdataPython
|
4971199
|
<filename>base/site-packages/jpush/push/__init__.py<gh_stars>100-1000
from .core import Push
from .audience import (
tag,
tag_and,
tag_not,
alias,
registration_id,
segment,
abtest
)
from .payload import (
android,
ios,
winphone,
platform,
cid,
notification,
message,
audience,
options,
smsmessage,
)
# Common selector for audience & platform
all_ = "all"
"""Select all, to do a broadcast.
Used in both ``audience`` and ``platform``.
"""
__all__ = [
    "all_",
    "Push",
    "tag",
    "tag_and",
    "tag_not",
    "alias",
    "registration_id",
    "segment",
    "abtest",
    "notification",
    "message",
    "platform",
    "cid",
    "audience",
    "options",
    "smsmessage",
]
|
StarcoderdataPython
|
9705884
|
from chess_game_practice.board import make_board, _make_board_empty
from chess_game_practice.pieces.pawn import Pawn
def test_create_pawn():
game_board = make_board()
pawn = Pawn('x', 'y', "b", game_board)
assert "pawn" == pawn.piece_type
assert game_board == pawn.board
assert "b" == pawn.color
assert 'x' == pawn.x
assert 'y' == pawn.y
def test_cant_move_on_self():
game_board = _make_board_empty()
game_board[6][0] = "bp"
pawn = Pawn(0, 6, "b", game_board)
assert False == pawn.move(0, 6)
|
StarcoderdataPython
|