max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
blog/migrations/0005_auto_20201008_1805.py | JiajiaHuang/smonus | 0 | 12791751 | # Generated by Django 2.2.1 on 2020-10-08 10:05
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20201003_2247'),
]
operations = [
migrations.AddField(
model_name='channel',
name='updated',
field=models.DateTimeField(auto_now=True, verbose_name='修改时间'),
),
migrations.AlterField(
model_name='article',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='article',
name='updated',
field=models.DateTimeField(auto_now=True, verbose_name='修改时间'),
),
migrations.AlterField(
model_name='attachment',
name='Created',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='attachment',
name='Updated',
field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),
),
migrations.AlterField(
model_name='channel',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='tagsmodels',
name='tags_created',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='创建时间'),
),
migrations.AlterField(
model_name='tagsmodels',
name='tags_updated',
field=models.DateTimeField(auto_now=True, verbose_name='更新时间'),
),
]
| 1.710938 | 2 |
pyop3/obj/parloop.py | wence-/pyop3 | 0 | 12791752 | from pyop3.api import AccessMode, ArgType, IterationRegion
from pyop3.codegen.compiled import build_wrapper, get_c_function
from pyop3.obj.kernel import Kernel
from pyop3.obj.maps import IdentityMap
from pyop3.obj.sets import AbstractSet
from pyop3.utils import cached_property, debug_check_args
def filter_args(args, access_modes):
reductions = []
seen = set()
exchange = []
dirty = []
mats = []
for arg, access_mode in zip(args, access_modes):
if arg.argtype == ArgType.DAT:
dirty.append(arg.obj)
if arg.map_tuple != (IdentityMap, ) and arg.obj not in seen:
exchange.append((arg.obj, access_mode))
seen.add(arg.obj)
if arg.argtype == ArgType.GLOBAL and access_mode != AccessMode.READ:
reductions.append((arg.obj, access_mode))
if arg.argtype == ArgType.MAT:
mats.append((arg.obj, access_mode))
return tuple(exchange), tuple(dirty), tuple(reductions), tuple(mats)
def noop():
pass
class ParLoop(object):
def validator(self, kernel, iterset, *args,
iteration_region=IterationRegion.ALL,
pass_layer_arg=False):
assert isinstance(kernel, Kernel)
assert isinstance(iterset, AbstractSet)
assert len(args) == len(kernel.access_modes)
assert isinstance(iteration_region, IterationRegion)
seen = {}
for arg, access_mode in zip(args, kernel.access_modes):
assert arg.validate(iterset)
try:
assert seen[arg] == access_mode
except KeyError:
seen[arg] = access_mode
@debug_check_args(validator)
def __init__(self, kernel, iterset, *args,
iteration_region=IterationRegion.ALL,
pass_layer_arg=False):
self.args = tuple(args)
self.kernel = kernel
self.iterset = iterset
self.iteration_region = iteration_region
self.pass_layer_arg = pass_layer_arg
exchange, dirty, reductions, mats = filter_args(args, kernel.access_modes)
self.exchange = exchange
self.dirty = dirty
self.reductions = reductions
self.mats = mats
# Micro-optimisations
if not reductions or iterset.comm.size == 1:
self.reduction_begin = noop
self.reduction_end = noop
if not exchange or iterset.comm.size == 1:
self.g2lbegin = noop
self.g2lend = noop
self.l2gbegin = noop
self.l2gend = noop
if not dirty or iterset.comm.size == 1:
self.mark_dirty = noop
def g2lbegin(self):
for d, mode in self.exchange:
d.g2lbegin(mode)
def g2lend(self):
for d, mode in self.exchange:
d.g2lend(mode)
def l2gbegin(self):
for d, mode in self.exchange:
d.l2gbegin(mode)
def l2gend(self):
for d, mode in self.exchange:
d.l2gend(mode)
def reduction_begin(self):
for g, mode in self.reductions:
g.reduction_begin(mode)
def reduction_end(self):
for g, mode in self.reductions:
g.reduction_end(mode)
def mark_dirty(self):
for d in self.dirty:
d.halo_valid = False
def execute(self):
self.g2lbegin()
self.dll(0, self.iterset.core_size, *self.c_arglist)
self.g2lend()
self.dll(self.iterset.core_size, self.iterset.size, *self.c_arglist)
self.reduction_begin()
# self.l2gbegin()
self.reduction_end()
# self.l2gend()
# self.mark_dirty()
@cached_property
def _arglist_and_types(self):
arglist = self.iterset._parloop_args_
argtypes = self.iterset._parloop_argtypes_
maptypes = []
maplist = []
seen = set()
for arg in self.args:
arglist += arg._parloop_args_
argtypes += arg._parloop_argtypes_
for map_ in arg.map_tuple:
for m, t in zip(map_._parloop_args_, map_._parloop_argtypes_):
if m in seen:
continue
seen.add(m)
maplist.append(m)
maptypes.append(t)
return arglist + tuple(maplist), argtypes + tuple(maptypes)
@cached_property
def c_argtypes(self):
return self._arglist_and_types[1]
@cached_property
def c_arglist(self):
return self._arglist_and_types[0]
code_cache = {}
@cached_property
def dll(self):
key = (self.kernel, self.iterset._codegen_info_,
*(a._codegen_info_ for a in self.args),
self.iteration_region,
self.pass_layer_arg)
try:
return self.code_cache[key]
except KeyError:
wrapper = build_wrapper(*key[:-2],
iteration_region=self.iteration_region,
pass_layer_arg=self.pass_layer_arg)
dll = get_c_function(wrapper, self.c_argtypes)
return self.code_cache.setdefault(key, dll)
| 1.90625 | 2 |
otdet/evaluation.py | kemskems/otdet | 1 | 12791753 | <filename>otdet/evaluation.py
"""
Out-of-topic post detection evaluation methods.
"""
from collections import Counter
import numpy as np
from scipy.stats import hypergeom
from otdet.util import lazyproperty
class TopListEvaluator:
"""Evaluate performance of OOT detector based on ranked result list."""
def __init__(self, result, M=None, n=None, N=1):
if N < 0:
raise Exception('Cannot pick negative number of posts in top list')
self.result = result
self.N = N # of posts taken (top N list)
if M is None or n is None:
Mtemp, ntemp = self._get_nums()
self.M = Mtemp if M is None else M
self.n = ntemp if n is None else n
else:
# Check validity of M and n
if M < n:
raise Exception('M should never be less than n')
self.M, self.n = M, n
def _get_nums(self):
"""Get the number of all and OOT posts."""
def get_num_oot(subresult):
return sum(is_oot for _, is_oot in subresult)
temp = [(len(subresult), get_num_oot(subresult))
for subresult in self.result]
num_post_tup, num_oot_tup = zip(*temp)
num_post, num_oot = list(set(num_post_tup)), list(set(num_oot_tup))
if len(num_post) > 1 or len(num_oot) > 1:
raise Exception('Number of posts or OOT posts mismatch')
if len(num_post) == 0 or len(num_oot) == 0:
return 0, 0
else:
return num_post[0], num_oot[0]
@lazyproperty
def min_sup(self):
"""Return the minimum support value of random variable X.
X is a hypergeometric random variable associated with this event.
"""
return max(self.N - self.M + self.n, 0)
@lazyproperty
def max_sup(self):
"""Return the maximum support value of random variable X.
X is a hypergeometric random variable associated with this event.
"""
return min(self.N, self.n)
@lazyproperty
def baseline(self):
"""Return the baseline performance vector.
The baseline is obtaining OOT posts by chance. Thus, the baseline
performance vector is the probability mass function of a hypergeometric
random variable denoting the number of OOT posts in the top N list.
The k-th element represents the probability of getting k OOT posts in
the top N list.
"""
rv = hypergeom(self.M, self.n, self.N)
k = np.arange(self.min_sup, self.max_sup+1)
return rv.pmf(k)
@lazyproperty
def performance(self):
"""Return the evaluation result in a performance vector."""
num_expr = len(self.result)
if num_expr == 0:
raise Exception('No experiment error')
top_oot_nums = [sum(is_oot for _, is_oot in subresult[:self.N])
for subresult in self.result]
length = self.max_sup - self.min_sup + 1
res = np.zeros(length)
count = Counter(top_oot_nums)
for k in range(length):
res[k] = count[k] / num_expr
return res
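# --- Illustrative usage (a hedged sketch, not part of the original module). The
# `result` structure below follows what the code above expects: a list of
# experiments, each a ranked list of (score, is_oot) pairs.
if __name__ == '__main__':
    example_result = [
        [(0.9, True), (0.7, False), (0.4, False)],
        [(0.8, False), (0.6, True), (0.3, False)],
    ]
    evaluator = TopListEvaluator(example_result, N=1)
    print('baseline   :', evaluator.baseline)     # chance-level hypergeometric PMF
    print('performance:', evaluator.performance)  # observed top-N OOT distribution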
| 2.609375 | 3 |
mosqito/utils/isoclose.py | wantysal/MoSQITooo | 1 | 12791754 | <gh_stars>1-10
# -*- coding: utf-8 -*-
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError(
"In order to perform this validation you need the 'matplotlib' package."
)
import numpy as np
def isoclose(actual, desired, rtol=1e-7, atol=0, is_plot=False, tol_label=None, xaxis=None):
"""
Check if two arrays are equal up to desired tolerance.
    The test is inspired by section 5.1 of ISO 532-1. It checks that
    ``actual`` lies within ``desired +/- max(atol, rtol * abs(desired))``.
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
Array desired.
rtol : float, optional
Relative tolerance.
atol : float, optional
Absolute tolerance.
is_plot : bool, optional
To generate a "compliance" plot
tol_label: str
Label for the tolerance curves
xaxis : array_like, optional
x axis for the "compliance" plot
    Returns
    -------
    is_isoclose : bool
        True if ``actual`` and ``desired`` are equal up to the specified tolerance,
        False otherwise.
"""
# Tolerances
range_pos = np.amin(
[desired * (1 - abs(rtol)), desired - abs(atol)], axis=0)
range_neg = np.amax(
[desired * (1 + abs(rtol)), desired + abs(atol)], axis=0)
    # Test for ISO 532-1 conformance (section 5.1)
is_isoclose = (actual >= range_pos).all() and (actual <= range_neg).all()
if is_plot:
# Define xaxis
if xaxis is None:
xaxis = np.arange(actual.shape[0])
# Plot desired range
plt.plot(
xaxis,
range_neg,
color="tab:red",
linestyle="solid",
label=tol_label,
linewidth=1,
)
plt.plot(
xaxis,
range_pos,
color="tab:red",
linestyle="solid",
# label="",
linewidth=1,
)
# Plot actual value
plt.plot(xaxis, actual)
plt.legend()
return is_isoclose
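# --- Minimal usage sketch (not part of the original module): check a computed
# curve against a reference with a 5 % relative / 0.1 absolute tolerance.
if __name__ == "__main__":
    reference = np.linspace(1.0, 10.0, num=50)
    computed = reference * 1.01  # stays inside the tolerance band everywhere
    print(isoclose(computed, reference, rtol=0.05, atol=0.1, is_plot=False))  # True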
| 2.75 | 3 |
Machine learning/ML/supervised_ML_Deep_learning/AI2_CNN.py | warpalatino/public | 1 | 12791755 | <reponame>warpalatino/public
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# ---------------------------
# deep learning - CNN for classification
# ***
# we have a big dataset of pictures (250mb) with cats and dogs, locally stored (in folder 5 - Tech)
# ***
# [1] load and pre-process data
# ------
# -- we apply image augmentation/transformations here to avoid over-fitting => we apply shifts and rotations and flips and zooms to the images
# https://keras.io/api/preprocessing/image/
# rescale property is about feature scaling while other properties below are from a Keras example, click link above
# the model will look at images in batches as usual
train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
training_set = train_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')
test_datagen = ImageDataGenerator(rescale = 1./255) # we scale but do not transform/augment the testset images as we need the originals to compare the effectiveness of our training/learning
test_set = test_datagen.flow_from_directory('../data/CNN pics dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary')
# [2] define the model, according to new keras instructions
# https://www.tensorflow.org/guide/keras/rnn
# ------
model = keras.Sequential()
# -- first convolution and pooling
model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3])) # filters are output filters in convolution, kernel is the CNN feature detector square, input shape for first input layer
model.add(layers.MaxPool2D(pool_size=2, strides=2)) # size of the pool (or set of pixels) to squeeze into one pixel in feature map, while strides is about shifting the frame of pixels to capture next pixels to observe
# -- second convolution and pooling
model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu'))
model.add(layers.MaxPool2D(pool_size=2, strides=2))
# -- flatten
model.add(layers.Flatten()) # flatten all the pooled feature maps into a single 1D vector that can be fed into the fully connected layers
# -- connect
model.add(layers.Dense(units=128, activation='relu')) # neurons are high here because processing images is more complex and we may get more accuracy
# -- output layer
model.add(layers.Dense(units=1, activation='sigmoid')) # we need just one neuron for binary classification as output (0/1, or cat/dog)
# [3] fit (and run/train) the model
# ------
model.compile(optimizer='adam', loss='binary_crossentropy', metrics = ['accuracy']) # adam is a way to perform the stochastic gradient descent
model.fit(x = training_set, validation_data = test_set, epochs = 25)
# print(model.summary())
# [4] try a first prediction around a single picture
# ------
# -- load a specific image to observe after training and predict
test_image = image.load_img('../data/CNN pics dataset/single_prediction/cat_or_dog_1.jpg', target_size = (64, 64))
# -- convert the image into a numpy array, then expand the array into an extra dimension as images will be processed in batches (batch => extra dimension)
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
# -- make a prediction in terms of either 0 or 1
result = model.predict(test_image)
# -- decode: if prediction is 1, then dog; if 0, then cat; we know what index corresponds to which class by calling the attribute class_indices as below...
print(training_set.class_indices)
if result[0][0] == 1:
prediction = 'dog'
else:
prediction = 'cat'
print(prediction)
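# --- Hedged helper sketch (not part of the original script): the single-image steps
# above wrapped into a reusable function; the example path below is hypothetical.
def classify_image(path, cnn_model=model):
    """Load one image, add the batch dimension and return 'dog' or 'cat'."""
    img = image.load_img(path, target_size=(64, 64))
    arr = np.expand_dims(image.img_to_array(img), axis=0)
    return 'dog' if cnn_model.predict(arr)[0][0] == 1 else 'cat'
# e.g. print(classify_image('../data/CNN pics dataset/single_prediction/cat_or_dog_2.jpg'))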
| 3.0625 | 3 |
python/game/cui/dicegame.py | rrbb014/rrbb-playground | 0 | 12791756 | <reponame>rrbb014/rrbb-playground<gh_stars>0
import random
middle_dot = chr(0xb7) # U+00B7 -> 16 * 11 + 7 -> 183
player_position = 1
computer_position = 1
def board():
print(middle_dot*(player_position - 1) +
"P" +
middle_dot * (30 - player_position) + "GOAL"
)
print(middle_dot * (computer_position - 1) +
"C" +
middle_dot * (30 - computer_position) + "GOAL"
)
if __name__ == "__main__":
board()
print("주사위게임 START")
while True:
input("Enter를 누르면 여러분의 말이 움직입니다.")
player_position += random.randint(1, 6)
if player_position > 30:
player_position = 30
board()
if player_position == 30:
print("여러분의 승리")
break
input("Enter를 누르면 컴퓨터의 말이 움직입니다.")
computer_position += random.randint(1, 6)
if computer_position > 30:
computer_position = 30
board()
if computer_position == 30:
print("컴퓨터의 승리")
break
| 3.78125 | 4 |
discord/ext/test/websocket.py | Sillocan/dpytest | 4 | 12791757 |
import discord.gateway as gateway
from . import callbacks
class FakeWebSocket(gateway.DiscordWebSocket):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cur_event = ""
self.event_args = ()
self.event_kwargs = {}
async def send(self, data):
self._dispatch('socket_raw_send', data)
if self.cur_event is None:
raise ValueError("Unhandled Websocket send event")
await callbacks.dispatch_event(self.cur_event, *self.event_args, **self.event_kwargs)
self.cur_event = None
self.event_args = ()
self.event_kwargs = {}
async def change_presence(self, *, activity=None, status=None, afk=False, since=0.0):
self.cur_event = "presence"
self.event_args = (activity, status, afk, since)
await super().change_presence(activity=activity, status=status, afk=afk, since=since)
| 2.265625 | 2 |
src/sensai/data_transformation/dft.py | schroedk/sensAI | 0 | 12791758 | <reponame>schroedk/sensAI<gh_stars>0
import copy
import logging
import re
from abc import ABC, abstractmethod
from typing import List, Sequence, Union, Dict, Callable, Any, Optional, Set
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from .sklearn_transformer import SkLearnTransformerProtocol
from ..columngen import ColumnGenerator
from ..util import flattenArguments
from ..util.pandas import DataFrameColumnChangeTracker
from ..util.pickle import setstate
from ..util.string import orRegexGroup, ToStringMixin
log = logging.getLogger(__name__)
class DataFrameTransformer(ABC, ToStringMixin):
"""
Base class for data frame transformers, i.e. objects which can transform one data frame into another
(possibly applying the transformation to the original data frame - in-place transformation).
A data frame transformer may require being fitted using training data.
"""
def __init__(self):
self._name = f"{self.__class__.__name__}-{id(self)}"
self._isFitted = False
self._columnChangeTracker: Optional[DataFrameColumnChangeTracker] = None
self._paramInfo = {} # arguments passed to init that are not saved otherwise can be persisted here
# for backwards compatibility with persisted DFTs based on code prior to commit 7088cbbe
# They lack the __isFitted attribute and we assume that each such DFT was fitted
def __setstate__(self, d):
d["_name"] = d.get("_name", f"{self.__class__.__name__}-{id(self)}")
d["_isFitted"] = d.get("_isFitted", True)
d["_columnChangeTracker"] = d.get("_columnChangeTracker", None)
d["_paramInfo"] = d.get("_paramInfo", {})
self.__dict__ = d
def _toStringExcludePrivate(self) -> bool:
return True
def getName(self) -> str:
"""
:return: the name of this dft transformer, which may be a default name if the name has not been set.
"""
return self._name
def setName(self, name):
self._name = name
@abstractmethod
def _fit(self, df: pd.DataFrame):
pass
@abstractmethod
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
pass
def apply(self, df: pd.DataFrame) -> pd.DataFrame:
self._columnChangeTracker = DataFrameColumnChangeTracker(df)
if not self.isFitted():
raise Exception(f"Cannot apply a DataFrameTransformer which is not fitted: "
f"the df transformer {self.getName()} requires fitting")
df = self._apply(df)
self._columnChangeTracker.trackChange(df)
return df
def info(self):
return {
"name": self.getName(),
"changeInColumnNames": self._columnChangeTracker.columnChangeString() if self._columnChangeTracker is not None else None,
"isFitted": self.isFitted(),
}
def fit(self, df: pd.DataFrame):
self._fit(df)
self._isFitted = True
def isFitted(self):
return self._isFitted
def fitApply(self, df: pd.DataFrame) -> pd.DataFrame:
self.fit(df)
return self.apply(df)
class InvertibleDataFrameTransformer(DataFrameTransformer, ABC):
@abstractmethod
def applyInverse(self, df: pd.DataFrame) -> pd.DataFrame:
pass
def getInverse(self) -> "InverseDataFrameTransformer":
"""
:return: a transformer whose (forward) transformation is the inverse transformation of this DFT
"""
return InverseDataFrameTransformer(self)
class RuleBasedDataFrameTransformer(DataFrameTransformer, ABC):
"""Base class for transformers whose logic is entirely based on rules and does not need to be fitted to data"""
def _fit(self, df: pd.DataFrame):
pass
def fit(self, df: pd.DataFrame):
pass
def isFitted(self):
return True
class InverseDataFrameTransformer(RuleBasedDataFrameTransformer):
def __init__(self, invertibleDFT: InvertibleDataFrameTransformer):
super().__init__()
self.invertibleDFT = invertibleDFT
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return self.invertibleDFT.applyInverse(df)
class DataFrameTransformerChain(DataFrameTransformer):
"""
Supports the application of a chain of data frame transformers.
During fit and apply each transformer in the chain receives the transformed output of its predecessor.
"""
def __init__(self, *dataFrameTransformers: Union[DataFrameTransformer, List[DataFrameTransformer]]):
super().__init__()
self.dataFrameTransformers = flattenArguments(dataFrameTransformers)
def __len__(self):
return len(self.dataFrameTransformers)
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
for transformer in self.dataFrameTransformers:
df = transformer.apply(df)
return df
def _fit(self, df: pd.DataFrame):
if len(self.dataFrameTransformers) == 0:
return
for transformer in self.dataFrameTransformers[:-1]:
df = transformer.fitApply(df)
self.dataFrameTransformers[-1].fit(df)
def isFitted(self):
return all([dft.isFitted() for dft in self.dataFrameTransformers])
def getNames(self) -> List[str]:
"""
:return: the list of names of all contained feature generators
"""
return [transf.getName() for transf in self.dataFrameTransformers]
def info(self):
info = super().info()
info["chainedDFTTransformerNames"] = self.getNames()
info["length"] = len(self)
return info
def findFirstTransformerByType(self, cls) -> Optional[DataFrameTransformer]:
for dft in self.dataFrameTransformers:
if isinstance(dft, cls):
return dft
return None
class DFTRenameColumns(RuleBasedDataFrameTransformer):
def __init__(self, columnsMap: Dict[str, str]):
"""
:param columnsMap: dictionary mapping old column names to new names
"""
super().__init__()
self.columnsMap = columnsMap
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df.rename(columns=self.columnsMap)
class DFTConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer):
"""
Filters a data frame by applying a boolean function to one of the columns and retaining only the rows
for which the function returns True
"""
def __init__(self, column: str, condition: Callable[[Any], bool]):
super().__init__()
self.column = column
self.condition = condition
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df[df[self.column].apply(self.condition)]
class DFTInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer):
"""
Filters a data frame on the selected column and retains only the rows for which the value is in the setToKeep
"""
def __init__(self, column: str, setToKeep: Set):
super().__init__()
self.setToKeep = setToKeep
self.column = column
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df[df[self.column].isin(self.setToKeep)]
def info(self):
info = super().info()
info["column"] = self.column
info["setToKeep"] = self.setToKeep
return info
class DFTNotInSetComparisonRowFilterOnColumn(RuleBasedDataFrameTransformer):
"""
Filters a data frame on the selected column and retains only the rows for which the value is not in the setToDrop
"""
def __init__(self, column: str, setToDrop: Set):
super().__init__()
self.setToDrop = setToDrop
self.column = column
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df[~df[self.column].isin(self.setToDrop)]
def info(self):
info = super().info()
info["column"] = self.column
info["setToDrop"] = self.setToDrop
return info
class DFTVectorizedConditionalRowFilterOnColumn(RuleBasedDataFrameTransformer):
"""
Filters a data frame by applying a vectorized condition on the selected column and retaining only the rows
for which it returns True
"""
def __init__(self, column: str, vectorizedCondition: Callable[[pd.Series], Sequence[bool]]):
super().__init__()
self.column = column
self.vectorizedCondition = vectorizedCondition
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df[self.vectorizedCondition(df[self.column])]
def info(self):
info = super().info()
info["column"] = self.column
return info
class DFTRowFilter(RuleBasedDataFrameTransformer):
"""
Filters a data frame by applying a condition function to each row and retaining only the rows
for which it returns True
"""
def __init__(self, condition: Callable[[Any], bool]):
super().__init__()
self.condition = condition
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df[df.apply(self.condition, axis=1)]
class DFTModifyColumn(RuleBasedDataFrameTransformer):
"""
Modifies a column specified by 'column' using 'columnTransform'
"""
def __init__(self, column: str, columnTransform: Union[Callable, np.ufunc]):
"""
:param column: the name of the column to be modified
:param columnTransform: a function operating on single cells or a Numpy ufunc that applies to an entire Series
"""
super().__init__()
self.column = column
self.columnTransform = columnTransform
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
df[self.column] = df[self.column].apply(self.columnTransform)
return df
class DFTModifyColumnVectorized(RuleBasedDataFrameTransformer):
"""
Modifies a column specified by 'column' using 'columnTransform'. This transformer can be used to utilise Numpy vectorisation for
performance optimisation.
"""
def __init__(self, column: str, columnTransform: Callable[[np.ndarray], Union[Sequence, pd.Series, np.ndarray]]):
"""
:param column: the name of the column to be modified
:param columnTransform: a function that takes a Numpy array and from which the returned value will be assigned to the column as a whole
"""
super().__init__()
self.column = column
self.columnTransform = columnTransform
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
df[self.column] = self.columnTransform(df[self.column].values)
return df
class DFTOneHotEncoder(DataFrameTransformer):
def __init__(self, columns: Optional[Union[str, Sequence[str]]],
categories: Union[List[np.ndarray], Dict[str, np.ndarray]] = None, inplace=False, ignoreUnknown=False,
arrayValuedResult=False):
"""
One hot encode categorical variables
:param columns: list of names or regex matching names of columns that are to be replaced by a list one-hot encoded columns each
(or an array-valued column for the case where useArrayValues=True);
If None, then no columns are actually to be one-hot-encoded
:param categories: numpy arrays containing the possible values of each of the specified columns (for case where sequence is specified
in 'columns') or dictionary mapping column name to array of possible categories for the column name.
If None, the possible values will be inferred from the columns
:param inplace: whether to perform the transformation in-place
:param ignoreUnknown: if True and an unknown category is encountered during transform, the resulting one-hot
encoded columns for this feature will be all zeros. if False, an unknown category will raise an error.
:param arrayValuedResult: whether to replace the input columns by columns of the same name containing arrays as values
instead of creating a separate column per original value
"""
super().__init__()
self._paramInfo["columns"] = columns
self._paramInfo["inferCategories"] = categories is None
self.oneHotEncoders = None
if columns is None:
self._columnsToEncode = []
self._columnNameRegex = "$"
elif type(columns) == str:
self._columnNameRegex = columns
self._columnsToEncode = None
else:
self._columnNameRegex = orRegexGroup(columns)
self._columnsToEncode = columns
self.inplace = inplace
self.arrayValuedResult = arrayValuedResult
self.handleUnknown = "ignore" if ignoreUnknown else "error"
if categories is not None:
if type(categories) == dict:
self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in categories.items()}
else:
if len(columns) != len(categories):
raise ValueError(f"Given categories must have the same length as columns to process")
self.oneHotEncoders = {col: OneHotEncoder(categories=[np.sort(categories)], sparse=False, handle_unknown=self.handleUnknown) for col, categories in zip(columns, categories)}
def __setstate__(self, state):
if "arrayValuedResult" not in state:
state["arrayValuedResult"] = False
super().__setstate__(state)
def _toStringAdditionalEntries(self) -> Dict[str, Any]:
d = super()._toStringAdditionalEntries()
d["columns"] = self._paramInfo.get("columns")
return d
def _fit(self, df: pd.DataFrame):
if self._columnsToEncode is None:
self._columnsToEncode = [c for c in df.columns if re.fullmatch(self._columnNameRegex, c) is not None]
if len(self._columnsToEncode) == 0:
log.warning(f"{self} does not apply to any columns, transformer has no effect; regex='{self._columnNameRegex}'")
if self.oneHotEncoders is None:
self.oneHotEncoders = {column: OneHotEncoder(categories=[np.sort(df[column].unique())], sparse=False, handle_unknown=self.handleUnknown) for column in self._columnsToEncode}
for columnName in self._columnsToEncode:
self.oneHotEncoders[columnName].fit(df[[columnName]])
def _apply(self, df: pd.DataFrame):
if len(self._columnsToEncode) == 0:
return df
if not self.inplace:
df = df.copy()
for columnName in self._columnsToEncode:
encodedArray = self.oneHotEncoders[columnName].transform(df[[columnName]])
if not self.arrayValuedResult:
df = df.drop(columns=columnName)
for i in range(encodedArray.shape[1]):
df["%s_%d" % (columnName, i)] = encodedArray[:, i]
else:
df[columnName] = list(encodedArray)
return df
def info(self):
info = super().info()
info["inplace"] = self.inplace
info["handleUnknown"] = self.handleUnknown
info["arrayValuedResult"] = self.arrayValuedResult
info.update(self._paramInfo)
return info
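# --- Hedged usage sketch (not part of the original sensAI module): one-hot encode a
# hypothetical "colour" column of a small data frame.
def _demo_one_hot_encoding() -> pd.DataFrame:
    demo = pd.DataFrame({"colour": ["red", "green", "red"], "x": [1, 2, 3]})
    dft = DFTOneHotEncoder(columns=["colour"], ignoreUnknown=True)
    return dft.fitApply(demo)  # yields columns x, colour_0, colour_1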
class DFTColumnFilter(RuleBasedDataFrameTransformer):
"""
A DataFrame transformer that filters columns by retaining or dropping specified columns
"""
def __init__(self, keep: Union[str, Sequence[str]] = None, drop: Union[str, Sequence[str]] = None):
super().__init__()
self.keep = [keep] if type(keep) == str else keep
self.drop = drop
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
df = df.copy()
if self.keep is not None:
df = df[self.keep]
if self.drop is not None:
df = df.drop(columns=self.drop)
return df
def info(self):
info = super().info()
info["keep"] = self.keep
info["drop"] = self.drop
return info
class DFTKeepColumns(DFTColumnFilter):
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df[self.keep]
class DFTDRowFilterOnIndex(RuleBasedDataFrameTransformer):
def __init__(self, keep: Set = None, drop: Set = None):
super().__init__()
self.drop = drop
self.keep = keep
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
df = df.copy()
if self.keep is not None:
df = df.loc[self.keep]
if self.drop is not None:
df = df.drop(self.drop)
return df
class DFTNormalisation(DataFrameTransformer):
"""
Applies normalisation/scaling to a data frame by applying a set of transformation rules, where each
rule defines a set of columns to which it applies (learning a single transformer based on the values
of all applicable columns)
"""
class RuleTemplate:
def __init__(self, skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None,
transformerFactory: Callable[[], SkLearnTransformerProtocol] = None, independentColumns=False):
"""
:param skip: flag indicating whether no transformation shall be performed on all of the columns
:param unsupported: flag indicating whether normalisation of all columns is unsupported (shall trigger an exception if attempted)
:param transformer: a transformer instance (from sklearn.preprocessing, e.g. StandardScaler) to apply to the matching column(s)
for the case where a transformation is necessary (skip=False, unsupported=False). If None is given, either transformerFactory
or the containing instance's default factory will be used.
NOTE: Use an instance only if you want, in particular, the instance to be shared across several models that use the same
feature with associated rule/rule template (disabling `fit` where appropriate). Otherwise, use a factory.
:param transformerFactory: a factory for the generation of the transformer instance, which will only be applied if `transformer`
                is not given; if neither `transformer` nor `transformerFactory` is given, the containing instance's default factory will
be used. See `SkLearnTransformerFactoryFactory` for convenient construction options.
:param independentColumns: whether a separate transformation is to be learned for each of the columns for the case where the
rule matches multiple columns.
"""
if skip and transformer is not None:
raise ValueError("skip==True while transformer is not None")
self.skip = skip
self.unsupported = unsupported
self.transformer = transformer
self.transformerFactory = transformerFactory
self.independentColumns = independentColumns
def toRule(self, regex: Optional[str]):
"""
Convert the template to a rule for all columns matching the regex
:param regex: a regular expression defining the column the rule applies to
:return: the resulting Rule
"""
return DFTNormalisation.Rule(regex, skip=self.skip, unsupported=self.unsupported, transformer=self.transformer,
transformerFactory=self.transformerFactory, independentColumns=self.independentColumns)
def toPlaceholderRule(self):
return self.toRule(None)
class Rule(ToStringMixin):
def __init__(self, regex: Optional[str], skip=False, unsupported=False, transformer: SkLearnTransformerProtocol = None,
transformerFactory: Callable[[], SkLearnTransformerProtocol] = None,
arrayValued=False, fit=True, independentColumns=False):
"""
:param regex: a regular expression defining the column(s) the rule applies to.
If it applies to multiple columns, these columns will be normalised in the same way (using the same normalisation
process for each column) unless independentColumns=True.
If None, the rule is a placeholder rule and the regex must be set later via setRegex or the rule will not be applicable.
:param skip: flag indicating whether no transformation shall be performed on the matching column(s)
:param unsupported: flag indicating whether normalisation of the matching column(s) is unsupported (shall trigger an exception if attempted)
:param transformer: a transformer instance (from sklearn.preprocessing, e.g. StandardScaler) to apply to the matching column(s)
for the case where a transformation is necessary (skip=False, unsupported=False). If None is given, either transformerFactory
or the containing instance's default factory will be used.
NOTE: Use an instance only if you want, in particular, the instance to be shared across several models that use the same
feature with associated rule/rule template (disabling `fit` where appropriate). Otherwise, use a factory.
:param transformerFactory: a factory for the generation of the transformer instance, which will only be applied if `transformer`
                is not given; if neither `transformer` nor `transformerFactory` is given, the containing instance's default factory will
be used. See `SkLearnTransformerFactoryFactory` for convenient construction options.
:param arrayValued: whether the column values are not scalars but arrays (of arbitrary lengths).
It is assumed that all entries in such arrays are to be normalised in the same way.
If arrayValued is True, only a single matching column is supported, i.e. the regex must match at most one column.
:param fit: whether the rule's transformer shall be fitted
:param independentColumns: whether a separate transformation is to be learned for each of the columns for the case where the
rule matches multiple columns.
"""
if skip and (transformer is not None or transformerFactory is not None):
raise ValueError("skip==True while transformer/transformerFactory is not None")
self.regex = re.compile(regex) if regex is not None else None
self.skip = skip
self.unsupported = unsupported
self.transformer = transformer
self.transformerFactory = transformerFactory
self.arrayValued = arrayValued
self.fit = fit
self.independentColumns = independentColumns
def __setstate__(self, state):
setstate(DFTNormalisation.Rule, self, state, newDefaultProperties=dict(arrayValued=False, fit=True, independentColumns=False,
transformerFactory=None))
def _toStringExcludes(self) -> List[str]:
return super()._toStringExcludes() + ["regex"]
def _toStringAdditionalEntries(self) -> Dict[str, Any]:
d = super()._toStringAdditionalEntries()
if self.regex is not None:
d["regex"] = f"'{self.regex.pattern}'"
return d
def setRegex(self, regex: str):
self.regex = re.compile(regex)
def matches(self, column: str):
if self.regex is None:
raise Exception("Attempted to apply a placeholder rule. Perhaps the feature generator from which the rule originated was never applied in order to have the rule instantiated.")
return self.regex.fullmatch(column) is not None
def matchingColumns(self, columns: Sequence[str]):
return [col for col in columns if self.matches(col)]
def __init__(self, rules: Sequence[Rule], defaultTransformerFactory=None, requireAllHandled=True, inplace=False):
"""
:param rules: the set of rules; rules are always fitted and applied in the given order
:param defaultTransformerFactory: a factory for the creation of transformer instances (from sklearn.preprocessing, e.g. StandardScaler)
that shall be used to create a transformer for all rules that don't specify a particular transformer.
The default transformer will only be applied to columns matched by such rules, unmatched columns will
not be transformed.
:param requireAllHandled: whether to raise an exception if not all columns are matched by a rule
:param inplace: whether to apply data frame transformations in-place
"""
super().__init__()
self.requireAllHandled = requireAllHandled
self.inplace = inplace
self._userRules = rules
self._defaultTransformerFactory = defaultTransformerFactory
self._rules = None
def _toStringAdditionalEntries(self) -> Dict[str, Any]:
d = super()._toStringAdditionalEntries()
if self._rules is not None:
d["rules"] = self._rules
else:
d["userRules"] = self._userRules
return d
def _fit(self, df: pd.DataFrame):
matchedRulesByColumn = {}
self._rules = []
for rule in self._userRules:
matchingColumns = rule.matchingColumns(df.columns)
for c in matchingColumns:
if c in matchedRulesByColumn:
raise Exception(f"More than one rule applies to column '{c}': {matchedRulesByColumn[c]}, {rule}")
matchedRulesByColumn[c] = rule
if len(matchingColumns) > 0:
if rule.unsupported:
raise Exception(f"Normalisation of columns {matchingColumns} is unsupported according to {rule}. If you want to make use of these columns, transform them into a supported column before applying {self.__class__.__name__}.")
if not rule.skip:
if rule.transformer is None:
if rule.transformerFactory is not None:
rule.transformer = rule.transformerFactory()
else:
if self._defaultTransformerFactory is None:
raise Exception(f"No transformer to fit: {rule} defines no transformer and instance has no transformer factory")
rule.transformer = self._defaultTransformerFactory()
if rule.fit:
# fit transformer
applicableDF = df[sorted(matchingColumns)]
if rule.arrayValued:
if len(matchingColumns) > 1:
raise Exception(f"Array-valued case is only supported for a single column, matched {matchingColumns} for {rule}")
values = np.concatenate(applicableDF.values.flatten())
values = values.reshape((len(values), 1))
elif rule.independentColumns:
values = applicableDF.values
else:
values = applicableDF.values.flatten()
values = values.reshape((len(values), 1))
rule.transformer.fit(values)
else:
log.log(logging.DEBUG - 1, f"{rule} matched no columns")
# collect specialised rule for application
specialisedRule = copy.copy(rule)
r = orRegexGroup(matchingColumns)
try:
specialisedRule.regex = re.compile(r)
except Exception as e:
raise Exception(f"Could not compile regex '{r}': {e}")
self._rules.append(specialisedRule)
def _checkUnhandledColumns(self, df, matchedRulesByColumn):
if self.requireAllHandled:
unhandledColumns = set(df.columns) - set(matchedRulesByColumn.keys())
if len(unhandledColumns) > 0:
raise Exception(f"The following columns are not handled by any rules: {unhandledColumns}; rules: {', '.join(map(str, self._rules))}")
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
if not self.inplace:
df = df.copy()
matchedRulesByColumn = {}
for rule in self._rules:
matchingColumns = rule.matchingColumns(df.columns)
if len(matchingColumns) == 0:
continue
for c in matchingColumns:
matchedRulesByColumn[c] = rule
if not rule.skip:
if rule.independentColumns and not rule.arrayValued:
matchingColumns = sorted(matchingColumns)
df[matchingColumns] = rule.transformer.transform(df[matchingColumns].values)
else:
for c in matchingColumns:
if not rule.arrayValued:
df[c] = rule.transformer.transform(df[[c]].values)
else:
df[c] = [rule.transformer.transform(np.array([x]).T)[:, 0] for x in df[c]]
self._checkUnhandledColumns(df, matchedRulesByColumn)
return df
def info(self):
info = super().info()
info["requireAllHandled"] = self.requireAllHandled
info["inplace"] = self.inplace
return info
def findRule(self, colName: str) -> "DFTNormalisation.Rule":
for rule in self._rules:
if rule.matches(colName):
return rule
class DFTFromColumnGenerators(RuleBasedDataFrameTransformer):
def __init__(self, columnGenerators: Sequence[ColumnGenerator], inplace=False):
super().__init__()
self.columnGenerators = columnGenerators
self.inplace = inplace
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
if not self.inplace:
df = df.copy()
for cg in self.columnGenerators:
series = cg.generateColumn(df)
df[series.name] = series
return df
def info(self):
info = super().info()
info["inplace"] = self.inplace
return info
class DFTCountEntries(RuleBasedDataFrameTransformer):
"""
Adds a new column with counts of the values on a selected column
"""
def __init__(self, columnForEntryCount: str, columnNameForResultingCounts: str = "counts"):
super().__init__()
self.columnNameForResultingCounts = columnNameForResultingCounts
self.columnForEntryCount = columnForEntryCount
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
series = df[self.columnForEntryCount].value_counts()
return pd.DataFrame({self.columnForEntryCount: series.index, self.columnNameForResultingCounts: series.values})
def info(self):
info = super().info()
info["columnNameForResultingCounts"] = self.columnNameForResultingCounts
info["columnForEntryCount"] = self.columnForEntryCount
return info
class DFTAggregationOnColumn(RuleBasedDataFrameTransformer):
def __init__(self, columnForAggregation: str, aggregation: Callable):
super().__init__()
self.columnForAggregation = columnForAggregation
self.aggregation = aggregation
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df.groupby(self.columnForAggregation).agg(self.aggregation)
class DFTRoundFloats(RuleBasedDataFrameTransformer):
def __init__(self, decimals=0):
super().__init__()
self.decimals = decimals
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return pd.DataFrame(np.round(df.values, self.decimals), columns=df.columns, index=df.index)
def info(self):
info = super().info()
info["decimals"] = self.decimals
return info
class DFTSkLearnTransformer(InvertibleDataFrameTransformer):
"""
Applies a transformer from sklearn.preprocessing to (a subset of the columns of) a data frame.
If multiple columns are transformed, they are transformed independently (i.e. each column uses a separately trained transformation).
"""
def __init__(self, sklearnTransformer: SkLearnTransformerProtocol, columns: Optional[List[str]] = None, inplace=False,
arrayValued=False):
"""
:param sklearnTransformer: the transformer instance (from sklearn.preprocessing) to use (which will be fitted & applied)
:param columns: the set of column names to which the transformation shall apply; if None, apply it to all columns
:param inplace: whether to apply the transformation in-place
:param arrayValued: whether to apply transformation not to scalar-valued columns but to one or more array-valued columns,
where the values of all arrays within a column (which may vary in length) are to be transformed in the same way.
If multiple columns are transformed, then the arrays belonging to a single row must all have the same length.
"""
super().__init__()
self.setName(f"{self.__class__.__name__}_wrapped_{sklearnTransformer.__class__.__name__}")
self.sklearnTransformer = sklearnTransformer
self.columns = columns
self.inplace = inplace
self.arrayValued = arrayValued
def __setstate__(self, state):
state["arrayValued"] = state.get("arrayValued", False)
setstate(DFTSkLearnTransformer, self, state)
def _fit(self, df: pd.DataFrame):
cols = self.columns
if cols is None:
cols = df.columns
if not self.arrayValued:
values = df[cols].values
else:
if len(cols) == 1:
values = np.concatenate(df[cols[0]].values.flatten())
values = values.reshape((len(values), 1))
else:
flatColArrays = [np.concatenate(df[col].values.flatten()) for col in cols]
lengths = [len(a) for a in flatColArrays]
if len(set(lengths)) != 1:
raise ValueError(f"Columns {cols} do not contain the same number of values: {lengths}")
values = np.stack(flatColArrays, axis=1)
self.sklearnTransformer.fit(values)
def _apply_transformer(self, df: pd.DataFrame, inverse: bool) -> pd.DataFrame:
if not self.inplace:
df = df.copy()
cols = self.columns
if cols is None:
cols = df.columns
transform = (lambda x: self.sklearnTransformer.inverse_transform(x)) if inverse else lambda x: self.sklearnTransformer.transform(x)
if not self.arrayValued:
df[cols] = transform(df[cols].values)
else:
if len(cols) == 1:
c = cols[0]
df[c] = [transform(np.array([x]).T)[:, 0] for x in df[c]]
else:
transformedValues = [transform(np.stack(row, axis=1)) for row in df.values]
for iCol, col in enumerate(cols):
df[col] = [row[:, iCol] for row in transformedValues]
return df
def _apply(self, df):
return self._apply_transformer(df, False)
def applyInverse(self, df):
return self._apply_transformer(df, True)
def info(self):
info = super().info()
info["columns"] = self.columns
info["inplace"] = self.inplace
info["sklearnTransformerClass"] = self.sklearnTransformer.__class__.__name__
return info
class DFTSortColumns(RuleBasedDataFrameTransformer):
"""
Sorts a data frame's columns in ascending order
"""
def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
return df[sorted(df.columns)]
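# --- Illustrative usage (a hedged sketch, not part of the original sensAI module);
# it assumes scikit-learn's StandardScaler, and the column names are made up.
if __name__ == "__main__":
    from sklearn.preprocessing import StandardScaler
    demoDF = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
    # invertible scaling of both columns with a single sklearn transformer
    dftScale = DFTSkLearnTransformer(StandardScaler(), columns=["a", "b"])
    scaled = dftScale.fitApply(demoDF)
    print(dftScale.applyInverse(scaled))  # round-trips back to demoDF
    # rule-based normalisation: scale "a" with the default factory, leave "b" untouched
    dftNorm = DFTNormalisation([DFTNormalisation.Rule("a"), DFTNormalisation.Rule("b", skip=True)],
                               defaultTransformerFactory=StandardScaler)
    print(dftNorm.fitApply(demoDF))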
| 2.359375 | 2 |
external_test.py | YannisLawrence1/Covid-Alarm | 0 | 12791759 | <reponame>YannisLawrence1/Covid-Alarm<gh_stars>0
"""checks that all external files used in the clock programme are working as intended and includes
some suggestions for if some modules are not working"""
import json
from api_information import gather_news, news_anaysis, gather_weather, weather_analysis
from api_information import covid_statistics, covid_data_analysis
from time_conversion import hours_to_minutes, minutes_to_seconds, hhmm_to_seconds
def n_api_test():
"""Checks the news api gives a valid reponse"""
result = gather_news()
assert gather_news() != [{'title': 'News cannot currently be gathered - KeyError',
'content': 'This may be a API key issue please check config.json',
'type': 'news'}], "gather news test: FAILED, this may be an issue with the api key check the config file"
assert gather_news() != [{'title': 'News cannot currently be gathered',
'content': 'sorry an unknow error occured',
'type': 'news'}], "gather news test: FAILED"
def n_anaysis_test():
"""Checks that the information from the covid api is correctly formatted into notifications"""
with open('json_tests/gb-news.json', 'r') as file_open:
articles = json.load(file_open)
assert news_anaysis(articles) == [{'title': 'Primark shoppers can buy clothes online in lockdown using this hack – but be prepared to pay more - The Sun',
'content': '', 'type': 'news'},
{'title': "Tesla's $40M loan that kept the lights on, and what it teaches all of us - Teslarati",
'content': 'Oftentimes, many of us forget to look around and realize how fortunate we are to have what we have. In times where tensions are relatively high based on the current election, a pandemic, and a string of bad luck that we have all seemed to adopt throughout 202…', 'type': 'news'},
{'title': "<NAME> urges Sainsbury's to think again on Nectar deal | York Press - York Press",
'content': "SAINSBURY'S is reviewing a major promotion after <NAME> urged it to make changes to avoid customers having to visit stores unnecessarily.", 'type': 'news'},
{'title': 'London Covid: Why Heathrow Airport will stay open during lockdown 2 - My London',
'content': 'A small number of essential retailers and food and beverage outlets remain open at the airport', 'type': 'news'},
{'title': "Shoppers stockpile Aldi's fur throw after it returns to stores for first time in a year - Mirror Online",
'content': 'A small number of essential retailers and food and beverage outlets remain open at the airport', 'type': 'news'}], "news_anaysis test: FAILED"
def w_api_test():
"""Checks that valid information is being gathered from the weather API"""
assert gather_weather() != {'title': 'weather cannot be gathered - KeyError',
'content': 'This may be a API key issue please check config.json',
'type': 'weather'}, "gather weather test: FAILED, this may be an issue with the api key check the config file"
assert gather_weather() != [{'title': 'weather cannot be gathered',
'content': 'sorry an unknow error occured',
'type': 'weather'}], "gather weather test: FAILED"
def w_anaysis_test():
"""Checks that information from the weather api is being correctly formatted as notifications"""
with open('json_tests/weather-exeter.json', 'r') as file_open:
weather = json.load(file_open)
assert weather_analysis(weather, 'Exeter') == {'title': ('Current weather in Exeter'),
'content': ('The current weather is "light rain" with tempretures ' +
'of 13.17°C that feels like 11.81°C'),
'type': 'weather'}, "weather_analysis test: FAILED"
def c_api_test():
"""Checks the covid API gives a valid response"""
assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be gathered',
'content': 'Check that no areas of the config.json file are missing',
'type': 'covid'}], "gather covid statistics test: FAILED"
assert covid_statistics() != [{'title': 'The UK covid 19 statistics cannot be gathered',
'content': 'Please make sure you have used pip install uk-covid19',
'type': 'covid'}], "gather covid statistics test: FAILED"
def c_anaysis_test():
"""Checks the covid data can be correctly formated as a notification"""
with open('json_tests/covid-devon.json', 'r') as file_open:
covid_level = json.load(file_open)
assert covid_data_analysis(covid_level, 'Exeter') == {
'title': 'Current covid statistic for Exeter',
'content': "The number of new Covid-19 cases as of 2020-11-30, are: 69 with" +
" at total of: 597 new cases in the last 7 days and a total of: 13 new " +
"deaths in the last 7 days",
'type': 'covid'}, "covid_data_analysis test: FAILED"
def hours_test():
    """Checks that hours are correctly converted into minutes"""
    assert hours_to_minutes(6) == 360, "hours_to_minutes test: FAILED"
def minutes_test():
    """Checks that minutes are correctly converted into seconds"""
    assert minutes_to_seconds(16) == 960, "minutes_to_seconds test: FAILED"
def time_test():
    """Checks that a hh:mm time string is correctly converted into a total number of seconds"""
    assert hhmm_to_seconds("12:35") == 45300, "hhmm_to_seconds test: FAILED"
def assert_external_files():
""""""
n_api_test()
n_anaysis_test()
w_api_test()
w_anaysis_test()
c_api_test()
c_anaysis_test()
hours_test()
minutes_test()
time_test()
if __name__ == '__main__':
assert_external_files()
| 2.578125 | 3 |
bob/learn/tensorflow/data/tfrecords.py | bioidiap/bob.learn.tensorflow | 2 | 12791760 | """Utilities for TFRecords
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
TFRECORDS_EXT = ".tfrecords"
def tfrecord_name_and_json_name(output):
output = normalize_tfrecords_path(output)
json_output = output[: -len(TFRECORDS_EXT)] + ".json"
return output, json_output
def normalize_tfrecords_path(output):
if not output.endswith(TFRECORDS_EXT):
output += TFRECORDS_EXT
return output
def bytes_feature(value):
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def dataset_to_tfrecord(dataset, output):
"""Writes a tf.data.Dataset into a TFRecord file.
Parameters
----------
dataset : ``tf.data.Dataset``
The tf.data.Dataset that you want to write into a TFRecord file.
output : str
Path to the TFRecord file. Besides this file, a .json file is also created.
This json file is needed when you want to convert the TFRecord file back into
a dataset.
Returns
-------
``tf.Operation``
A tf.Operation that, when run, writes contents of dataset to a file. When
running in eager mode, calling this function will write the file. Otherwise, you
have to call session.run() on the returned operation.
"""
output, json_output = tfrecord_name_and_json_name(output)
# dump the structure so that we can read it back
meta = {
"output_types": repr(tf.compat.v1.data.get_output_types(dataset)),
"output_shapes": repr(tf.compat.v1.data.get_output_shapes(dataset)),
}
with open(json_output, "w") as f:
json.dump(meta, f)
# create a custom map function that serializes the dataset
def serialize_example_pyfunction(*args):
feature = {}
for i, f in enumerate(args):
key = f"feature{i}"
feature[key] = bytes_feature(f)
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
def tf_serialize_example(*args):
args = tf.nest.flatten(args)
args = [tf.io.serialize_tensor(f) for f in args]
tf_string = tf.py_function(serialize_example_pyfunction, args, tf.string)
return tf.reshape(tf_string, ()) # The result is a scalar
dataset = dataset.map(tf_serialize_example)
writer = tf.data.experimental.TFRecordWriter(output)
return writer.write(dataset)
def dataset_from_tfrecord(tfrecord, num_parallel_reads=None):
"""Reads TFRecords and returns a dataset.
The TFRecord file must have been created using the :any:`dataset_to_tfrecord`
function.
Parameters
----------
tfrecord : str or list
Path to the TFRecord file. Pass a list if you are sure several tfrecords need
the same map function.
num_parallel_reads: int
A `tf.int64` scalar representing the number of files to read in parallel.
Defaults to reading files sequentially.
Returns
-------
``tf.data.Dataset``
A dataset that contains the data from the TFRecord file.
"""
# these imports are needed so that eval can work
from tensorflow import TensorShape # noqa: F401
if isinstance(tfrecord, str):
tfrecord = [tfrecord]
tfrecord = [tfrecord_name_and_json_name(path) for path in tfrecord]
json_output = tfrecord[0][1]
tfrecord = [path[0] for path in tfrecord]
raw_dataset = tf.data.TFRecordDataset(
tfrecord, num_parallel_reads=num_parallel_reads
)
with open(json_output) as f:
meta = json.load(f)
for k, v in meta.items():
meta[k] = eval(v)
output_types = tf.nest.flatten(meta["output_types"])
output_shapes = tf.nest.flatten(meta["output_shapes"])
feature_description = {}
for i in range(len(output_types)):
key = f"feature{i}"
feature_description[key] = tf.io.FixedLenFeature([], tf.string)
def _parse_function(example_proto):
# Parse the input tf.Example proto using the dictionary above.
args = tf.io.parse_single_example(
serialized=example_proto, features=feature_description
)
args = tf.nest.flatten(args)
args = [tf.io.parse_tensor(v, t) for v, t in zip(args, output_types)]
args = [tf.reshape(v, s) for v, s in zip(args, output_shapes)]
return tf.nest.pack_sequence_as(meta["output_types"], args)
return raw_dataset.map(_parse_function)
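# --- Hedged round-trip sketch (not part of the original module); the file name below is
# hypothetical. In eager mode the write happens immediately; in graph mode the op
# returned by dataset_to_tfrecord must be run in a session first.
if __name__ == "__main__":
    demo_dataset = tf.data.Dataset.from_tensor_slices((tf.range(4), tf.range(4) * 2))
    dataset_to_tfrecord(demo_dataset, "/tmp/demo_pairs")  # also writes /tmp/demo_pairs.json
    for a, b in dataset_from_tfrecord("/tmp/demo_pairs"):
        print(int(a), int(b))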
| 2.84375 | 3 |
ebay_accounts/__init__.py | luke-dixon/django-ebay-accounts | 4 | 12791761 | <filename>ebay_accounts/__init__.py
# -*- coding: utf-8 -*-
default_app_config = 'ebay_accounts.apps.EbayAccountsConfig'
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
| 1.179688 | 1 |
gen2-pipeline-analyser/analyse.py | ibaiGorordo/depthai-experiments | 1 | 12791762 | <reponame>ibaiGorordo/depthai-experiments
import argparse
import json
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('file', metavar="FILE", type=Path, help="Path to pipeline JSON representation file to be used for analysis")
args = parser.parse_args()
if __name__ == "__main__":
if not args.file.exists():
raise RuntimeError("File does not exists")
with args.file.open() as f:
data = json.load(f)
connected_nodes = {}
for connection in data["connections"]:
connected_nodes[connection["node1Id"]] = {
**connected_nodes.get(connection["node1Id"], {}),
connection["node2Id"]: connected_nodes.get(connection["node1Id"], {}).get(connection["node2Id"], []) + [connection]
}
    def get_level(node_id, start_level=0):
        """Return the depth of a node: the length of the longest chain of upstream nodes feeding into it."""
        resolved_levels = [get_level(connected_id, start_level+1) for connected_id, connections in connected_nodes.items() if connected_id != node_id and node_id in connections]
        return max(resolved_levels) if len(resolved_levels) > 0 else start_level
hierarchy = {}
nodes = {}
for node_id, node in dict(data["nodes"]).items():
nodes[node_id] = node
level = get_level(node_id)
hierarchy[level] = {
**hierarchy.get(level, {}),
node_id: node
}
for level in sorted(hierarchy.keys()):
print(f"=== LEVEL {level} ====")
for node_id, node in hierarchy[level].items():
print(node["name"])
connected_to_str = " and ".join([
f"\"{connection['node1Output']}\" to {nodes[connected_id]['name']} \"{connection['node2Input']}\""
for connected_id in connected_nodes.get(node_id, [])
for connection in connected_nodes.get(node_id, {}).get(connected_id, [])
])
if len(connected_to_str) > 0:
connected_to_str = "\tConnections: " + connected_to_str
else:
connected_to_str = "\tNo connections"
print(connected_to_str)
print(f"\tProperties: {nodes[node_id]['properties']}")
print()
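# Illustrative shape of the pipeline JSON this script consumes (ids, names and port
# labels below are hypothetical; only the key names are taken from the code above):
# {
#   "nodes": {
#     "0": {"name": "ColorCamera", "properties": {}},
#     "1": {"name": "XLinkOut", "properties": {}}
#   },
#   "connections": [
#     {"node1Id": "0", "node1Output": "video", "node2Id": "1", "node2Input": "in"}
#   ]
# }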
| 2.796875 | 3 |
app/forms.py | lawrluor/matchstats | 6 | 12791763 | <filename>app/forms.py
from flask.ext.wtf import Form, validators
from wtforms import StringField, BooleanField, TextAreaField, SelectField, IntegerField, SelectMultipleField
from wtforms.validators import DataRequired, InputRequired, Required, ValidationError, StopValidation
from app import app, db
from app.models import *
import re
# list of all Regions
regionlist = [('Global', 'Global'), ('National', 'National'), ('New England', 'New England'), ('SoCal', 'SoCal'), ('North Carolina', 'North Carolina')]
# characters choice list for SelectField; a constant list taken by SelectField containing only the 26 SSBM Characters
character_choices = [('Fox', 'Fox'),
('Falco', 'Falco'),
('Sheik', 'Sheik'),
('Marth', 'Marth'),
('Jigglypuff', 'Jigglypuff'),
('Peach', 'Peach'),
('Captain Falcon', 'Captain Falcon'),
('Ice Climbers', 'Ice Climbers'),
('Dr. Mario', 'Dr. Mario'),
('Pikachu', 'Pikachu'),
('Samus', 'Samus'),
('Ganondorf', 'Ganondorf'),
('Luigi', 'Luigi'),
('Mario', 'Mario'),
('Young Link', 'Young Link'),
('Link', 'Link'),
('<NAME>', '<NAME>'),
('Yoshi', 'Yoshi'),
('Zelda', 'Zelda'),
('Roy', 'Roy'),
('Mewtwo', 'Mewtwo'),
('Mr. Game and Watch', 'Mr. Game and Watch'),
('Ness', 'Ness'),
('Bowser', 'Bowser'),
('Pichu', 'Pichu'),
('Kirby', 'Kirby')]
# simple list of character strings for all possible characters
character_list = ['Fox',
'Falco',
'Sheik',
'Marth',
'Jigglypuff',
'Peach',
'Captain Falcon',
'Ice Climbers',
'Dr. Mario',
'Pikachu',
'Samus',
'Ganondorf',
'Luigi',
'Mario',
'<NAME>',
'Link',
'<NAME>',
'Yoshi',
'Zelda',
'Roy',
'Mewtwo',
'Mr. Game and Watch',
'Ness',
'Bowser',
'Pichu',
'Kirby']
# custom validator to check if two (User.tag) fields are not the same. This function format allows for other parameters besides (form, field)
def not_equal_to(fieldname):
    message = "Winner and Loser can't be the same!"
    def _not_equal_to(form, field):
        # compare this field's value with the value of the other field named by `fieldname`
        if field.data == form[fieldname].data:
            raise ValidationError(message)
    return _not_equal_to
# custom validator to check that Set score can be converted to an integer, is a DQ value (-1), or is a 'W' or 'L' char
def set_score_check():
message = "You must submit the Set score as an integer>=-1 and <10, or as a W/L character"
def _set_score_check(form, field):
score = field.data
if score not in ['-1', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'W', 'L']:
raise ValidationError(message)
return _set_score_check
class UserCreate(Form):
user_tag = StringField('tag', validators=[DataRequired()])
user_region = StringField('region', validators=[DataRequired()])
user_characters = SelectMultipleField('characters', choices=character_choices, coerce=str)
class UserEdit(Form):
edit_tag = StringField('tag', validators=[DataRequired()])
edit_region = StringField('region', validators=[DataRequired()])
add_characters = SelectMultipleField('add_characters')
remove_characters = SelectMultipleField('remove_characters')
class SetCreate(Form):
set_tournament = StringField('tournament')
set_winner_tag = StringField('winner_tag', validators=[DataRequired()])
set_loser_tag = StringField('loser_tag', validators=[DataRequired()])
set_winner_score = StringField('winner_score', validators=[DataRequired(), set_score_check()])
set_loser_score = StringField('loser_score', validators=[DataRequired(), set_score_check()])
set_max_match_count = SelectField('Best of:', choices = [('1','1'), ('3','3'), ('5','5'), ('7','7')], validators=[Required()])
no_match_info = BooleanField('no_match_info')
class SetEdit(Form):
edit_tournament = StringField('tournament')
edit_winner_tag = StringField('winner_tag', validators=[DataRequired()])
edit_loser_tag = StringField('loser_tag', validators=[DataRequired()])
edit_winner_score = IntegerField('winner_score', validators=[InputRequired()])
edit_loser_score = IntegerField('loser_score', validators=[InputRequired()])
edit_max_match_count = IntegerField('max_match_count', validators=[InputRequired()])
edit_match_info = BooleanField('edit_match_info')
class MatchSubmit(Form):
match_stage = SelectField('match_stage', choices = [('Battlefield', 'Battlefield'), ('Dream Land', 'Dream Land'), ('Final Destination', 'Final Destination'), ('Fountain of Dreams', 'Fountain of Dreams'), ('Pokemon Stadium', 'Pokemon Stadium'), ('Yoshi\'s Story', 'Yoshi\'s Story'), ('Other', 'Other')], coerce=str)
# Data not required in case no match info is known (no validators for fields)
match_winner = SelectField('match_winner', coerce=str)
match_loser = SelectField('match_loser', coerce=str)
winner_char = SelectField('winner_char', choices=character_choices, coerce=str)
loser_char = SelectField('loser_char', choices=character_choices, coerce=str)
# SelectField format for choices: the first element of each (value, label) pair is the actual value; the label is what appears in the dropdown menu. In this case, both should be the same
# Form generated when looking to search for head to head results between players
class HeadToHead(Form):
user1 = StringField('user1', validators=[DataRequired()])
user2 = StringField('user2', validators=[DataRequired()])
# search form in navigation bar
class SearchForm(Form):
search = StringField('search', validators=[InputRequired()])
# select region form
class RegionSelect(Form):
region_name = SelectField('region_name', choices=regionlist, coerce=str)
# Character filter form in /browse_users
class CharacterFilter(Form):
character_name = SelectField('character_name', choices=[('Main', 'Main')] + character_choices, coerce=str)
| 2.34375 | 2 |
skchainer/linear.py | lucidfrontier45/scikit-chainer | 27 | 12791764 | <filename>skchainer/linear.py
__author__ = 'du'
from chainer import Chain, functions as F
from . import ChainerRegresser, ChainerClassifier
class LinearRegression(ChainerRegresser):
def _setup_network(self, **params):
return Chain(l1=F.Linear(params["n_dim"], 1))
def _forward(self, x, train=False):
y = self.network.l1(x)
return y
def _loss_func(self, y, t):
return F.mean_squared_error(y, t)
class LogisticRegression(ChainerClassifier):
def _setup_network(self, **params):
return Chain(l1=F.Linear(params["n_dim"], params["n_classes"]))
def _forward(self, x, train=False):
y = self.network.l1(x)
return y
def _loss_func(self, y, t):
return F.softmax_cross_entropy(y, t)
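# Hedged usage sketch (not from the original file): this assumes the ChainerRegresser/
# ChainerClassifier base classes expose a scikit-learn style fit/predict API and pass
# the keyword arguments below through to _setup_network.
# clf = LogisticRegression(n_dim=4, n_classes=3)
# clf.fit(X_train, y_train)
# predictions = clf.predict(X_test)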
| 2.703125 | 3 |
src/seed_words_wdg.py | frosted97/dash-masternode-tool | 75 | 12791765 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2021-04
from typing import Optional, List
from PyQt5 import QtCore, QtWidgets, QtGui
from PyQt5.QtCore import QVariant, QAbstractTableModel, pyqtSlot, QPoint, QTimer, Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QWidget, QMenu, QShortcut, QApplication, QLabel
from mnemonic import Mnemonic
from wnd_utils import WndUtils
class SeedWordsWdg(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent=parent)
self.layout_main: Optional[QtWidgets.QVBoxLayout] = None
self.spacer: Optional[QtWidgets.QSpacerItem] = None
self.word_count: int = 24
self.mnemonic_words: List[str] = [""] * 24
self.mnemonic = Mnemonic('english')
self.grid_model = MnemonicModel(self, self.mnemonic_words, self.mnemonic.wordlist)
self.popMenuWords: Optional[QMenu] = None
self.setupUi(self)
def setupUi(self, dlg):
dlg.setObjectName("SeedWordsWdg")
self.layout_main = QtWidgets.QVBoxLayout(dlg)
self.layout_main.setObjectName('layout_main')
self.layout_main.setContentsMargins(0, 0, 0, 0)
self.layout_main.setSpacing(3)
self.layout_main.setObjectName("verticalLayout")
self.viewMnemonic = QtWidgets.QTableView(self)
self.viewMnemonic.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.viewMnemonic.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.viewMnemonic.setObjectName("viewMnemonic")
self.viewMnemonic.horizontalHeader().setVisible(False)
self.viewMnemonic.horizontalHeader().setStretchLastSection(True)
self.viewMnemonic.verticalHeader().setVisible(False)
self.layout_main.addWidget(self.viewMnemonic)
self.msg = QtWidgets.QLabel(self)
self.msg.setWordWrap(True)
self.msg.setObjectName("msg")
self.msg.setText('You can copy and paste the complete set of seed words into this dialog directly (separated '
'by spaces, commas or line breaks).')
self.layout_main.addWidget(self.msg)
self.viewMnemonic.verticalHeader().setDefaultSectionSize(
self.viewMnemonic.verticalHeader().fontMetrics().height() + 6)
self.viewMnemonic.customContextMenuRequested.connect(self.on_viewMnemonic_customContextMenuRequested)
# words grid context menu
self.popMenuWords = QMenu(self)
# copy action
self.actCopyWords = self.popMenuWords.addAction("\u274f Copy all words")
self.actCopyWords.triggered.connect(self.on_copy_seed_words_triggered)
self.actCopyWords.setShortcut(QKeySequence("Ctrl+C")) # not working on Mac (used here to display
        # shortcut in the menu item)
QShortcut(QKeySequence("Ctrl+C"), self.viewMnemonic).activated.connect(self.on_copy_seed_words_triggered)
# paste action
self.act_paste_words = self.popMenuWords.addAction("\u23ce Paste")
self.act_paste_words.triggered.connect(self.on_paste_seed_words_triggered)
self.act_paste_words.setShortcut(QKeySequence("Ctrl+V"))
QShortcut(QKeySequence("Ctrl+V"), self.viewMnemonic).activated.connect(self.on_paste_seed_words_triggered)
def set_word_count(self, word_count):
self.word_count = word_count
self.grid_model.set_words_count(word_count)
def setup_mnem_view():
width = self.viewMnemonic.width()
width = int((width - (2 * 40)) / 2)
self.viewMnemonic.setModel(self.grid_model)
self.viewMnemonic.setColumnWidth(0, 40)
self.viewMnemonic.setColumnWidth(1, width)
self.viewMnemonic.setColumnWidth(2, 40)
QTimer.singleShot(10, setup_mnem_view)
def set_words(self, words):
for idx, word in enumerate(words):
if idx < len(self.mnemonic_words):
self.mnemonic_words[idx] = word
def get_cur_mnemonic_words(self):
ws = []
for idx, w in enumerate(self.mnemonic_words):
if idx >= self.word_count:
break
ws.append(w)
return ws
def on_copy_seed_words_triggered(self):
try:
ws = self.get_cur_mnemonic_words()
ws_str = '\n'.join(ws)
clipboard = QApplication.clipboard()
if clipboard:
clipboard.setText(ws_str)
except Exception as e:
self.error_msg(str(e))
def on_paste_seed_words_triggered(self):
try:
clipboard = QApplication.clipboard()
if clipboard:
ws_str = clipboard.text()
if isinstance(ws_str, str):
ws_str = ws_str.replace('\n', ' ').replace('\r', ' ').replace(",", ' ')
ws = ws_str.split()
for idx, w in enumerate(ws):
if idx >= self.word_count:
break
self.mnemonic_words[idx] = w
self.grid_model.refresh_view()
except Exception as e:
self.error_msg(str(e))
@pyqtSlot(QPoint)
def on_viewMnemonic_customContextMenuRequested(self, point):
try:
self.popMenuWords.exec_(self.viewMnemonic.mapToGlobal(point))
except Exception as e:
self.error_msg(str(e))
class MnemonicModel(QAbstractTableModel):
def __init__(self, parent, mnemonic_word_list, dictionary_words):
QAbstractTableModel.__init__(self, parent)
self.parent = parent
self.dictionary_words = dictionary_words
self.mnemonic_word_list = mnemonic_word_list
self.words_count = 24
self.read_only = False
self.columns = [
"#",
'Word',
'#',
'Word'
]
def set_words_count(self, words_count):
self.words_count = words_count
self.refresh_view()
def refresh_view(self):
self.beginResetModel()
self.endResetModel()
def set_read_only(self, ro):
self.read_only = ro
def columnCount(self, parent=None, *args, **kwargs):
return len(self.columns)
def rowCount(self, parent=None, *args, **kwargs):
        return self.words_count // 2
def headerData(self, section, orientation, role=None):
if role != 0:
return QVariant()
if orientation == 0x1:
if section < len(self.columns):
return self.columns[section]
return ''
else:
return ' '
def setData(self, index, data, role=None):
row_idx = index.row()
col_idx = index.column()
if 0 <= row_idx < int(self.words_count / 2):
if col_idx == 1:
idx = row_idx
else:
idx = row_idx + int(self.words_count / 2)
self.mnemonic_word_list[idx] = data
return True
def flags(self, index):
col_idx = index.column()
if col_idx in (1, 3):
ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable
if not self.read_only:
ret |= Qt.ItemIsEditable
else:
ret = Qt.ItemIsEnabled
return ret
def data(self, index, role=None):
if index.isValid():
col_idx = index.column()
row_idx = index.row()
if col_idx < len(self.columns):
if role in (Qt.DisplayRole, Qt.EditRole):
if col_idx == 0:
return str(row_idx + 1) + '.'
elif col_idx == 2:
return str(int(self.words_count / 2) + row_idx + 1) + '.'
elif col_idx == 1:
if 0 <= row_idx < int(self.words_count / 2):
return self.mnemonic_word_list[row_idx]
elif col_idx == 3:
if 0 <= row_idx < int(self.words_count / 2):
return self.mnemonic_word_list[int(self.words_count / 2) + row_idx]
elif role == Qt.ForegroundRole:
if 0 <= row_idx < int(self.words_count / 2):
if col_idx in (0, 1):
word_col_idx = 1
else:
word_col_idx = 3
if word_col_idx == 1:
word = self.mnemonic_word_list[row_idx]
elif word_col_idx == 3 and row_idx < int(self.words_count / 2):
word = self.mnemonic_word_list[int(self.words_count / 2) + row_idx]
else:
return
if word and word not in self.dictionary_words:
return QtGui.QColor('red')
elif role == Qt.BackgroundRole:
if col_idx in (0, 2):
return QtGui.QColor('lightgray')
elif role == Qt.TextAlignmentRole:
if col_idx in (0, 2):
return Qt.AlignRight
elif role == Qt.FontRole:
pass
return QVariant()
| 2.234375 | 2 |
qingmi/utils/functional.py | xiongxianzhu/qingmi | 20 | 12791766 | class Promise:
"""
Base class for the proxy class created in the closure of the lazy function.
It's used to recognize promises in code.
"""
pass
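# Hedged illustration (not part of the original file): a lazy() helper would create
# its proxy class as a Promise subclass inside its closure, so callers can detect
# deferred values with isinstance(value, Promise) before forcing evaluation.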
| 1.226563 | 1 |
toolkit/ev3/simulation/block/block.py | AlexGustafsson/ev3-emulator-toolkit | 0 | 12791767 | from typing import TypedDict, Optional, Dict
from dataclasses import dataclass
@dataclass
class BlockVariableDefinition():
type: str
id: str
name: str
@dataclass
class BlockField():
name: str
id: Optional[str]
variable_type: Optional[str]
value: str
@dataclass
class BlockShadow():
type: str
fields: Dict[str, BlockField]
@dataclass
class BlockValue():
name: str
shadow: BlockShadow
# TODO: Needs more test cases to verify implementation
@dataclass
class BlockMutation():
expanded: int
input_init: bool
@dataclass
class Block():
# ID
id: int
# Location of the block
x: Optional[int]
# Location of the block
y: Optional[int]
# Block type, such as "variable_set"
type: str
fields: Dict[str, BlockField]
values: Dict[str, BlockValue]
# The next block to process
next: Optional["Block"]
disabled: bool
# Statements such as HANDLER for event handlers
statements: Dict[str, "Block"]
def findTail(self) -> "Block":
"""Find the last block of the chain, which could be this block."""
return self if self.next is None else self.next.findTail()
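# Minimal illustration (not part of the original module): chain two blocks and
# resolve the tail; every field value below is a placeholder.
if __name__ == "__main__":
    _tail = Block(id=2, x=None, y=None, type="variables_set", fields={}, values={},
                  next=None, disabled=False, statements={})
    _head = Block(id=1, x=0, y=0, type="event_start", fields={}, values={},
                  next=_tail, disabled=False, statements={})
    assert _head.findTail() is _tail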
| 3.125 | 3 |
python_back_end/triangle_formatting/sub_triangler.py | Henler/ReBridge_data_cloud | 0 | 12791768 | <gh_stars>0
from difflib import SequenceMatcher
from string import digits
from copy import deepcopy
import numpy as np
import pandas as pd
from scipy.sparse.csgraph._traversal import connected_components
from python_back_end.utilities.help_functions import general_adjacent
from python_back_end.data_cleaning.date_col_identifier import DateColIdentifier, TypeColExtracter
from python_back_end.definitions import SheetTypeDefinitions
from python_back_end.program_settings import PROGRAM_PARAMETERS as pp
from python_back_end.triangle_formatting.triangle_chopper import TriangleChopper
from python_back_end.utilities.state_handling import DataHolder, DataStruct
class SubTriangler:
remove_digits = str.maketrans('', '', digits)
@staticmethod
def make_standard_triangles(dh, **kwargs):
assert 'meta_dh' in kwargs
meta_dh = kwargs['meta_dh']
tri_type = "single"
if "tri_type" in kwargs:
tri_type = kwargs['tri_type']
n_outputs = 1
if "n_outputs" in kwargs:
n_outputs = kwargs['n_outputs']
new_dh_dict = dict()
# This call will reset all entries in new_dh_dict
new_dh_dict[dh.name] = dh
for dh in new_dh_dict.values():
SubTriangler.name_and_scrub_triangle(dh, new_dh_dict, meta_dh=meta_dh)
for ds in list(new_dh_dict.values())[0].data_struct_list:
SubTriangler.divide_into_subtriangles(ds, new_dh_dict, meta_dh)
for new_dh in new_dh_dict.values():
SubTriangler.scrub_rows(new_dh)
# choose dh in dict
dh_name = SubTriangler.data_holder_selector(new_dh_dict, dh.name, tri_type, n_outputs)
return new_dh_dict[dh_name], new_dh_dict
@staticmethod
def scrub_rows(dh):
for ds in dh:
d_cols = DateColIdentifier.identify_marked_date_cols(ds)
d_cols = d_cols[d_cols].index
date_form = []
for col_name in d_cols:
col = ds.df_data[col_name]
if len(date_form) == 0:
date_form = DateColIdentifier.date_form(col) == 1
else:
date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1)
not_date_form = date_form == False
ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form])
ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form])
@staticmethod
def data_holder_selector(dh_dict, orig_name, tri_type, n_outputs):
# only decompositions that exist for all sheets are acknowledged
fully_represented, n_orig_sheets = SubTriangler.get_fully_represented(dh_dict, orig_name)
if len(fully_represented) == 1:
return fully_represented[0]
# check size coherence
deviations = list()
if tri_type == "aggregate":
# search for the most square ones!
for name in fully_represented:
dh = dh_dict[name]
square_ind1 = [ds.df_data.shape[0]/ds.df_data.shape[1] for ds in dh]
square_ind2 = [ds.df_data.shape[1] / ds.df_data.shape[0] for ds in dh]
square_index = np.minimum(square_ind1, square_ind2).mean()
deviations.append(1-square_index)
elif tri_type == "single":
# get the dh with the most coherent sizes
for name in fully_represented:
dh = dh_dict[name]
n_sheet_dev = np.maximum(0, pp.N_DESIRED_PER_SHEET*n_outputs - dh.n)
len_list = np.array([ds.df_data.shape[0] for ds in dh])
len_dev = np.sum(1-(len_list/np.max(len_list)))
dev = n_sheet_dev + len_dev
# if n_outputs == 1:
# dev = np.abs(pp.N_DESIRED_PER_SHEET - dh.n)
# else:
# occurence_list = [ds.orig_sheet_name for ds in dh]
# unique, counts = np.unique(occurence_list, return_counts=True)
# dev = np.sum(np.abs(counts - pp.N_DESIRED_PER_SHEET)) / len(unique)
deviations.append(dev)
else:
raise ValueError("Unknown triangle type: " + tri_type)
deviations = np.array(deviations)
return fully_represented[np.argmin(deviations)]
@staticmethod
def get_fully_represented(dh_dict, orig_name):
orig_dh = dh_dict[orig_name]
orig_sheets = {ds.orig_sheet_name for ds in orig_dh}
fully_represented = list()
for key, dh in dh_dict.items():
sheets = {ds.orig_sheet_name for ds in dh}
if len(orig_sheets.difference(sheets)) == 0:
fully_represented.append(key)
return fully_represented, len(orig_sheets)
@staticmethod
def name_and_scrub_triangle(dh, new_dh_dict, meta_dh=None):
new_dh = DataHolder(dh.name)
word_set_list = list()
for ds in dh:
word_set_list.append(SubTriangler.identify_category_name(ds, meta_dh))
if meta_dh != None:
if meta_dh.n > 0:
SubTriangler.divide_meta_data(dh, meta_dh, word_set_list)
# Find the most unique name
for i in range(len(word_set_list)):
ds = dh.data_struct_list[i]
difference = word_set_list[i].copy()
for j in range(len(word_set_list)):
if j != i and ds.orig_sheet_name == dh.data_struct_list[j].orig_sheet_name:
difference = difference.difference(word_set_list[j])
if len(difference) > 0:
stringified = sorted([str(el) for el in difference])
name = " ".join(stringified)
name = name.translate(SubTriangler.remove_digits)
else:
name = str(i)
if ds.name != ds.orig_sheet_name:
name = ds.name + " " + name
new_dh.add_sheet(name, ds.df_data, ds.df_profiles, orig_sheet_name=ds.orig_sheet_name)
new_dh_dict[dh.name] = new_dh
@staticmethod
def divide_meta_data(dh, meta_dh, word_set_list):
# map each meta data to the triangle closest under it
tr_ids = [ds.id for ds in dh]
meta_ids = [ds.id for ds in meta_dh.data_struct_list]
#content = pd.Series([ds.df_data.values[0] for ds in meta_dh], index=meta_ids)
distances = pd.DataFrame(np.iinfo(np.uint32).max, columns=tr_ids, index=meta_ids)
#check if median is a reasonable measure of distance
spatial_info = SubTriangler.generate_tr_spatial_info(dh)
for ds in dh:
if len(ds.df_data.index) > 0:
ds_low = np.min(np.array(ds.df_data.index))
ds_high = np.max(np.array(ds.df_data.index))
if ds.name in meta_dh.data_dict:
for meta_ds in meta_dh.data_dict[ds.name]:
if ds.df_data.size > meta_ds.df_data.size:
meta_high = np.max(np.array(meta_ds.df_data.index))
# check = meta_ds.df_data.iloc[0,0]
# if check == 'Combined':
# meta_col_int_array = np.array(
# [int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns])
# print('found')
if meta_high <= ds_high:
tr_col_int_array = spatial_info[ds.id]['int_array']
meta_col_int_array = np.array(
[int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in meta_ds.df_data.columns])
#meta_vert_median = np.array(meta_ds.df_data.index)[int(np.floor(len(meta_ds.df_data.index) / 2))]
if len(tr_col_int_array) > 0:
if spatial_info['use_median']:
meta_median = meta_col_int_array[int(np.floor(len(meta_col_int_array) / 2))]
col_dist = np.abs(meta_median - spatial_info[ds.id]['hori_median'])
else:
col_mat = np.abs(tr_col_int_array - meta_col_int_array)
col_dist = np.min(col_mat)
ind_dist = np.abs(meta_high-ds_low)
distances.loc[meta_ds.id, ds.id] = ind_dist + col_dist
closest_dists = distances.min(axis=1)
closest_ids = {index: distances.columns[np.where(distances.loc[index, :] == closest_dists[index])] for index in
closest_dists.index}
for word_set, ds in zip(word_set_list, dh):
for meta_id in closest_dists.index:
if closest_dists[meta_id] < pp.MAX_LENGTH_TO_RELATED_DATA:
if ds.id in closest_ids[meta_id]:
vals = meta_dh.id_dict[meta_id].df_data.values.flatten()
word_list = [str(el) for el in vals]
word_set.update(word_list)
@staticmethod
def generate_tr_spatial_info(dh):
outer_dict = dict()
for ds in dh:
tr_spatial_dict = dict()
num_cols = TypeColExtracter.extract_num_cols(ds.df_data, ds.df_profiles)
adj_headers = general_adjacent(num_cols.columns)
num_cols = num_cols[adj_headers]
if num_cols.size > 0:
tr_col_int_array = np.array([int(el[0:pp.N_DIGITS_HEADER_PADDING]) for el in num_cols.columns])
tr_spatial_dict["hori_median"] = tr_col_int_array[int(np.floor(len(tr_col_int_array) / 2))]
tr_spatial_dict["int_array"] = tr_col_int_array.reshape((tr_col_int_array.size, 1))
tr_spatial_dict["vert_median"] = np.array(num_cols.index)[int(np.floor(len(num_cols.index) / 2))]
else:
tr_spatial_dict["hori_median"] = np.iinfo(np.uint32).max
tr_spatial_dict["int_array"] = np.array([])
tr_spatial_dict["vert_median"] = np.iinfo(np.uint32).max
tr_spatial_dict['name'] = ds.name
outer_dict[ds.id] = tr_spatial_dict
use_median = True
for name in dh.data_dict:
info_array = np.array([np.array([el['hori_median'], el['vert_median']]) for el in outer_dict.values()
if el['name'] == name])
distances = np.zeros((len(info_array), len(info_array))) + pp.MIN_MEDIAN_DISTANCE
for i in range(len(info_array)):
for j in range(i + 1, len(info_array)):
distances[i, j] = np.linalg.norm(info_array[i, :] - info_array[j, :])
min_dist = np.min(distances)
if min_dist < pp.MIN_MEDIAN_DISTANCE:
use_median = False
outer_dict['use_median'] = use_median
return outer_dict
@staticmethod
def divide_into_subtriangles(ds, new_dh_dict, meta_dh):
SubTriangler.vertical_category_division(ds, new_dh_dict, meta_dh)
SubTriangler.horizontal_category_division(ds, new_dh_dict, meta_dh)
@staticmethod
def vertical_category_division(ds, new_dh_dict, meta_dh):
# find the category column
# Should be strings (for now) (used)
# Kind of periodic (thus repetitive entries) (used)
# some entries may change slightly (used)
        # period may change slightly (not checked for now; should be checked in a new if statement)
# Should get tag matches in dict (not checked for now)
df_data = ds.df_data
df_profiles = ds.df_profiles
orig_name = ds.orig_sheet_name
for col_name, col in df_data.iteritems():
string_ratio = np.sum(df_profiles[col_name].values == SheetTypeDefinitions.STRING) / df_profiles[col_name].values.size
if string_ratio > pp.MIN_STRING_RATIO_CAT_COL:
# check periodic potential
string_col = col.astype(str)
unique, counts = np.unique(string_col, return_counts=True)
ratio = np.max(counts) / col.size
if ratio < pp.MAX_RATIO_LARGEST_CAT and ratio > pp.MIN_RATIO_LARGEST_CAT and len(unique) < pp.MAX_N_CATS:
if col_name in new_dh_dict:
new_dh = new_dh_dict[col_name]
else:
new_dh = DataHolder(col_name)
new_dh_dict[col_name] = new_dh
#period_label_bool = counts * period > string_col.size - period
# now get the remaining
#sub_period_label = unique[period_label_bool == False]
match_dict = SubTriangler.component_finder(unique)
# now load the new_dh
for name in match_dict:
cond = np.array([string_col.values == sub_name for sub_name in match_dict[name]]).any(axis=0)
sub_df_data = df_data[cond].drop(columns=[string_col.name])
sub_df_profiles = df_profiles[cond].drop(columns=[string_col.name])
if name == "" or np.sum(cond) < 4:
new_ds = DataStruct(sub_df_data, sub_df_profiles, name, orig_sheet_name=orig_name)
for split in new_ds.col_split_ds():
if not np.all(split.df_profiles == SheetTypeDefinitions.EMPTY_STRING) and not (
np.all(split.df_data == "")):
meta_dh.add_ds(split)
else:
new_dh.add_sheet(ds.name + " - " + name, sub_df_data, sub_df_profiles, orig_sheet_name=orig_name)
@staticmethod
def horizontal_category_division(ds, new_dh_dict, meta_dh):
# find potential category rows
# for now, look for strings
str_ratio = (ds.df_profiles == SheetTypeDefinitions.STRING).sum(axis=1)/ds.df_profiles.shape[1]
cat_cols = str_ratio >= pp.MIN_STRING_RATIO_CAT_ROW
for ind in cat_cols.index[cat_cols]:
cat_row = ds.df_data.loc[ind, :]
unique, counts = np.unique(cat_row, return_counts=True)
ratio = np.max(counts) / cat_row.size
if ratio < 0.5 and len(unique)/cat_row.size < 0.5:
row_name = "Row " + str(ind)
if row_name in new_dh_dict:
new_dh = new_dh_dict[row_name]
else:
new_dh = DataHolder(row_name)
new_dh_dict[row_name] = new_dh
match_dict = SubTriangler.component_finder(unique)
rev_match_dict = dict()
for key, val in match_dict.items():
for item in val:
rev_match_dict[item] = key
count_dict = {}
for key, val in match_dict.items():
active = np.isin(unique, val)
count_dict[key] = np.sum(counts[active])
# get number of data_structs to make
headers_dict = {}
for key, val in count_dict.items():
if val > pp.MIN_YEARS_SPANNED:
headers_dict[key] = []
len_array = np.zeros(len(headers_dict), dtype=int)
for enum, key in enumerate(headers_dict):
for name, val in cat_row.iteritems():
if rev_match_dict[val] not in headers_dict or rev_match_dict[val] == key:
headers_dict[key].append(name)
len_array[enum] = len(headers_dict[key])
# Now fill the dh
# First, if same length, find optimal header naming
same_length = np.std(len_array) == 0
if same_length:
out_headers = deepcopy(headers_dict)
for i in range(len_array[0]):
i_headers = np.array([val[i] for val in headers_dict.values()])
missing = np.array(["Missing header" in header for header in i_headers])
if np.any(missing) and np.any(np.logical_not(missing)):
header = i_headers[np.logical_not(missing)][0]
for key in out_headers:
out_headers[key][i] = header
for key, val in headers_dict.items():
df_data = ds.df_data.loc[ds.df_data.index != ind, val]
df_profiles = ds.df_profiles.loc[ds.df_data.index != ind, val]
if same_length:
df_data = pd.DataFrame(df_data.values, index=df_data.index, columns=out_headers[key])
df_profiles = pd.DataFrame(df_profiles.values, index=df_profiles.index, columns=out_headers[key])
new_dh.add_sheet(ds.name + " - " + key, df_data, df_profiles, orig_sheet_name=ds.orig_sheet_name)
# find similar entries, group them and make collective name
@staticmethod
def component_finder(uniques):
n_el = len(uniques)
dist_m = np.zeros(shape=(n_el,n_el))
for i in range(n_el):
for j in range(i, n_el):
dist_m[i][j] = SequenceMatcher(None, uniques[i], uniques[j]).ratio()
dist_m = dist_m + np.transpose(dist_m) - np.eye(n_el)
n_components, labels = connected_components(dist_m >= pp.MIN_LABEL_SIM)
# make matches
match_dict = dict()
for i in range(n_components):
comp = np.array(uniques)[labels == i]
if len(comp) == 1:
match_dict[comp[0]] = comp
else:
# find a common name
block = SequenceMatcher(None, comp[0], comp[1]).find_longest_match(0, len(comp[0]), 0, len(comp[1]))
name = comp[0][block.a:block.size+block.a]
match_dict[name] = comp
return match_dict
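    # Illustrative behaviour of component_finder (hypothetical labels, assuming their
    # pairwise similarity clears pp.MIN_LABEL_SIM): labels such as "Paid Claims 2017"
    # and "Paid Claims 2018" end up in one group keyed by their longest common
    # substring, while a dissimilar label like "Case Reserves" stays in a
    # single-member group keyed by itself.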
@staticmethod
def identify_category_name(ds, meta_dh):
#df_data = ds.df_data
d_cols = DateColIdentifier.identify_marked_date_cols(ds)
d_cols = d_cols[d_cols].index
date_form = []
for col_name in d_cols:
col = ds.df_data[col_name]
if len(date_form) == 0:
date_form = DateColIdentifier.date_form(col) == 1
else:
date_form = np.logical_or(date_form, DateColIdentifier.date_form(col) == 1)
not_date_form = date_form == False
for ind in ds.df_data.index[not_date_form]:
for col in ds.df_data.columns:
if ds.df_data.loc[ind, col] != "" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING:
temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col])
temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col])
meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name)
#wordset = set(ds.df_data.values[not_date_form, :].flatten())
#ds.df_data = ds.df_data.drop(ds.df_data.index[not_date_form])
#ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[not_date_form])
#kill repeated headers
# strip repeated headers
repeated, deviating_entries = TriangleChopper.find_repeated_headers(ds)
#wordset.update(deviating_entries)
for ind in ds.df_data.index[repeated]:
for col in deviating_entries:
if ds.df_data.loc[ind, col] != "" and ds.df_profiles.loc[ind, col] == SheetTypeDefinitions.STRING:
temp_data = pd.DataFrame(ds.df_data.loc[ind, col], index=[ind], columns=[col])
temp_profile = pd.DataFrame(SheetTypeDefinitions.STRING, index=[ind], columns=[col])
meta_dh.add_sheet(ds.name, temp_data, temp_profile, orig_sheet_name=ds.orig_sheet_name)
for ind in repeated:
ds.df_data = ds.df_data.drop(ds.df_data.index[ind])
ds.df_profiles = ds.df_profiles.drop(ds.df_profiles.index[ind])
#wordset.add(ds.name)
#return wordset
return set()
| 2.265625 | 2 |
Home-and-Elementary/between_markers.py | RiquePM/pyCheckiO-my_solutions | 0 | 12791769 | #--------------------------------------------#
# My Solution #
#--------------------------------------------#
def between_markers(text: str, begin: str, end: str):
if begin in text and end in text:
if text.find(begin)>text.find(end):
return ""
else:
return text[text.find(begin)+len(begin):text.find(end)]
elif begin not in text and end not in text:
return text[:]
elif begin not in text:
return text[:text.find(end)]
elif end not in text:
return text[text.find(begin)+len(begin):]
#--------------------------------------------#
# Other Solutions #
#--------------------------------------------#
#def between_markers(text: str, begin: str, end: str) -> str:
#start = text.find(begin) + len(begin) if begin in text else None
#stop = text.find(end) if end in text else None
#return text[start:stop]
#def between_markers(text: str, begin: str, end: str) -> str:
#start, stop = map(text.find, (begin, end))
#return text[(start + len(begin), 0)[start < 0]:(stop, None)[stop < 0]] → if-tuple slicing
#def between_markers(text: str, begin: str, end: str) -> str:
#if begin in text and end in text and text.index(begin) > text.index(end):
#return ''
#return text.split(begin)[-1].split(end)[0]
#--------------------------------------------#
# Test #
#--------------------------------------------#
a = between_markers('What is >apple<', '>', '<') #== "apple", "One sym"
b = between_markers("<head><title>My new site</title></head>",
"<title>", "</title>") #== "My new site", "HTML"
c = between_markers('No[/b] hi', '[b]', '[/b]') #== 'No', 'No opened'
d = between_markers('No [b]hi', '[b]', '[/b]') #== 'hi', 'No close'
e = between_markers('No hi', '[b]', '[/b]') #== 'No hi', 'No markers at all'
f = between_markers('No <hi>', '>', '<') #== '', 'Wrong direction'
print(a, b, c, d, e, f, sep="\n")
| 3.4375 | 3 |
src/meanings/entityAnalysis.py | jordanjoewatson/natural-language-classifier | 1 | 12791770 | #!/usr/local/bin/python
import spacy
nlp = spacy.load('en')
entityLs = ["ORG","PERSON","DATE","TIME","MONEY","PERCENT","FAC","GPE","NORP","WORK_OF_ART","QUANTITY","LOC","PRODUCT","EVENT","LAW","LANGUAGE","ORDINAL","CARDINAL"]
def updateAlphaLs(text):
alphaLs = []
doc = nlp(text)
for token in doc:
if(token.is_alpha): alphaLs.append(token)
return alphaLs
def updateEntityLs(text):
entityDict = {entity: 0 for entity in entityLs}
doc = nlp(text)
for entity in doc.ents:
entityDict[entity.label_] += 1
return entityDict
def alphaPercentage(text,wordCount):
alphaLs = updateAlphaLs(text)
return(len(alphaLs)/wordCount)
def entityPercentage(text,wordCount):
entityDict = updateEntityLs(text)
entitySum = 0
for k,v in entityDict.items(): entitySum += v
return(entitySum/wordCount)
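# Illustrative check (hypothetical sentence; results depend on the loaded spaCy model):
# sample = "Apple opened a new office in London in 2019."
# print(alphaPercentage(sample, wordCount=9))   # share of purely alphabetic tokens
# print(entityPercentage(sample, wordCount=9))  # named-entity mentions per word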
| 2.78125 | 3 |
trello/space_works/migrations/0001_initial.py | copydataai/clon-trello | 0 | 12791771 | <filename>trello/space_works/migrations/0001_initial.py
# Generated by Django 3.2.13 on 2022-05-22 00:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Membership',
fields=[
('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('is_admin', models.BooleanField(default=False, help_text='Space Works admins can update the space works data and manage its members', verbose_name='space_works admin')),
('is_active', models.BooleanField(default=True, help_text='Only active users are allowed to interact in the circle', verbose_name='active status')),
('invited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='invited_by', to=settings.AUTH_USER_MODEL)),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile')),
],
options={
'ordering': ('-created', '-modified'),
'abstract': False,
},
),
migrations.CreateModel(
name='SpaceWork',
fields=[
('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('description', models.TextField(max_length=500)),
('slug_name', models.SlugField(max_length=40, unique=True)),
('is_public', models.BooleanField(default=True)),
('members', models.ManyToManyField(through='space_works.Membership', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='membership',
name='space_work',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework'),
),
migrations.AddField(
model_name='membership',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='List',
fields=[
('created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('description', models.TextField(max_length=500)),
('space_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='space_works.spacework')),
],
options={
'abstract': False,
},
),
]
| 1.828125 | 2 |
CISSL_cls/lib/algs/mean_teacher.py | MinsungHyun/Class-Imbalanced-Semi-Supervsied-Learning | 4 | 12791772 | import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
class MT(nn.Module):
def __init__(self, model, ema_factor, loss='mse', scl_weight=None):
super().__init__()
self.model = model
self.model.train()
self.ema_factor = ema_factor
self.global_step = 0
self.loss = loss
# scl weight
if scl_weight is not None:
self.scl_weight = scl_weight
else:
self.scl_weight = None
def forward(self, x, y, model, mask):
self.global_step += 1
y_hat = self.model(x)
model.update_batch_stats(False)
y = model(x) # recompute y since y as input of forward function is detached
model.update_batch_stats(True)
if self.loss == 'mse':
if self.scl_weight is not None:
target_weights = torch.stack(list(map(lambda t: self.scl_weight[t.data], y.max(1)[1])))
return (target_weights * F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction="none").mean(1) * mask).mean()
else:
return (F.mse_loss(y.softmax(1), y_hat.softmax(1).detach(), reduction="none").mean(1) * mask).mean()
elif self.loss == 'kld':
return (F.kl_div(y.softmax(1).log(), y_hat.softmax(1).detach(), reduction="none").sum(1) * mask).mean()
else:
raise ValueError("{} is unknown loss type".format(self.loss))
def moving_average(self, parameters):
ema_factor = min(1 - 1 / (self.global_step+1), self.ema_factor)
for ema_p, p in zip(self.model.parameters(), parameters):
ema_p.data = ema_factor * ema_p.data + (1 - ema_factor) * p.data
| 2.34375 | 2 |
ini2.py | lmartinho/rosalind-solutions | 0 | 12791773 | <filename>ini2.py<gh_stars>0
a=807
b=905
#a=3
#b=5
c=a*a+b*b
print(c)
| 1.5625 | 2 |
recreate/common/myfolder.py | majo48/recreate-git | 0 | 12791774 | <reponame>majo48/recreate-git<gh_stars>0
""" Class MyFolder:
    set each file's 'date created' to metadata.creation_time for all files in the folder
Copyright (c) 2021 <NAME> (<EMAIL>)
"""
import io
import sys
import os
from recreate.common import myfile
class MyFolder:
""" Recursive part of the recreate application """
def __init__(self, file_path):
""" Init class variables """
self.files = []
self.error = None
if os.path.isfile(file_path):
self.files.append(myfile.MyFile(file_path))
elif os.path.isdir(file_path):
for subdir, dirs, files in os.walk(file_path):
for file in files:
self.files.append(myfile.MyFile(os.path.join(subdir, file)))
else:
            self.error = 'Invalid path: ' + file_path
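# Hedged usage sketch (the path below is hypothetical):
# folder = MyFolder('/photos/2021')
# if folder.error:
#     print(folder.error)
# else:
#     print('collected', len(folder.files), 'files')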
| 2.953125 | 3 |
blog/migrations/0002_summary.py | maweefeng/Blog-Powed-by-Django | 1 | 12791775 | # Generated by Django 2.0 on 2019-05-21 06:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Summary',
fields=[
('summary_id', models.AutoField(primary_key=True, serialize=False)),
('summary_title', models.TextField()),
('summary_content', models.TextField()),
('publish_date', models.DateTimeField(auto_now=True)),
],
),
]
| 1.734375 | 2 |
CondTools/RPC/test/riovtest_cfg.py | nistefan/cmssw | 0 | 12791776 | <filename>CondTools/RPC/test/riovtest_cfg.py
import FWCore.ParameterSet.Config as cms
process = cms.Process("Demo")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.CondDBCommon.connect = 'sqlite_file:/afs/cern.ch/user/d/dpagano/public/dati.db'
process.CondDBCommon.DBParameters.authenticationPath = './'
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.rimon = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
timetype = cms.string('timestamp'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('RPCObImonRcd'),
tag = cms.string('Imon_v3')
))
)
process.rvmon = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
timetype = cms.string('timestamp'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('RPCObVmonRcd'),
tag = cms.string('Vmon_v3')
))
)
process.rtemp = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
timetype = cms.string('timestamp'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('RPCObTempRcd'),
tag = cms.string('Temp_v3')
))
)
process.rpvss = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
timetype = cms.string('timestamp'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('RPCObPVSSmapRcd'),
tag = cms.string('PVSS_v3')
))
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC7E572F-48AE-DD11-AA2A-0019DB29C5FC.root',
'/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FC5D6C50-60AE-DD11-98A3-001617C3B66C.root',
'/store/data/Commissioning08/Cosmics/RECO/v1/000/070/195/FACA249F-2FAE-DD11-968B-001617E30D4A.root'
)
)
process.demo = cms.EDAnalyzer('RiovTest')
process.p = cms.Path(process.demo)
| 1.515625 | 2 |
data/backup/remove_punc_from_squad.py | ghundal93/QA-Experiments-with-DMN | 0 | 12791777 | import os
import string
import sys
mode = sys.argv[1]
output = open("punc_removed_"+str(mode)+".txt","w")
for i,line in enumerate(open("cleaned_squad_"+str(mode)+".txt")):
if "?" in line:
idx = line.find('?')
tmp = line[idx+1:].split('\t')
q = line[:idx+1]
a = tmp[1].strip()
a = a.replace(","," ")
a = a.translate(None, string.punctuation)
q = q.translate(None, string.punctuation)
q = q+"?"
output_line = q + "\t" + a
for i in range(2,len(tmp)):
output_line += "\t"+tmp[i]
output.write(output_line)
else:
line = line.translate(None, string.punctuation)
output.write(line)
print("done")
| 2.671875 | 3 |
docs_translate/reserved_word.py | gs-kikim/md_docs-trans-app | 0 | 12791778 | <filename>docs_translate/reserved_word.py<gh_stars>0
from pathlib import Path
import json
import re
class ReservedWords:
def __init__(self, path: Path):
with open(path, encoding='UTF8') as json_file:
json_data = json.load(json_file)
self.data = json_data
self.keys = list(self.data.keys())
self.compile = re.compile("("+")|(".join(self.keys)+")")
def translate(self, line: str):
for group, idx in sorted([(m.group(), m.groups().index(m.group())) for m in self.compile.finditer(line)], key=lambda x: x[0], reverse=True):
line = re.sub(group, self.data[self.keys[idx]], line)
return line
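# Hedged usage sketch (file name and word pair are hypothetical): the JSON file is
# expected to map source words to replacements, e.g. {"configuracion": "configuration"}.
# words = ReservedWords(Path("reserved.json"))
# words.translate("ver configuracion")  # -> "ver configuration"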
| 3.203125 | 3 |
3.Object_oriented_programming/1.into.py | jkuatdsc/Python101 | 2 | 12791779 | <reponame>jkuatdsc/Python101
"""
Object-oriented programming allows programs to model objects in the real world
"""
"""
Create a student dict with name and grades and write a function to calculate the average grade of the student
"""
def main():
student = {
'name': '<NAME>',
'grades': [90, 100, 98, 97, 96]
}
def calculate_average(grades):
average = grades / len(student['grades'])
return average
total = sum(student['grades'])
print(calculate_average(total))
"""
Instead of using a dictionary, now use a class to represent the student.
--> Dictionaries cannot contain functions that act on their data, but classes can
--> Multiple independent objects can be created from one class
--> A class is a blueprint for creating objects
--> An object is an instance of a class (a variable whose type is the class)
"""
class Student:
""" a class to represent a student..it models a students with a name and grades"""
def __init__(self, names, grades): # The __init__() function is a constructor for the class student
self.name = names
self.grade = grades
def calculate_average(self):
"""A function to calculate the average of a students grades"""
average = sum(self.grade) / len(self.grade)
return average
# Objects of the student class
student_one = Student('<NAME>', [90, 96, 98, 97, 100])
student_two = Student('<NAME>', [89, 98, 78, 89, 96])
# Accessing the fields in the objects
print('')
print(student_one.name)
print(student_one.grade)
print(f'{student_one.name}\'s average grade is {student_one.calculate_average()}')
print('')
print(student_two.name)
print(student_two.grade)
print(f'{student_two.name}\'s average grade is {student_two.calculate_average()}')
if __name__ == '__main__':
main()
| 4.21875 | 4 |
recognition/predict.py | w-garcia/insightface | 108 | 12791780 | <reponame>w-garcia/insightface
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import sys
import tensorflow as tf
import yaml
from recognition.backbones.resnet_v1 import ResNet_v1_50
from recognition.models.models import MyModel
tf.enable_eager_execution()
def get_embeddings(model, images):
prelogits, _, _ = model(images, training=False)
embeddings = tf.nn.l2_normalize(prelogits, axis=-1)
return embeddings
def parse_args(argv):
parser = argparse.ArgumentParser(description='Train face network')
parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml')
args = parser.parse_args(argv)
return args
def main():
args = parse_args(sys.argv[1:])
# logger.info(args)
from recognition.data.generate_data import GenerateData
with open(args.config_path) as cfg:
config = yaml.load(cfg, Loader=yaml.FullLoader)
gd = GenerateData(config)
train_data, _ = gd.get_train_data()
model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size'])
ckpt_dir = os.path.expanduser(config['ckpt_dir'])
ckpt = tf.train.Checkpoint(backbone=model.backbone)
ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()
print("Restored from {}".format(tf.train.latest_checkpoint(ckpt_dir)))
# for layer in tf.train.list_variables(tf.train.latest_checkpoint(ckpt_dir)):
# print(layer)
for img, _ in train_data.take(1):
embs = get_embeddings(model, img)
for i in range(embs.shape[0]):
for j in range(embs.shape[0]):
val = 0
for k in range(512):
val += embs[i][k] * embs[j][k]
print(i, j, val)
if __name__ == '__main__':
# logger.info("hello, insightface/recognition")
main()
| 1.9375 | 2 |
Lib/test/test_compiler/testcorpus/04_assign.py | diogommartins/cinder | 1,886 | 12791781 | a = 1
b = "foo"
c = (d, e)
di = {f: 1, g: 2}
| 1.453125 | 1 |
LeetCode/217 Contains Duplicate.py | gesuwen/Algorithms | 0 | 12791782 | # Array; Hash Table
# Given an array of integers, find if the array contains any duplicates.
#
# Your function should return true if any value appears at least twice in the array, and it should return false if every element is distinct.
#
# Example 1:
#
# Input: [1,2,3,1]
# Output: true
# Example 2:
#
# Input: [1,2,3,4]
# Output: false
# Example 3:
#
# Input: [1,1,1,3,3,4,3,2,4,2]
# Output: true
import collections
class Solution:
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
numCount = collections.defaultdict(int)
for x in nums:
if numCount[x] == 1:
return True
numCount[x] += 1
return False
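# Quick illustrative checks against the examples listed above:
# assert Solution().containsDuplicate([1, 2, 3, 1]) is True
# assert Solution().containsDuplicate([1, 2, 3, 4]) is False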
| 3.953125 | 4 |
solutions/python3/433.py | sm2774us/amazon_interview_prep_2021 | 42 | 12791783 | from typing import List
class Solution:
def minMutation(self, start: str, end: str, bank: List[str]) -> int:
bfs = [start]
genes = set(bank)
cnt = 0
while bfs:
arr = []
for g in bfs:
if g == end:
return cnt
for i, c in enumerate(g):
for new in 'AGTC':
if new != c:
s = g[:i] + new + g[i + 1:]
if s in genes:
arr.append(s)
genes.discard(s)
bfs = arr
cnt += 1
return -1 | 3.015625 | 3 |
migrations/versions/4c087f9202a_.py | mattkantor/basic-flask-app | 0 | 12791784 | <filename>migrations/versions/4c087f9202a_.py
"""empty message
Revision ID: 4c087f9202a
Revises: <PASSWORD>
Create Date: 2018-08-02 07:30:03.072213
"""
# revision identifiers, used by Alembic.
from sqlalchemy import Integer, String
revision = '4c087f9202a'
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('groups', sa.Column('user_ids', postgresql.ARRAY(String), nullable=True, default=[]))
op.alter_column('groups', 'user_id',
existing_type=sa.INTEGER(),
nullable=True)
op.create_foreign_key(None, 'groups', 'users', ['user_id'], ['id'])
op.create_foreign_key(None, 'news', 'users', ['user_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'news', type_='foreignkey')
op.drop_constraint(None, 'groups', type_='foreignkey')
op.alter_column('groups', 'user_id',
existing_type=sa.INTEGER(),
nullable=False)
op.drop_column('groups', 'user_ids')
### end Alembic commands ###
| 1.726563 | 2 |
flex/http/cors.py | centergy/flex | 0 | 12791785 | <gh_stars>0
from flask_cors import CORS
from flex.conf import config
cors = CORS(origins=config.CORS_ORIGINS, supports_credentials=True) | 1.351563 | 1 |
{{cookiecutter.project_slug}}/apps/cms/presets.py | ukwahlula/django-server-boilerplate | 2 | 12791786 | # flake8: noqa
# fmt: off
from .choices import ContentType
CONTENT_PRESETS= {
# Emails
ContentType.EMAIL_VERIFICATION_SUBJECT: {
"content": "Verify your email.",
},
ContentType.EMAIL_VERIFICATION_BODY: {
"content": "Your verification code is {email_verification_code}.",
},
ContentType.EMAIL_RESET_PASSWORD_SUBJECT: {
"content": "Reset Password.",
},
ContentType.EMAIL_RESET_PASSWORD_BODY: {
"content": "Please click <a href=\"{password_verification_link}\">the link</a> to reset your password.",
},
# Sms
ContentType.PHONE_VERIFICATION: {
"content": "Your verification code is {phone_verification_code}",
},
ContentType.PHONE_VERIFICATION_2FA: {
"content": "Your verification code is {phone_verification_code_2fa}",
},
}
# fmt: on
| 1.570313 | 2 |
src/dissertation/printInterpolationBarCharts.py | dhruvtapasvi/implementation | 1 | 12791787 | import numpy as np
import matplotlib.pyplot as plt
from evaluation.results import packageResults
from dissertation import datasetInfo
from config.routes import getRecordedResultsRoute
MINI_FONTSIZE=10
FONTSIZE = 14 * 4 / 3
NUMBER_FORMAT = "{:.0f}"
interpolationResults = packageResults.interpolationResults.getDictionary()
# Distance in image space:
for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER:
interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset]["dense"]].keys())
for interpolationFactor in interpolationFactors:
metricResults = [
interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset]["conv"]][interpolationFactor]["interpolateLatentSpace"]["metricImageSpace"]
] + [
interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset]["dense"]][interpolationFactor][interpolationTechnique]["metricImageSpace"]
for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES
]
x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES) + 1)
means = np.array(list(map(lambda x: x.mean, metricResults)))
stds = np.array(list(map(lambda x: x.standardDeviation, metricResults)))
labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES["interpolateLatentSpace"] + "_{\mathrm{conv}}$"] + \
[datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] + ("_{\mathrm{dense}}$" if interpolationTechnique == "interpolateLatentSpace" else "") for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES]
plt.figure(figsize=(4, 8))
bars = plt.bar(x, means, yerr=stds, capsize=5)
plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90)
plt.xlabel("Proposed Interpolation $\mathbf{x}$", fontsize=FONTSIZE)
plt.ylabel("$\mathcal{BCE}[\mathbf{x}_{\mathrm{centre}}, \mathbf{x}]$", fontsize=FONTSIZE)
plt.ylim(ymin=0)
plt.yticks(fontsize=MINI_FONTSIZE)
maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults))
extraHeight = 0.0125 * maxVal
for bar, error, mean in zip(bars, stds, means):
plt.text(
bar.get_x() + 0.5 * bar.get_width(),
mean + error + extraHeight,
NUMBER_FORMAT.format(mean),
ha="center",
va="bottom",
rotation=0,
fontsize=MINI_FONTSIZE
)
plt.tight_layout()
plt.savefig(getRecordedResultsRoute(dataset + "_" + interpolationFactor + "_" + "metricImageSpace.png"))
plt.close()
# Distance in latent space:
for dataset in datasetInfo.INTERPOLATION_DATASET_ORDER:
interpolationFactors = list(interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset]["dense"]].keys())
for interpolationFactor in interpolationFactors:
for architecture in datasetInfo.ARCH_TYPES:
metricResults = [
interpolationResults[dataset][datasetInfo.DATASET_ARCH_NAMES[dataset][architecture]][interpolationFactor][interpolationTechnique]["metricLatentSpace"]
for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES
]
x = np.arange(len(datasetInfo.INTERPOLATION_TECHNIQUES))
means = np.array(list(map(lambda x: x.mean, metricResults)))
stds = np.array(list(map(lambda x: x.standardDeviation, metricResults)))
labels = [datasetInfo.INTERPOLATE_TECHNIQUE_NAMES[interpolationTechnique] for interpolationTechnique in datasetInfo.INTERPOLATION_TECHNIQUES]
plt.figure(figsize=(4, 6))
bars = plt.bar(x, means, yerr=stds, capsize=5)
plt.xticks(x, labels, fontsize=FONTSIZE, rotation=90)
plt.xlabel("Proposed Interpolation $\mathbf{x}$", fontsize=FONTSIZE)
plt.ylabel("ED(enc(x_centre), enc(x)) in Latent Space", fontsize=FONTSIZE)
plt.ylim(ymin=0)
plt.yticks(fontsize=FONTSIZE)
maxVal = max(map(lambda x: x.mean + x.standardDeviation, metricResults))
extraHeight = 0.0125 * maxVal
for bar, error, mean in zip(bars, stds, means):
plt.text(
bar.get_x() + 0.5 * bar.get_width(),
mean + error + extraHeight,
NUMBER_FORMAT.format(mean),
ha="center",
va="bottom",
fontsize=MINI_FONTSIZE,
rotation=0
)
plt.tight_layout()
plt.savefig(getRecordedResultsRoute(dataset + "_" + architecture + "_" + interpolationFactor + "_" + "metricLatentSpace.png"))
plt.close()
| 2.5 | 2 |
esim/modules/exceptions.py | PunchyArchy/esim | 0 | 12791788 | """ Exceptions """
class NoAsuPolygon(Exception):
    # Exception raised when the specified polygon does not exist in the ASU.
    def __init__(self):
        text = 'No record found in the asu_poligons table for this polygon. ' \
               'Register it first.'
super().__init__(text)
| 2.890625 | 3 |
images/spark/outbound-relay/status_server.py | splunk/deep-learning-toolkit | 11 | 12791789 | <filename>images/spark/outbound-relay/status_server.py
from waitress import serve
from flask import Flask, request
import threading
import http
class StatusServer(object):
def __init__(self):
self.source_done = threading.Event()
self.source_error_event = threading.Event()
self._source_error_message = None
self.sink_done = threading.Event()
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.thread.start()
def run(self):
app = Flask(__name__)
app.add_url_rule('/', view_func=self.get_handler, methods=["GET"])
app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=["PUT"])
app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=["PUT"])
serve(
app,
host="0.0.0.0",
port=8889,
threads=1,
)
def get_handler(self):
if self.source_error_event.is_set():
return 'error', http.HTTPStatus.OK
if not self.source_done.is_set():
return 'receiving', http.HTTPStatus.OK
if not self.sink_done.is_set():
return 'sending', http.HTTPStatus.OK
return 'done', http.HTTPStatus.OK
def source_error_handler(self):
self.source_error_event.set()
self._source_error_message = request.data.decode()
return '', http.HTTPStatus.OK
def source_done_handler(self):
self.source_done.set()
return '', http.HTTPStatus.OK
@property
def source_error_message(self):
if not self.source_error_event.is_set():
return None
return self._source_error_message
@property
def source_sent_all_data(self):
return self.source_done.is_set()
def wait_for_source_sent_all_data(self):
self.source_done.wait()
@property
def sink_received_all_data(self):
return self.sink_done.is_set()
def mark_sink_received_all_data(self):
self.sink_done.set()
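# Hedged usage notes (endpoint names and port are taken from the handlers above, the
# example calls are illustrative): the source side reports completion with
# PUT /source_done, or an error message in the body of PUT /source_error, while
# pollers GET / on port 8889 and receive one of "receiving", "sending", "error" or "done".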
| 2.4375 | 2 |
solutions/121-BestTimeToBuy&SellStock.py | Reyansh14/leetcode-solutions | 0 | 12791790 | <gh_stars>0
# Notes: an example of the sliding window technique. Keep a left pointer (currMin, the lowest price seen so far) and a right pointer (prices[i]). While iterating through prices, update maxProfit whenever the current profit prices[i] - currMin exceeds it, and move the left pointer by setting currMin = prices[i] whenever a lower price appears. At the end, return maxProfit.
# Space Complexity: O(1)
# Time Complexity: O(n)
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if len(prices) == 1:
return 0
currMin = prices[0]
maxProfit = 0
for i in range(1, len(prices)):
currProfit = (prices[i] - currMin)
if (currProfit > maxProfit):
maxProfit = currProfit
if (prices[i] < currMin):
currMin = prices[i]
return maxProfit
| 3.84375 | 4 |
orange3/Orange/preprocess/normalize.py | rgschmitz1/BioDepot-workflow-builder | 54 | 12791791 | <reponame>rgschmitz1/BioDepot-workflow-builder
from Orange.data import ContinuousVariable, Domain
from Orange.statistics import distribution
from Orange.util import Reprable
from .preprocess import Normalize
from .transformation import Normalizer as Norm
__all__ = ["Normalizer"]
class Normalizer(Reprable):
def __init__(
self, zero_based=True, norm_type=Normalize.NormalizeBySD, transform_class=False
):
self.zero_based = zero_based
self.norm_type = norm_type
self.transform_class = transform_class
def __call__(self, data):
dists = distribution.get_distributions(data)
new_attrs = [
self.normalize(dists[i], var)
for (i, var) in enumerate(data.domain.attributes)
]
new_class_vars = data.domain.class_vars
if self.transform_class:
attr_len = len(data.domain.attributes)
new_class_vars = [
self.normalize(dists[i + attr_len], var)
for (i, var) in enumerate(data.domain.class_vars)
]
domain = Domain(new_attrs, new_class_vars, data.domain.metas)
return data.transform(domain)
def normalize(self, dist, var):
if not var.is_continuous:
return var
elif self.norm_type == Normalize.NormalizeBySD:
return self.normalize_by_sd(dist, var)
elif self.norm_type == Normalize.NormalizeBySpan:
return self.normalize_by_span(dist, var)
def normalize_by_sd(self, dist, var):
avg, sd = (dist.mean(), dist.standard_deviation()) if dist.size else (0, 1)
if sd == 0:
sd = 1
return ContinuousVariable(
var.name, compute_value=Norm(var, avg, 1 / sd), sparse=var.sparse
)
def normalize_by_span(self, dist, var):
dma, dmi = dist.max(), dist.min()
diff = dma - dmi
if diff < 1e-15:
diff = 1
if self.zero_based:
return ContinuousVariable(
var.name, compute_value=Norm(var, dmi, 1 / diff), sparse=var.sparse
)
else:
return ContinuousVariable(
var.name,
compute_value=Norm(var, (dma + dmi) / 2, 2 / diff),
sparse=var.sparse,
)
| 2.484375 | 2 |
setup.py | Mumbleskates/jsane | 131 | 12791792 | <filename>setup.py
#!/usr/bin/env python
import sys
from jsane import __version__
assert sys.version >= '2.7', ("Requires Python v2.7 or above, get with the "
"times, grandpa.")
from setuptools import setup
classifiers = [
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules",
]
install_requires = []
setup_requires = ['pytest-runner']
tests_require = ['pep8', 'pytest'] + install_requires
setup(
name="jsane",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/skorokithakis/jsane/",
description="A saner way to parse JSON.",
long_description=open("README.rst").read(),
license="MIT",
classifiers=classifiers,
packages=["jsane"],
setup_requires=setup_requires,
tests_require=tests_require,
install_requires=install_requires,
test_suite='jsane.tests',
)
| 1.484375 | 1 |
src/cryptoadvance/specter/services/swan/config.py | Lobbelt/specter-desktop | 0 | 12791793 | <reponame>Lobbelt/specter-desktop
"""
Swan API uses PKCE OAuth2. Per Swan's API team: The client secret here is not considered
to be a real secret. There is no reasonable attack vector for this secret being public.
"""
class BaseConfig:
SWAN_CLIENT_ID = "specter-dev"
SWAN_CLIENT_SECRET = (
"<KEY>"
)
SWAN_API_URL = "https://dev-api.swanbitcoin.com"
class ProductionConfig(BaseConfig):
SWAN_CLIENT_ID = "specter"
SWAN_CLIENT_SECRET = (
"<KEY>
)
SWAN_API_URL = "https://api.swanbitcoin.com"
| 1.320313 | 1 |
tests_ui/helpers/db_actions.py | LoshmanovNA/pet-project | 1 | 12791794 | <filename>tests_ui/helpers/db_actions.py
from datetime import datetime
import time
class DBManager:
"""Установка соединения с БД и методы для выполнения запросов"""
def __init__(self, DBModel):
"""
Gets database information from mysql_conf.py and creates a connection.
"""
import pymysql
retry_count = 3
backoff = 1.2 # Time to wait (in seconds) between retries.
count = 0
while count < retry_count:
try:
self.conn = pymysql.connect(host=DBModel.db_server,
user=DBModel.db_user,
passwd=DBModel.db_pass,
db=DBModel.db_schema,
port=DBModel.db_port)
self.conn.autocommit(True)
self.cursor = self.conn.cursor()
return
except Exception:
time.sleep(backoff)
count = count + 1
if retry_count == 3:
raise Exception("Unable to connect to Database after 3 retries.")
def _query_fetch_all(self, query, values):
"""
Executes a db query, gets all the values, and closes the connection.
"""
self.cursor.execute(query, values)
retval = self.cursor.fetchall()
# self.__close_db()
return retval
def _query_fetch_one(self, query, values):
"""
Executes a db query, gets the first value, and closes the connection.
"""
self.cursor.execute(query, values)
retval = self.cursor.fetchone()
# self.__close_db()
return retval
def _execute_query(self, query, values):
"""
Executes a query to the test_db and closes the connection afterwards.
"""
retval = self.cursor.execute(query, values)
# self.__close_db()
return retval
def close_db(self):
self.cursor.close()
self.conn.close()
def activate_new_account_db(self, email, pass_hash):
"""Вносим изменения в тестовую БД для активации УЗ"""
confirmed_time = datetime.today().strftime("%Y-%m-%d %H:%M:%S") # Дата для сохранения в БД
sql = f"UPDATE users SET confirmed_at='{confirmed_time}', sms_confirmed_at='{confirmed_time}', " \
f"encrypted_password=%s, first_password_changed=1 WHERE email=%s"
self._execute_query(sql, (pass_hash, email))
def should_be_new_record_into_db(self, value, column, table):
"""Проверяем наличие новой записи в БД"""
sql = f"SELECT * FROM {table} WHERE {column} = %s"
response = self._query_fetch_one(sql, value)
assert value in response
def delete_new_account_from_db(self, email):
"""Удаляем запись о созданной УЗ из БД и проверяем, что email не найден"""
sql_1 = "DELETE FROM users WHERE email=%s"
self._execute_query(sql_1, email)
sql_2 = "SELECT email FROM users WHERE email=%s"
assert not self._query_fetch_one(sql_2, email)
# @staticmethod
# def fake_data_json(path='path', email='email', pass_hash='<PASSWORD>'):
# with open(path, 'r', encoding='utf-8') as f:
# value = json.loads(f.read())
# value.update({
# 'email': email,
# 'encrypted_password': <PASSWORD>
# })
# return value
#
# def insert_new_row_into_db(self, fake_json, table):
# placeholders = ', '.join(['%s'] * len(fake_json))
# columns = ', '.join(fake_json.keys())
# values = list(fake_json.values())
# sql = f"INSERT INTO {table} ( {columns} ) VALUES ( {placeholders} );"
# self.execute_query(sql, values)
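

# Minimal usage sketch (illustrative only, not part of the original helper):
# DBModel normally comes from mysql_conf.py; the SimpleNamespace below stands in
# for it with hypothetical connection settings, and "users"/"email" are assumed
# table/column names, so this only runs against a matching test database.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_db_model = SimpleNamespace(
        db_server="localhost",
        db_user="test_user",
        db_pass="test_pass",
        db_schema="test_schema",
        db_port=3306,
    )
    manager = DBManager(demo_db_model)
    try:
        manager.should_be_new_record_into_db("user@example.com", "email", "users")
    finally:
        manager.close_db()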
| 2.609375 | 3 |
ch8/ex8.4.py | YChanHuang/Python-for-every-body-notes | 0 | 12791795 | <reponame>YChanHuang/Python-for-every-body-notes<filename>ch8/ex8.4.py
#solution 1
fname = input("Enter file name: ")
fh = open(fname)
lst = list()
for line in fh:
words = line.split()
for word in words:
if word in lst:
continue
else:
lst.append(word)
lst.sort()
print(lst)
#solution 2
fname = input("Enter file name: ")
fh = open(fname)
lst = list()
for line in fh:
line = line.rstrip()
words = line.split()
for word in words:
if word not in lst:
lst.append(word)
lst.sort()
print(lst)
| 3.765625 | 4 |
src/models/event_model.py | RoaringForkTech/rfv-events | 0 | 12791796 | import json
from dataclasses import dataclass
from typing import Optional
@dataclass()
class Event:
source: str # One of constants.EVENT_SOURCE
source_id: str # Unique ID from the source
name: str
_start_time: str = None
_end_time = None
_description = None
# {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'}
@property
def start_time(self) -> Optional[str]:
return self._start_time
@start_time.setter
def start_time(self, value: str):
self._start_time = value
# {'timezone': 'America/Denver', 'local': '2020-02-07T19:00:00', 'utc': '2020-02-08T02:00:00Z'}
@property
def end_time(self) -> Optional[str]:
return self._end_time
@end_time.setter
def end_time(self, value: str):
self._end_time = value
@property
def description(self) -> Optional[str]:
return self._description
@description.setter
def description(self, value: str):
self._description = value
def to_json(self) -> object:
obj = {
"source_id": self.source_id,
"source": self.source,
"name": self.name,
"start_time": self.start_time,
"end_time": self.end_time,
"description": self.description
}
return json.dumps(obj)
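

# Minimal usage sketch (not part of the original module): the field values below
# are made up and only show how the Event dataclass and to_json() fit together.
if __name__ == "__main__":
    event = Event(source="eventbrite", source_id="12345", name="Winter Concert")
    event.start_time = "2020-02-07T19:00:00"
    event.end_time = "2020-02-07T21:00:00"
    event.description = "An example event entry."
    print(event.to_json())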
| 3.015625 | 3 |
docs/conf.py | jbcurtin/cloud-fits | 3 | 12791797 | <reponame>jbcurtin/cloud-fits
# Configuration file for the Sphinx documentation builder.
# https://www.sphinx-doc.org/en/master/usage/configuration.html
project = 'Cloud Optimized Fits'
copyright = '2020, <NAME>'
author = '<NAME>'
# The full version, including alpha/beta/rc tags
with open('../VERSION', 'r') as stream:
release = stream.read()
html_theme = "sphinx_rtd_theme"
master_doc = 'index'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
html_static_path = ['_static']
| 1.28125 | 1 |
code/tmp_rtrip/test/curses_tests.py | emilyemorehouse/ast-and-me | 24 | 12791798 | import curses
from curses import textpad
def test_textpad(stdscr, insert_mode=False):
ncols, nlines = 8, 3
uly, ulx = 3, 2
if insert_mode:
mode = 'insert mode'
else:
mode = 'overwrite mode'
stdscr.addstr(uly - 3, ulx, 'Use Ctrl-G to end editing (%s).' % mode)
stdscr.addstr(uly - 2, ulx,
'Be sure to try typing in the lower-right corner.')
win = curses.newwin(nlines, ncols, uly, ulx)
textpad.rectangle(stdscr, uly - 1, ulx - 1, uly + nlines, ulx + ncols)
stdscr.refresh()
box = textpad.Textbox(win, insert_mode)
contents = box.edit()
stdscr.addstr(uly + ncols + 2, 0, 'Text entered in the box\n')
stdscr.addstr(repr(contents))
stdscr.addstr('\n')
stdscr.addstr('Press any key')
stdscr.getch()
for i in range(3):
stdscr.move(uly + ncols + 2 + i, 0)
stdscr.clrtoeol()
def main(stdscr):
stdscr.clear()
test_textpad(stdscr, False)
test_textpad(stdscr, True)
if __name__ == '__main__':
curses.wrapper(main)
| 3.484375 | 3 |
least_squares/warm_up/qudratic_regression.py | yimuw/yimu-blog | 8 | 12791799 | <gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
def generate_quadratic_data():
quadratic_a = 2.4
quadratic_b = -2.
quadratic_c = 1.
num_data = 30
noise = 2 * np.random.randn(num_data)
sampled_x = np.linspace(-10, 10., num_data)
sampled_y = quadratic_a * sampled_x * sampled_x + quadratic_b * sampled_x + quadratic_c + noise
return sampled_x, sampled_y
class LeastSquares:
def __init__(self, x, y):
self.x = x
self.y = y
self.data_mat = np.vstack(
[self.x * self.x, self.x,
np.ones_like(self.x)]).T
self.theta = np.array([0, 0, 0.])
def residual(self):
pred_y = self.data_mat @ self.theta
r = pred_y - self.y
return r
def cost(self):
r = self.residual()
return r.T @ r
def compute_jacobian(self):
return self.data_mat
def least_squares_solve(self):
for i in range(10):
print('iteration: {} cost: {}'.format(i, self.cost()))
J = self.compute_jacobian()
r = self.residual()
delta = np.linalg.solve(J.T @ J, -J.T @ r)
self.theta += delta
if np.linalg.norm(delta) < 1e-8:
print('converged iteration: {} cost: {}'.format(
i, self.cost()))
break
return self.theta
def main():
x_data, y_data = generate_quadratic_data()
solver = LeastSquares(x_data, y_data)
theta = solver.least_squares_solve()
x = np.linspace(-12, 12., 100)
a, b, c = theta
print('fitted coefficient (a,b,c):', theta.transpose())
pred_y = a * x * x + b * x + c
p1 = plt.plot(x_data, y_data, '*r')
p2 = plt.plot(x, pred_y, 'g')
plt.legend((p1[0], p2[0]), ('sampled data', 'fitted function'))
plt.title('Data points vs Fitted curve')
plt.show()
if __name__ == "__main__":
main()
| 3 | 3 |
swagger/action-server/rlbot_action_server/models/bot_action.py | tarehart/RLBotTwitchBroker | 2 | 12791800 | <reponame>tarehart/RLBotTwitchBroker<gh_stars>1-10
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from rlbot_action_server.models.base_model_ import Model
from rlbot_action_server.models.strategic_category import StrategicCategory # noqa: F401,E501
from rlbot_action_server import util
class BotAction(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, description: str=None, action_type: str=None, strategic_category: StrategicCategory=None, data: Dict=None): # noqa: E501
"""BotAction - a model defined in Swagger
:param description: The description of this BotAction. # noqa: E501
:type description: str
:param action_type: The action_type of this BotAction. # noqa: E501
:type action_type: str
:param strategic_category: The strategic_category of this BotAction. # noqa: E501
:type strategic_category: StrategicCategory
:param data: The data of this BotAction. # noqa: E501
:type data: Dict
"""
self.swagger_types = {
'description': str,
'action_type': str,
'strategic_category': StrategicCategory,
'data': Dict
}
self.attribute_map = {
'description': 'description',
'action_type': 'actionType',
'strategic_category': 'strategicCategory',
'data': 'data'
}
self._description = description
self._action_type = action_type
self._strategic_category = strategic_category
self._data = data
@classmethod
def from_dict(cls, dikt) -> 'BotAction':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The BotAction of this BotAction. # noqa: E501
:rtype: BotAction
"""
return util.deserialize_model(dikt, cls)
@property
def description(self) -> str:
"""Gets the description of this BotAction.
:return: The description of this BotAction.
:rtype: str
"""
return self._description
@description.setter
def description(self, description: str):
"""Sets the description of this BotAction.
:param description: The description of this BotAction.
:type description: str
"""
self._description = description
@property
def action_type(self) -> str:
"""Gets the action_type of this BotAction.
:return: The action_type of this BotAction.
:rtype: str
"""
return self._action_type
@action_type.setter
def action_type(self, action_type: str):
"""Sets the action_type of this BotAction.
:param action_type: The action_type of this BotAction.
:type action_type: str
"""
if action_type is None:
raise ValueError("Invalid value for `action_type`, must not be `None`") # noqa: E501
self._action_type = action_type
@property
def strategic_category(self) -> StrategicCategory:
"""Gets the strategic_category of this BotAction.
:return: The strategic_category of this BotAction.
:rtype: StrategicCategory
"""
return self._strategic_category
@strategic_category.setter
def strategic_category(self, strategic_category: StrategicCategory):
"""Sets the strategic_category of this BotAction.
:param strategic_category: The strategic_category of this BotAction.
:type strategic_category: StrategicCategory
"""
self._strategic_category = strategic_category
@property
def data(self) -> Dict:
"""Gets the data of this BotAction.
:return: The data of this BotAction.
:rtype: Dict
"""
return self._data
@data.setter
def data(self, data: Dict):
"""Sets the data of this BotAction.
:param data: The data of this BotAction.
:type data: Dict
"""
self._data = data
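

# Minimal usage sketch (not part of the generated module; the values below are
# made up): constructs the model directly and reads back its properties.
if __name__ == "__main__":
    action = BotAction(description="Demo action", action_type="kickoff")
    print(action.action_type, action.description)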
| 1.914063 | 2 |
am4894pd/utils.py | andrewm4894/am4894pd | 0 | 12791801 | import pandas as pd
import numpy as np
def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''):
""" Print a string to describe a df.
"""
info = info_prefix
if shape:
info = f'{info}Shape = {df.shape}'
if cols:
info = f'{info} , Cols = {df.columns.tolist()}'
print(info)
if return_info:
return info
def df_dummy_ts(start='2019-01-01', end='2019-01-02', freq='1s', n_cols=5,
smooth_n: int = 100, smooth_f: str = 'mean', dropna: bool = True):
""" Make dummy ts df.
"""
time_range = pd.DataFrame(pd.date_range(start, end, freq=freq), columns=['time'])
data = pd.DataFrame(np.random.randn(len(time_range), n_cols), columns=[f'col{n}' for n in range(n_cols)])
df = pd.concat([time_range, data], axis=1)
df = df.set_index('time')
if smooth_n:
if smooth_f == 'mean':
df = df.rolling(smooth_n).mean()
elif smooth_f == 'min':
df = df.rolling(smooth_n).min()
elif smooth_f == 'max':
df = df.rolling(smooth_n).max()
elif smooth_f == 'median':
df = df.rolling(smooth_n).median()
else:
raise NotImplementedError(f'... {smooth_f} not implemented ...')
if dropna:
df = df.dropna()
return df
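

# Minimal usage sketch (not part of the original module): builds a small dummy
# time-series frame and prints its shape and columns via df_info.
if __name__ == "__main__":
    df_example = df_dummy_ts(start='2019-01-01', end='2019-01-01 01:00:00',
                             freq='1min', n_cols=2, smooth_n=5)
    df_info(df_example, info_prefix='dummy ts: ')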
| 3.015625 | 3 |
heritago/heritages/views.py | SWE574-Groupago/heritago | 6 | 12791802 | from django.core.exceptions import ObjectDoesNotExist
from django.core.files.uploadedfile import UploadedFile
from django.conf import settings
from django.http import HttpResponse
from rest_framework import generics
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.exceptions import PermissionDenied
from django.contrib.auth.models import User
from heritages.models import Heritage, Multimedia, Annotation
from heritages.search import search_heritages, search_annotations
from heritages.serializers import HeritageSerializer, MultimediaSerializer, AnnotationSerializer, UserSerializer, \
AnnotationPaleSerializer
from heritages.permissions import IsOwner, IsNotAnonymous, IsSelf
class HeritagesListView(generics.ListCreateAPIView):
queryset = Heritage.objects.all()
serializer_class = HeritageSerializer
def list(self, request, *args, **kwargs):
keyword = self.request.query_params.get("keyword", None)
if not keyword:
return super().list(request, *args, **kwargs)
result = Response(search_heritages(keyword)).data
return Response(i["_source"] for i in result["hits"]["hits"])
class HeritageView(generics.RetrieveUpdateDestroyAPIView):
queryset = Heritage.objects.all()
serializer_class = HeritageSerializer
class MultimediaListView(generics.ListCreateAPIView):
serializer_class = MultimediaSerializer
def get_queryset(self):
try:
heritage = Heritage.objects.get(pk=self.kwargs["heritage_id"])
except ObjectDoesNotExist:
raise NotFound()
return heritage.multimedia
def perform_create(self, serializer):
heritage_id = self.kwargs["heritage_id"]
try:
heritage = Heritage.objects.get(pk=heritage_id)
except ObjectDoesNotExist:
raise NotFound()
return serializer.save(heritage=heritage)
class MultimediaView(generics.RetrieveDestroyAPIView):
queryset = Multimedia.objects.all()
serializer_class = MultimediaSerializer
class MultimediaFileView(ViewSet):
@staticmethod
def get_file(request, multimedia_id):
try:
m = Multimedia.objects.get(pk=multimedia_id)
except ObjectDoesNotExist:
raise NotFound(multimedia_id)
file = UploadedFile(m.file)
return HttpResponse(file, content_type="image/png")
class AnnotationListView(generics.ListCreateAPIView):
serializer_class = AnnotationSerializer
def get_queryset(self):
queryset = Annotation.objects.all()
heritage_id = self.kwargs["heritage_id"]
if heritage_id is not None:
queryset = queryset.filter(target__target_id__contains=heritage_id)
return queryset
        else:
            raise NotFound()
def get_serializer_context(self):
return {"target_id": self.request.build_absolute_uri(),
"heritage_id": self.kwargs["heritage_id"]}
def list(self, request, *args, **kwargs):
keyword = self.request.query_params.get("keyword", None)
if not keyword:
return super().list(request, *args, **kwargs)
result = Response(search_annotations(keyword)).data
return Response(i["_source"] for i in result["hits"]["hits"])
class AnnotationPaleListView(generics.ListCreateAPIView):
serializer_class = AnnotationPaleSerializer
def get_queryset(self):
return Annotation.objects.all()
def get_serializer_context(self):
return {"target_id": self.request.build_absolute_uri()}
def list(self, request, *args, **kwargs):
keyword = self.request.query_params.get("keyword", None)
if not keyword:
return super().list(request, *args, **kwargs)
result = Response(search_annotations(keyword)).data
return Response(i["_source"] for i in result["hits"]["hits"])
class AnnotationView(generics.RetrieveUpdateDestroyAPIView):
queryset = Annotation.objects.all()
serializer_class = AnnotationSerializer
class AnnotationPaleView(generics.RetrieveUpdateDestroyAPIView):
queryset = Annotation.objects.all()
serializer_class = AnnotationPaleSerializer
class Users(mixins.CreateModelMixin, viewsets.GenericViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
class UserDetail(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (IsSelf,)
def get_me(self, request):
return Response(self.serializer_class(request.user).data)
| 1.90625 | 2 |
05-Inheritance/problem-5-Restaurant/project/beverage/hot_beverage.py | Beshkov/OOP | 1 | 12791803 | from project.beverage.beverage import Beverage
class HotBeverage(Beverage):
pass | 1 | 1 |
Transposicion/TransposicionSerie.py | pordnajela/AlgoritmosCriptografiaClasica | 0 | 12791804 | <reponame>pordnajela/AlgoritmosCriptografiaClasica
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
class TransposicionSerie(object):
def __init__(self, series, cadena=None):
self.cadena = cadena
self.series = series
self.textoClaro = ""
self.textoCifrado = ""
def cifrar(self):
textoCifrado = ""
linea_a_cifrar = ""
saltosLinea = len(self.cadena)-1
numFunciones = len(self.series)
i = 0
j = 0
for linea in self.cadena:
while i < numFunciones:
if len(linea) == 0:
i += 1
break
linea_a_cifrar += self.aplicarSeries_cifrar(linea, i, len(self.series[i]))
i += 1
if j < saltosLinea:
textoCifrado += linea_a_cifrar + "\n"
j += 1
else:
textoCifrado += linea_a_cifrar
linea_a_cifrar = ""
i = 0
self.textoCifrado = textoCifrado
def descifrar(self):
textoDescifrado = ""
linea_a_descifrar = ""
saltosLinea = len(self.cadena)-1
funciones = self.concatenarFunciones()
i = 0
j = 0
for linea in self.cadena:
if len(linea) == 0:
i += 1
break
            if j < saltosLinea:
                linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones))
                textoDescifrado += linea_a_descifrar + "\n"
                j += 1
            else:
linea_a_descifrar += self.aplicarSeries_descifrar(linea, funciones, len(funciones))
textoDescifrado += linea_a_descifrar
linea_a_descifrar = ""
i = 0
self.textoClaro = textoDescifrado
def aplicarSeries_cifrar(self, linea, i, tamanioFuncion):
lineaNueva = ""
j = 0
while j < tamanioFuncion:
lineaNueva += linea[ int(self.series[i][j])-1 ]
j += 1
return lineaNueva
def aplicarSeries_descifrar(self, linea, funciones, lenFunciones):
nuevoBloque = {}
bloqueDescifrado = list()
pos = 0
i = 0
while i < lenFunciones:
pos = int(funciones[i])-1
nuevoBloque.update({pos:linea[i]})
i += 1
        # Read the characters back in position order; iterating in insertion order
        # would simply reproduce the ciphertext instead of undoing the permutation.
        for llave in sorted(nuevoBloque):
            bloqueDescifrado.append(nuevoBloque[llave])
bloqueDescifrado = ''.join(bloqueDescifrado)
return bloqueDescifrado
def concatenarFunciones(self):
#[ [], [], [] ]
funciones = list()
longSeries = len(self.series)
tmp = list()
i = 0
j = 0
while i < longSeries:
tmp = self.series[i]
while j < len(tmp):
funciones.append(tmp[j])
j += 1
i += 1
j = 0
return funciones | 3.0625 | 3 |
preprocessing/utils.py | clitic/signauth | 1 | 12791805 | import glob
import cv2
import numpy as np
def globimgs(path, globs:list):
"""returns a list of files with path with globing with more than one extensions"""
imgs = []
for i in globs:
imgs.extend(glob.glob(path + i))
paths = []
for path in imgs:
paths.append(path.replace("\\", "/"))
return paths
def scaneffects(img):
dilated_img = cv2.dilate(img, np.ones((7, 7), np.uint8))
bg_img = cv2.medianBlur(dilated_img, 15)
diff_img = 255 - cv2.absdiff(img, bg_img)
norm_img = diff_img.copy()
cv2.normalize(diff_img, norm_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
_, thr_img = cv2.threshold(norm_img, 230, 0, cv2.THRESH_TRUNC)
cv2.normalize(thr_img, thr_img, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
return thr_img
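

# Minimal usage sketch (not part of the original module): the "samples/" folder
# and extensions are assumptions; scaneffects expects a grayscale uint8 image.
if __name__ == "__main__":
    for image_path in globimgs("samples/", ["*.png", "*.jpg"]):
        gray = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
        if gray is None:
            continue
        cleaned = scaneffects(gray)
        cv2.imwrite(image_path + ".scan.png", cleaned)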
| 2.953125 | 3 |
tests/unittests/test_folder.py | ZPascal/grafana_api_sdk | 2 | 12791806 | <filename>tests/unittests/test_folder.py
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from src.grafana_api.model import APIModel
from src.grafana_api.folder import Folder
class FolderTestCase(TestCase):
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folders(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=list([{"title": None, "id": 12}]))
call_the_api_mock.return_value = mock
self.assertEqual(list([{"title": None, "id": 12}]), folder.get_folders())
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folders_error_response(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=list())
call_the_api_mock.return_value = mock
with self.assertRaises(Exception):
folder.get_folders()
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_by_uid(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"title": None, "id": 12}))
call_the_api_mock.return_value = mock
self.assertEqual(
dict({"title": None, "id": 12}), folder.get_folder_by_uid("xty13y")
)
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_by_uid_no_uid(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(ValueError):
folder.get_folder_by_uid("")
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_by_uid_error_response(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(Exception):
folder.get_folder_by_uid("xty13y")
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_by_id(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"title": None, "id": 12}))
call_the_api_mock.return_value = mock
self.assertEqual(dict({"title": None, "id": 12}), folder.get_folder_by_id(12))
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_by_id_no_id(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(ValueError):
folder.get_folder_by_id(0)
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_by_id_error_response(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(Exception):
folder.get_folder_by_id(10)
@patch("src.grafana_api.api.Api.call_the_api")
def test_create_folder(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"title": None, "id": 12}))
call_the_api_mock.return_value = mock
self.assertEqual(dict({"title": None, "id": 12}), folder.create_folder("test"))
@patch("src.grafana_api.api.Api.call_the_api")
def test_create_folder_specified_uid(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"title": None, "id": 12, "uid": "test"}))
call_the_api_mock.return_value = mock
self.assertEqual(
dict({"title": None, "id": 12, "uid": "test"}),
folder.create_folder("test", "test"),
)
@patch("src.grafana_api.api.Api.call_the_api")
def test_create_folder_no_title(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(ValueError):
folder.create_folder(MagicMock())
@patch("src.grafana_api.api.Api.call_the_api")
def test_create_folder_error_response(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(Exception):
folder.create_folder("test")
@patch("src.grafana_api.api.Api.call_the_api")
def test_update_folder(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"title": "test1", "id": 12}))
call_the_api_mock.return_value = mock
self.assertEqual(
dict({"title": "test1", "id": 12}),
folder.update_folder("test", "test1", 10),
)
@patch("src.grafana_api.api.Api.call_the_api")
def test_update_folder_no_uid(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"title": "test", "id": 12}))
call_the_api_mock.return_value = mock
self.assertEqual(
dict({"title": "test", "id": 12}),
folder.update_folder("test", overwrite=True),
)
@patch("src.grafana_api.api.Api.call_the_api")
def test_update_folder_overwrite_true(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"title": "test", "id": 12}))
call_the_api_mock.return_value = mock
self.assertEqual(
dict({"title": "test", "id": 12}),
folder.update_folder("test", "test", overwrite=True),
)
@patch("src.grafana_api.api.Api.call_the_api")
def test_update_folder_no_title(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(ValueError):
folder.update_folder(MagicMock(), MagicMock())
@patch("src.grafana_api.api.Api.call_the_api")
def test_update_folder_error_response(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(Exception):
folder.update_folder("test", "test", 10)
@patch("src.grafana_api.api.Api.call_the_api")
def test_delete_folder(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"message": "Folder deleted"}))
call_the_api_mock.return_value = mock
self.assertEqual(None, folder.delete_folder("test"))
@patch("src.grafana_api.api.Api.call_the_api")
def test_delete_folder_no_uid(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict())
call_the_api_mock.return_value = mock
with self.assertRaises(ValueError):
folder.delete_folder("")
@patch("src.grafana_api.api.Api.call_the_api")
def test_delete_folder_error_response(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"message": "error"}))
call_the_api_mock.return_value = mock
with self.assertRaises(Exception):
folder.delete_folder("test")
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_permissions(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=list([{"id": "test"}]))
call_the_api_mock.return_value = mock
self.assertEqual(list([{"id": "test"}]), folder.get_folder_permissions("test"))
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_permissions_no_uid(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
call_the_api_mock.return_value = list()
with self.assertRaises(ValueError):
folder.get_folder_permissions("")
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_folder_permissions_error_response(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=list([{"test": "test"}]))
call_the_api_mock.return_value = mock
with self.assertRaises(Exception):
folder.get_folder_permissions("test")
@patch("src.grafana_api.api.Api.call_the_api")
def test_update_folder_permissions(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"message": "Folder permissions updated"}))
call_the_api_mock.return_value = mock
self.assertEqual(
None, folder.update_folder_permissions("test", dict({"test": "test"}))
)
@patch("src.grafana_api.api.Api.call_the_api")
def test_update_folder_permissions_no_uid(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
call_the_api_mock.return_value = dict()
with self.assertRaises(ValueError):
folder.update_folder_permissions("", dict())
@patch("src.grafana_api.api.Api.call_the_api")
def test_update_folder_permissions_error_response(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(return_value=dict({"message": "test"}))
call_the_api_mock.return_value = mock
with self.assertRaises(Exception):
folder.update_folder_permissions("test", dict({"test": "test"}))
@patch("src.grafana_api.folder.Folder.get_all_folder_ids_and_names")
def test_get_folder_id_by_dashboard_path(self, all_folder_ids_and_names_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
all_folder_ids_and_names_mock.return_value = list([{"title": "test", "id": 12}])
self.assertEqual(
12, folder.get_folder_id_by_dashboard_path(dashboard_path="test")
)
def test_get_folder_id_by_dashboard_path_general_path(self):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
self.assertEqual(
0, folder.get_folder_id_by_dashboard_path(dashboard_path="General")
)
def test_get_folder_id_by_dashboard_path_no_dashboard_path_defined(self):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
with self.assertRaises(ValueError):
folder.get_folder_id_by_dashboard_path(dashboard_path="")
@patch("src.grafana_api.folder.Folder.get_all_folder_ids_and_names")
def test_get_folder_id_by_dashboard_path_no_title_match(
self, all_folder_ids_and_names_mock
):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
all_folder_ids_and_names_mock.return_value = list(
[{"title": None, "id": "xty13y"}]
)
with self.assertRaises(Exception):
folder.get_folder_id_by_dashboard_path(dashboard_path="test")
@patch("src.grafana_api.api.Api.call_the_api")
def test_get_all_folder_ids_and_names(self, call_the_api_mock):
model: APIModel = APIModel(host=MagicMock(), token=MagicMock())
folder: Folder = Folder(grafana_api_model=model)
mock: Mock = Mock()
mock.json = Mock(
return_value=list([{"title": "test", "id": 12, "test": "test"}])
)
call_the_api_mock.return_value = mock
self.assertEqual(
list([{"title": "test", "id": 12}]), folder.get_all_folder_ids_and_names()
)
| 2.5625 | 3 |
testing/playground/esp8266_communication.py | dpm76/Microvacbot | 1 | 12791807 | '''
Created on 31 mar. 2020
@author: David
'''
from sys import path
path.append("/flash/userapp")
from pyb import LED, Switch, Pin
from uasyncio import get_event_loop, sleep_ms as ua_sleep_ms
from uvacbot.io.esp8266 import Connection, Esp8266
class LedToggleConnection(Connection):
async def onConnected(self):
print("Connected: {0}".format(self._clientId))
def onClose(self):
print("Closed: {0}".format(self._clientId))
async def onReceived(self, message):
if message.startswith("LED"):
try:
ledId = int(message.split(":")[1])
#The Nucleo-F767ZI board has 3 on-board user leds
if ledId >= 1 and ledId <= 3:
LED(ledId).toggle()
print("Led['{0}'] toggled.".format(ledId))
else:
print("Led not found. Please, try again.")
except:
print("I don't understand '{0}'. Please, try again.".format(message))
class EchoConnection(Connection):
async def onConnected(self):
print("Connected!")
async def onReceived(self, message):
echo = message.strip()
if echo != "":
self.send("echo: '{0}'\r\n".format(echo))
def onClose(self):
print("Closed.")
class RemoteExecConnection(Connection):
async def onReceived(self, message):
code = message.strip()
if code != "":
try:
exec("{0}\r\n".format(str(code, Esp8266.BYTES_ENCODING)))
except Exception as ex:
self.send("Exception: {0}\r\n".format(ex))
async def serve(esp):
esp.initServer(EchoConnection)
#esp.initServer(LedToggleConnection)
print("Waiting for connections...")
sw = Switch()
while not sw.value():
await ua_sleep_ms(200)
esp.stopServer()
print("Server stopped.")
def main():
print("*** Esp8266 communication test ***")
print("Press switch button to finish.")
esp = None # Uncomment ESP8266 configuration properly
#esp = Esp8266(3, Pin.board.D3, 115200, debug=True) #NUCLEO-L476RG
# On NUCLEO-F767ZI TX6 is on CN7-01 (PC6) and RX6 is on CN7-11 (PC7)
#esp = Esp8266(6, Pin.board.D8, 115200, debug=True) #NUCLEO-F767ZI
    if not esp:
        raise Exception("Create an Esp8266 object first.")
loop = get_event_loop()
esp.start()
assert esp.isPresent()
try:
#esp.setOperatingMode(Esp8266.OP_MODE_CLIENT)
#esp.join("SSID", "PASSWD")
#esp.setStaIpAddress("192.168.1.200", "192.168.1.1")
esp.setOperatingMode(Esp8266.OP_MODE_AP)
esp.setAccessPointConfig("ESP8266-AP", "", 1, Esp8266.SECURITY_OPEN)
loop.run_until_complete(serve(esp))
finally:
esp._flushRx()
esp.cleanup()
print("Program finished")
if __name__ == "__main__":
main()
| 2.75 | 3 |
src/Data/__init__.py | ArmanZarrin/Telegram--Stat | 0 | 12791808 | from pathlib import Path
DATA_DIR= Path(__file__).resolve().parent | 1.398438 | 1 |
tests/rate_limit_test.py | maruthiprithivi/tweetf0rm | 1 | 12791809 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.DEBUG)
from nose.tools import nottest
import sys, os, json, exceptions
sys.path.append("..")
from tweetf0rm.utils import full_stack
from tweetf0rm.proxies import proxy_checker
import multiprocessing as mp
from tweetf0rm.twitterapi.users import User
class Handler(object):
def append(self,data, bucket=None, key=None):
logger.info(data)
pass
def call_user_api(apikeys, client_args):
user_api = User(apikeys=apikeys, client_args=client_args)
user_api.find_all_friend_ids(53039176, [Handler()])
class TestTwitterRateLimit:
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup(self):
import sys, os, json
#sys.path.append("..")
with open(os.path.abspath('rate_limit_test.json'), 'rb') as config_f, open(os.path.abspath('proxy.json'), 'rb') as proxy_f:
self.config = json.load(config_f)
self.proxies = json.load(proxy_f)
def teardown(self):
pass
@nottest
def test_china_proxy(self):
apikeys = self.config['apikeys']['i0mf0rmer13']
client_args = {
"timeout": 300,
"proxies": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict']
}
call_user_api(apikeys, client_args)
@nottest
def test_rate_limit(self):
from tweetf0rm.proxies import proxy_checker
proxy_list = proxy_checker(self.proxies['proxies'])
ps = []
for i, twitter_user in enumerate(self.config['apikeys']):
apikeys = self.config['apikeys'][twitter_user]
client_args = {
"timeout": 300,
"proxies": {'http':'172.16.17.32:8080'}#proxy_list[i]['proxy_dict']
}
logger.info(client_args)
p = mp.Process(target=call_user_api, args=(apikeys, client_args, ))
ps.append(p)
p.start()
for p in ps:
p.join()
if __name__=="__main__":
import nose
#nose.main()
result = nose.run(TestTwitterRateLimit) | 1.960938 | 2 |
organizing_hub/templatetags/organizing_hub_tags.py | JoshZero87/site | 4 | 12791810 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import template
from django.conf import settings
from django.urls import reverse_lazy
from calls.models import CallCampaignStatus
from local_groups.models import find_local_group_by_user
from organizing_hub.models import OrganizingHubLoginAlert
import logging
logger = logging.getLogger(__name__)
register = template.Library()
BSD_CREATE_ACCOUNT_URL = settings.BSD_CREATE_ACCOUNT_URL
ORGANIZING_DOCS_URL = settings.ORGANIZING_DOCS_URL
ORGANIZING_EMAIL = settings.ORGANIZING_EMAIL
ORGANIZING_GUIDES_URL = settings.ORGANIZING_GUIDES_URL
ORGANIZING_HUB_ADMINS_ENABLED = settings.ORGANIZING_HUB_ADMINS_ENABLED
ORGANIZING_HUB_CALL_CALLERS_URL = settings.ORGANIZING_HUB_CALL_CALLERS_URL
ORGANIZING_HUB_CALL_MANAGE_URL = settings.ORGANIZING_HUB_CALL_MANAGE_URL
ORGANIZING_HUB_CALL_SCRIPT_URL = settings.ORGANIZING_HUB_CALL_SCRIPT_URL
ORGANIZING_HUB_DASHBOARD_URL = settings.ORGANIZING_HUB_DASHBOARD_URL
ORGANIZING_HUB_PROMOTE_ENABLED = settings.ORGANIZING_HUB_PROMOTE_ENABLED
@register.simple_tag
def bsd_create_account_url():
return BSD_CREATE_ACCOUNT_URL
@register.simple_tag
def call_campaign_complete_url(call_campaign):
"""
URL for Complete Call Campaign page
Parameters
----------
call_campaign : CallCampaign
Call Campaign
Returns
-------
str
Return url for Complete Call Campaign page
"""
return reverse_lazy(
'organizing-hub-call-campaign-status',
kwargs={
'uuid': call_campaign.uuid,
'status_id': CallCampaignStatus.complete.value[0],
}
)
@register.simple_tag
def call_campaign_pause_url(call_campaign):
"""
URL for Pause Call Campaign page
Parameters
----------
call_campaign : CallCampaign
Call Campaign
Returns
-------
str
Return url for Pause Call Campaign page
"""
return reverse_lazy(
'organizing-hub-call-campaign-status',
kwargs={
'uuid': call_campaign.uuid,
'status_id': CallCampaignStatus.paused.value[0],
}
)
@register.simple_tag
def call_campaign_resume_url(call_campaign):
"""
URL for Resume Call Campaign page
Parameters
----------
call_campaign : CallCampaign
Call Campaign
Returns
-------
str
Return url for Resume Call Campaign page
"""
return reverse_lazy(
'organizing-hub-call-campaign-status',
kwargs={
'uuid': call_campaign.uuid,
'status_id': CallCampaignStatus.in_progress.value[0],
}
)
@register.simple_tag
def call_campaign_start_url(call_campaign):
"""
URL for Start Call Campaign page
Parameters
----------
call_campaign : CallCampaign
Call Campaign
Returns
-------
str
Return url for Start Call Campaign page
"""
return reverse_lazy(
'organizing-hub-call-campaign-status',
kwargs={
'uuid': call_campaign.uuid,
'status_id': CallCampaignStatus.in_progress.value[0],
}
)
@register.inclusion_tag('partials/events_nav.html', takes_context=True)
def events_nav(context):
"""Show Hydra Promote Link if Hub Promote is not enabled"""
show_promote_link = not ORGANIZING_HUB_PROMOTE_ENABLED
return {
'show_promote_link': show_promote_link,
'request': context['request'],
}
# Organizing Hub templates
@register.inclusion_tag('partials/group_link.html', takes_context=True)
def group_link(context):
group = find_local_group_by_user(context['request'].user)
return {
'group': group,
'request': context['request'],
}
@register.simple_tag(takes_context=True)
def has_organizing_hub_feature_access(context, feature_id):
"""
Check if user has access to Organizing Hub Feature
Parameters
----------
feature_id : int
Organizing Hub Feature id
Returns
-------
bool
Return True if user has access to Organizing Hub Feature
"""
local_group = find_local_group_by_user(context['request'].user)
if local_group is not None and hasattr(
local_group,
'organizinghubaccess',
):
access = local_group.organizinghubaccess
has_feature_access = access.has_feature_access_by_id(feature_id)
return has_feature_access
else:
return False
@register.simple_tag(takes_context=True)
def local_group(context):
"""TODO move to local groups template tags"""
return find_local_group_by_user(context['request'].user)
# Organizing Hub Navigation menu
@register.inclusion_tag('partials/organizing_hub_nav.html', takes_context=True)
def organizing_hub_nav(context):
group = find_local_group_by_user(context['request'].user)
show_admins_link = ORGANIZING_HUB_ADMINS_ENABLED
return {
'group': group,
'organizing_guides_url': ORGANIZING_GUIDES_URL,
'organizing_docs_url': ORGANIZING_DOCS_URL,
'show_admins_link': show_admins_link,
'request': context['request'],
}
@register.simple_tag
def organizing_docs_url():
return ORGANIZING_DOCS_URL
@register.simple_tag
def organizing_email():
return ORGANIZING_EMAIL
@register.simple_tag
def organizing_hub_call_callers_url():
return ORGANIZING_HUB_CALL_CALLERS_URL
@register.simple_tag
def organizing_hub_call_manage_url():
return ORGANIZING_HUB_CALL_MANAGE_URL
@register.simple_tag
def organizing_hub_call_script_url():
return ORGANIZING_HUB_CALL_SCRIPT_URL
@register.simple_tag
def organizing_hub_dashboard_url():
return ORGANIZING_HUB_DASHBOARD_URL
@register.inclusion_tag(
'organizing_hub/tags/organizing_hub_login_alert.html',
takes_context=True
)
def organizing_hub_login_alert(context):
"""Organizing Hub Login Alert snippet set to show"""
return {
'organizing_hub_login_alert': OrganizingHubLoginAlert.objects.filter(
show=True
).first(),
'request': context['request'],
}
| 1.898438 | 2 |
tests/fits2png_zoom.py | nbarbey/csh | 1 | 12791811 | #!/usr/bin/env python
import getopt, sys, os
import numpy as np
import pyfits
from pylab import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid.inset_locator import mark_inset
#fname_ext = '/home/nbarbey/data/csh/output/ngc6946_cross_robust.fits'
fname_ext = sys.argv[1]
fname = fname_ext.split('.')[0]
out_fname = fname + '.png'
print('displaying ' + fname)
title_str = fname.split(os.sep)[-1]
t = np.flipud(pyfits.fitsopen(fname_ext)[0].data.T)
fig = plt.figure(1, [5,4])
ax = fig.add_subplot(111)
#imshow(t , interpolation="nearest")
#imshow((t - t.min())) ** .25, interpolation="nearest")
tt = t ** .25
tt[np.isnan(tt)] = 0
extent = [0., 192., 0., 192.]
ax.imshow(tt, extent=extent, interpolation="nearest")
tzoom = tt[135:155, 80:100,]
axins = zoomed_inset_axes(ax, 2, loc=3) # zoom = 6
extent = [80., 100., 192. - 155., 192. - 135, ]
im = axins.imshow(tzoom, extent=extent, interpolation="nearest")
im.set_clim([tt.min(), tt.max()])
plt.xticks(visible=False)
plt.yticks(visible=False)
#x1, x2, y1, y2 = 80., 100., 135., 155.,
#axins.set_xlim(x1, x2)
#axins.set_ylim(y1, y2)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
#plt.title(title_str)
#plt.colorbar()
#plt.xlabel('Right Ascension')
#plt.ylabel('Declination')
plt.show()
fig.savefig(out_fname)
| 2.171875 | 2 |
rlcard/games/tienlen/utils.py | xiviu123/rlcard | 0 | 12791812 | <gh_stars>0
import math
from typing import List
import threading
import collections
import itertools
import numpy as np
import operator as op
from functools import reduce
def nPr(n, r):
    return math.factorial(n) // math.factorial(n - r)
def nCr(n, r):
r = min(r, n-r)
numer = reduce(op.mul, range(n, n-r, -1), 1)
denom = reduce(op.mul, range(1, r+1), 1)
return numer // denom # or / in Python 2
def init_standard_deck():
''' Initialize a standard deck of 52 cards
Returns:
(list): A list of Card object
'''
res = [i for i in range(52)]
return res
class LocalObjs(threading.local):
def __init__(self):
self.cached_candidate_cards = None
_local_objs = LocalObjs()
def contains_cards(candidate, target):
''' Check if cards of candidate contains cards of target.
Args:
candidate (string): A string representing the cards of candidate
target (string): A string representing the number of cards of target
Returns:
boolean
'''
# In normal cases, most continuous calls of this function
# will test different targets against the same candidate.
# So the cached counts of each card in candidate can speed up
# the comparison for following tests if candidate keeps the same.
if not _local_objs.cached_candidate_cards or _local_objs.cached_candidate_cards != candidate:
_local_objs.cached_candidate_cards = candidate
cards_dict = collections.defaultdict(int)
for card in candidate:
cards_dict[card] += 1
_local_objs.cached_candidate_cards_dict = cards_dict
cards_dict = _local_objs.cached_candidate_cards_dict
if (target == ''):
return True
curr_card = target[0]
curr_count = 1
for card in target[1:]:
if (card != curr_card):
if (cards_dict[curr_card] < curr_count):
return False
curr_card = card
curr_count = 1
else:
curr_count += 1
if (cards_dict[curr_card] < curr_count):
return False
return True
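

# Illustrative note (not in the original module): contains_cards assumes identical
# cards in the target are adjacent, e.g. contains_cards('334455', '345') is True
# while contains_cards('334455', '3334') is False (only two '3's are available).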
def encode_cards(cards: List[int]) -> np.ndarray:
plane = np.zeros(52, dtype=int)
for card_id in cards:
plane[card_id] = 1
return plane
def get_one_hot_array(num_left_cards, max_num_cards):
one_hot = np.zeros(max_num_cards, dtype=np.int8)
one_hot[num_left_cards - 1] = 1
return one_hot
def encode_players_round_active(players, num_players = 4) -> np.ndarray:
plane = np.zeros(num_players, dtype=int)
for player_id in players:
plane[player_id] = 1
return plane | 3.015625 | 3 |
vultr/v1_block.py | kchoudhu/python-vultr | 2 | 12791813 | <gh_stars>1-10
'''Partial class to handle Vultr Account API calls'''
from .utils import VultrBase, update_params
class VultrBlockStore(VultrBase):
'''Handles Vultr Account API calls'''
def __init__(self, api_key):
VultrBase.__init__(self, api_key)
def attach(self, subid, attach_to_subid, params=None):
params = update_params(
params,
{'SUBID' : subid,
'attach_to_SUBID' : attach_to_subid }
)
return self.request('/v1/block/attach', params, 'POST')
def create(self, dcid, size_gb, label=None, params=None):
params = update_params(
params,
{'DCID' : dcid,
'size_gb' : size_gb,
'label' : label if label else str() }
)
return self.request('/v1/block/create', params, 'POST')
def delete(self, subid, params=None):
params = update_params(
params,
{'SUBID' : subid }
)
return self.request('/v1/block/delete', params, 'POST')
def detach(self, subid, params=None):
params = update_params(
params,
{'SUBID' : subid }
)
return self.request('/v1/block/detach', params, 'POST')
def label_set(self, subid, label, params=None):
params = update_params(
params,
{'SUBID' : subid,
'label' : label }
)
return self.request('/v1/block/label_set', params, 'POST')
def list(self, params=None):
params = params if params else dict()
return self.request('/v1/block/list', params, 'GET')
def resize(self, params=None):
raise NotImplementedError()
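

# Minimal usage sketch (not part of the original module): "YOUR_API_KEY" is a
# placeholder; list() performs a GET against /v1/block/list for the account.
if __name__ == "__main__":
    block_store = VultrBlockStore("YOUR_API_KEY")
    print(block_store.list())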
| 2.40625 | 2 |
sta/client.py | NMWDI/PySTA | 1 | 12791814 | <filename>sta/client.py
# ===============================================================================
# Copyright 2021 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os.path
import click
import yaml
from requests import Session
from jsonschema import validate, ValidationError
import re
IDREGEX = re.compile(r"(?P<id>\(\d+\))")
def verbose_message(msg):
click.secho(msg, fg="green")
def warning(msg):
click.secho(msg, fg="red")
class BaseST:
iotid = None
_db_obj = None
def __init__(self, payload, session, connection):
self._payload = payload
self._connection = connection
self._session = session
def _validate_payload(self):
try:
validate(instance=self._payload, schema=self._schema)
return True
except ValidationError as err:
print(
f"Validation failed for {self.__class__.__name__}. {err}. {self._payload}"
)
def _generate_request(
self, method, query=None, entity=None, orderby=None, expand=None, limit=None
):
if orderby is None and method == "get":
orderby = "$orderby=id asc"
base_url = self._connection["base_url"]
if not base_url.startswith("http"):
base_url = f"https://{base_url}/FROST-Server/v1.1"
if entity is None:
entity = self.__class__.__name__
url = f"{base_url}/{entity}"
if method == "patch":
url = f"{url}({self.iotid})"
else:
params = []
if limit:
params.append(f"$top={limit}")
if orderby:
if not orderby.startswith("$orderby"):
orderby = f"$orderby={orderby}"
params.append(orderby)
if query:
# params.append(urlencode({"$filter": query}))
params.append(f"$filter={query}")
if params:
url = f"{url}?{'&'.join(params)}"
if expand:
url = f"{url}&$expand={expand}"
return {"method": method, "url": url}
def _send_request(self, request, dry=False, verbose=True, **kw):
connection = self._connection
func = getattr(self._session, request["method"])
if not dry:
resp = func(
request["url"], auth=(connection["user"], connection["pwd"]), **kw
)
if verbose:
if resp and resp.status_code not in (200, 201):
print(f"request={request}")
print(f"response={resp}")
return resp
def _parse_response(self, request, resp, dry=False):
if request["method"] == "get":
if resp.status_code == 200:
return resp.json()
elif request["method"] == "post":
if dry:
return True
if resp.status_code == 201:
m = IDREGEX.search(resp.headers.get("location", ""))
if m:
iotid = m.group("id")[1:-1]
self.iotid = iotid
return True
else:
print(resp.status_code, resp.text)
elif request["method"] == "patch":
if resp.status_code == 200:
return True
def get(
self,
query,
entity=None,
pages=None,
expand=None,
limit=None,
verbose=False,
orderby=None,
):
if pages and pages < 0:
pages = abs(pages)
orderby = "$orderby=id desc"
def get_items(request, page_count, yielded):
if pages:
if page_count >= pages:
return
if verbose:
pv = ""
if pages:
pv = "/{pages}"
verbose_message(
f"getting page={page_count + 1}{pv} - url={request['url']}"
)
# verbose_message("-------------- Request -----------------")
# verbose_message(request["url"])
# verbose_message("----------------------------------------")
resp = self._send_request(request)
resp = self._parse_response(request, resp)
if not resp:
click.secho(request["url"], fg="red")
return
if not resp["value"]:
warning("no records found")
return
else:
for v in resp["value"]:
if limit and yielded >= limit:
return
yielded += 1
yield v
try:
next_url = resp["@iot.nextLink"]
except KeyError:
return
yield from get_items(
{"method": "get", "url": next_url}, page_count + 1, yielded
)
start_request = self._generate_request(
"get",
query=query,
entity=entity,
orderby=orderby,
expand=expand,
limit=limit,
)
yield from get_items(start_request, 0, 0)
def put(self, dry=False, check_exists=True):
if self._validate_payload():
if check_exists and self.exists():
return self.patch()
else:
request = self._generate_request("post")
print(request)
resp = self._send_request(request, json=self._payload, dry=dry)
return self._parse_response(request, resp, dry=dry)
def getfirst(self, *args, **kw):
try:
return next(self.get(*args, **kw))
except StopIteration:
return
def exists(self):
name = self._payload["name"]
resp = self.getfirst(f"name eq '{name}'")
if resp:
try:
self._db_obj = resp
except IndexError:
return
self.iotid = self._db_obj["@iot.id"]
return True
def patch(self, dry=False):
if self._validate_payload():
request = self._generate_request("patch")
resp = self._send_request(request, json=self._payload, dry=dry)
return self._parse_response(request, resp, dry=dry)
class Things(BaseST):
_schema = {
"type": "object",
"required": ["name", "description"],
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"Locations": {
"type": "array",
"required": ["@iot.id"],
"properties": {"@iot.id": {"type": "number"}},
},
},
}
def exists(self):
name = self._payload["name"]
location = self._payload["Locations"][0]
lid = location["@iot.id"]
resp = self.getfirst(f"name eq '{name}'", entity=f"Locations({lid})/Things")
if resp:
try:
self._db_obj = resp
except IndexError:
return
self.iotid = self._db_obj["@iot.id"]
return True
class Locations(BaseST):
_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"encodingType": {"type": "string"},
"location": {
"type": "object",
"required": ["type", "coordinates"],
"oneOf": [
{
"title": "Point",
"type": "object",
"properties": {
"type": {"enum": ["Point"]},
"coordinates": {"$ref": "#/definitions/position"},
},
},
{
"title": "Polygon",
"type": "object",
"properties": {
"type": {"enum": ["Polygon"]},
"coordinates": {"$ref": "#/definitions/polygon"},
},
},
],
},
},
"required": ["name", "description", "encodingType", "location"],
"definitions": {
"position": {
"description": "A single position",
"type": "array",
"minItems": 2,
"items": {"type": "number"},
"additionalItems": False,
},
"positionArray": {
"description": "An array of positions",
"type": "array",
"items": {"$ref": "#/definitions/position"},
},
"lineString": {
"description": "An array of two or more positions",
"allOf": [{"$ref": "#/definitions/positionArray"}, {"minItems": 2}],
},
"linearRing": {
"description": "An array of four positions where the first equals the last",
"allOf": [{"$ref": "#/definitions/positionArray"}, {"minItems": 4}],
},
"polygon": {
"description": "An array of linear rings",
"type": "array",
"items": {"$ref": "#/definitions/linearRing"},
},
},
}
class Sensors(BaseST):
_schema = {
"type": "object",
"required": ["name", "description", "encodingType", "metadata"],
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"encodingType": {"type": "string"},
"metadata": {"type": "string"},
},
}
class ObservedProperties(BaseST):
_schema = {
"type": "object",
"required": ["name", "definition", "description"],
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"definition": {"type": "string"},
},
}
class Datastreams(BaseST):
_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"description": {"type": "string"},
"unitOfMeasurement": {
"type": "object",
"required": ["name", "symbol", "definition"],
"properties": {
"name": {"type": "string"},
"symbol": {"type": "string"},
"definition": {"type": "string"},
},
},
"observationType": {"type": "string"},
"Thing": {
"type": "object",
"required": ["@iot.id"],
"properties": {"@iot.id": {"type": "number"}},
},
"ObservedProperty": {
"type": "object",
"required": ["@iot.id"],
"properties": {"@iot.id": {"type": "number"}},
},
"Sensor": {
"type": "object",
"required": ["@iot.id"],
"properties": {"@iot.id": {"type": "number"}},
},
},
"required": [
"name",
"description",
"unitOfMeasurement",
"observationType",
"Thing",
"ObservedProperty",
"Sensor",
],
}
def exists(self):
name = self._payload["name"]
thing = self._payload["Thing"]
lid = thing["@iot.id"]
resp = self.getfirst(f"name eq '{name}'", entity=f"Things({lid})/Datastreams")
if resp:
try:
self._db_obj = resp
except IndexError:
return
self.iotid = self._db_obj["@iot.id"]
return True
class Observations(BaseST):
_schema = {
"type": "object",
"required": ["phenomenonTime", "result", "resultTime", "Datastream"],
"properties": {
"phenomenonTime": {"type": "string"},
"result": {"type": "number"},
"resultTime": {"type": "string"},
"Datastream": {
"type": "object",
"required": ["@iot.id"],
"properties": {"@iot.id": {"type": "number"}},
},
},
}
class ObservationsArray(BaseST):
_schema = {
"type": "object",
"required": ["observations", "Datastream", "components"],
"properties": {
"observations": {"type": "array"},
"components": {"type": "array"},
"Datastream": {
"type": "object",
"required": ["@iot.id"],
"properties": {"@iot.id": {"type": "number"}},
},
},
}
def put(self, dry=False):
if self._validate_payload():
obs = self._payload["observations"]
n = 100
nobs = len(obs)
for i in range(0, nobs, n):
print("loading chunk {}/{}".format(i, nobs))
chunk = obs[i : i + n]
pd = [
{
"Datastream": self._payload["Datastream"],
"components": self._payload["components"],
"dataArray": chunk,
}
]
base_url = self._connection["base_url"]
if not base_url.startswith("http"):
base_url = f"https://{base_url}/FROST-Server/v1.1"
url = f"{base_url}/CreateObservations"
request = {"method": "post", "url": url}
resp = self._send_request(request, json=pd, dry=dry)
self._parse_response(request, resp, dry=dry)
class Client:
def __init__(self, base_url=None, user=None, pwd=None):
self._connection = {"base_url": base_url, "user": user, "pwd": <PASSWORD>}
if not base_url:
p = os.path.join(os.path.expanduser("~"), ".sta.yaml")
if os.path.isfile(p):
with open(p, "r") as rfile:
obj = yaml.load(rfile, Loader=yaml.SafeLoader)
self._connection.update(**obj)
if not self._connection["base_url"]:
base_url = input("Please enter a base url for a SensorThings instance>> ")
if base_url.endswith("/"):
base_url = base_url[:-1]
self._connection["base_url"] = base_url
with open(p, "w") as wfile:
yaml.dump(self._connection, wfile)
self._session = Session()
@property
def base_url(self):
return self._connection["base_url"]
def locations(self):
loc = Locations(None, self._session, self._connection)
return loc.get(None, verbose=True)
def put_sensor(self, payload, dry=False):
sensor = Sensors(payload, self._session, self._connection)
sensor.put(dry)
return sensor
def put_observed_property(self, payload, dry=False):
obs = ObservedProperties(payload, self._session, self._connection)
obs.put(dry)
return obs
def put_datastream(self, payload, dry=False):
datastream = Datastreams(payload, self._session, self._connection)
datastream.put(dry)
return datastream
def put_location(self, payload, dry=False):
location = Locations(payload, self._session, self._connection)
location.put(dry)
return location
def put_thing(self, payload, dry=False):
thing = Things(payload, self._session, self._connection)
thing.put(dry)
return thing
def add_observations(self, payload, dry=False):
obs = ObservationsArray(payload, self._session, self._connection)
obs.put(dry)
return obs
def add_observation(self, payload, dry=False):
obs = Observations(payload, self._session, self._connection)
obs.put(dry, check_exists=False)
return obs
def patch_location(self, iotid, payload, dry=False):
location = Locations(payload, self._session, self._connection)
location.iotid = iotid
location.patch(dry)
return location
def get_sensors(self, query=None, name=None):
if name is not None:
query = f"name eq '{name}'"
yield from Sensors(None, self._session, self._connection).get(query)
def get_observed_properties(self, query=None, name=None):
if name is not None:
query = f"name eq '{name}'"
yield from ObservedProperties(None, self._session, self._connection).get(query)
def get_datastreams(self, query=None, **kw):
yield from Datastreams(None, self._session, self._connection).get(query, **kw)
def get_locations(self, query=None, **kw):
yield from Locations(None, self._session, self._connection).get(query, **kw)
def get_things(self, query=None, **kw):
yield from Things(None, self._session, self._connection).get(query, **kw)
def get_location(self, query=None, name=None):
if name is not None:
query = f"name eq '{name}'"
try:
return next(self.get_locations(query))
except StopIteration:
pass
def get_thing(self, query=None, name=None, location=None):
entity = None
if location:
if isinstance(location, dict):
location = location["@<EMAIL>"]
entity = "Locations({})/Things".format(location)
if name is not None:
query = f"name eq '{name}'"
return next(self.get_things(query, entity=entity))
def get_datastream(self, query=None, name=None, thing=None):
entity = None
if thing:
if isinstance(thing, dict):
thing = thing["@<EMAIL>"]
entity = f"Things({thing})/Datastreams"
if name is not None:
query = f"name eq '{name}'"
return next(self.get_datastreams(query, entity=entity))
def get_observations(self, datastream, **kw):
if isinstance(datastream, dict):
datastream = datastream["@<EMAIL>"]
entity = f"Datastreams({datastream})/Observations"
yield from Datastreams(None, self._session, self._connection).get(
None, entity=entity, **kw
)
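def _example_usage():
    # Usage sketch (not called anywhere): assumes a reachable FROST-Server
    # instance and valid credentials; the base URL, station and datastream
    # names below are purely illustrative.
    client = Client(base_url="https://example.org/FROST-Server/v1.1",
                    user="user", pwd="secret")
    thing = client.get_thing(name="Station-1")
    ds = client.get_datastream(name="Air Temperature", thing=thing)
    for obs in client.get_observations(ds, limit=5):
        print(obs)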
if __name__ == "__main__":
payload = {}
l = Locations(payload, None, None)
l._validate_payload()
# ============= EOF =============================================
| 2.171875 | 2 |
assignment8/mapper.py | IITDU-BSSE06/ads-demystifying-the-logs-KhairulAlam-IITDU | 0 | 12791815 | #!/usr/bin/python
import sys
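# Hadoop-streaming mapper sketch: each stdin line is assumed to be an access-log
# record with 10 space-separated fields; field 7 (row[6]) holds the requested
# path, and the mapper emits its file extension, e.g. "/index.html" -> "html".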
for line in sys.stdin:
    row = line.strip().split(" ")
    if len(row) == 10:
        File = str(row[6])
        t1 = File.strip().split(".")
        ind = len(t1) - 1
        t2 = str(t1[ind])
        print(t2)
| 2.765625 | 3 |
Headedness/Transitive/Test_Trans.py | mlepori1/Picking_BERTs_Brain | 5 | 12791816 | <reponame>mlepori1/Picking_BERTs_Brain
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel
import logging
import matplotlib.pyplot as plt
import sys
import numpy as np
sys.path.append("../..")
import RSA_utils.utils as RSA
import glove_utils.utils as utils
from statsmodels.stats.descriptivestats import sign_test
from sklearn.metrics.pairwise import cosine_similarity
from scipy.stats import ttest_ind, spearmanr
import random
lexical_idxs = [1, 2, 4]
verb_list = ['loves',
'hates',
'likes',
'smells',
'touches',
'pushes',
'moves',
'sees',
'lifts',
'hits']
if __name__ == "__main__":
np.random.seed(seed=9)
random.seed(9)
# Preprocess Corpus
glove_list, bert_list = RSA.preprocess_data('./head_trans_corpus.txt')
print("data processed")
# Generate Glove hypothesis models
embed_dict = RSA.get_glove_embeds(glove_list, "../../glove_utils/glove/glove.6B.300d.txt", 300, lexical_idxs, verb_list)
subj = np.array(embed_dict[lexical_idxs[0]])
verb = np.array(embed_dict[lexical_idxs[1]])
obj = np.array(embed_dict[lexical_idxs[2]])
rand_verb = np.array(embed_dict[-1])
print("glove embeds generated")
# Generate BERT reference model
bert_embeds = RSA.get_bert_embeds(bert_list, 0)
print("BERT embeds generated")
rsa_subj_dist = []
rsa_obj_dist = []
rsa_verb_dist = []
rsa_rand_verb_dist = []
samples = []
# Generate 100 samples of representational similarity
while len(samples) < 100:
sample = np.random.choice(range(0, len(glove_list)), replace = False, size=200)
if set(sample) in samples:
continue
samples.append(set(sample))
samp_bert_embeds = bert_embeds[sample]
samp_subj = subj[sample]
samp_obj = obj[sample]
samp_verb = verb[sample]
samp_rand_verb = rand_verb[sample]
bert_geom = RSA.calculate_geometry(samp_bert_embeds)
subj_geom = RSA.calculate_geometry(samp_subj)
obj_geom = RSA.calculate_geometry(samp_obj)
verb_geom = RSA.calculate_geometry(samp_verb)
rand_verb_geom = RSA.calculate_geometry(samp_rand_verb)
rsa_subj_dist.append(spearmanr([subj_geom, bert_geom], axis=1)[0])
rsa_obj_dist.append(spearmanr([obj_geom, bert_geom], axis=1)[0])
rsa_verb_dist.append(spearmanr([verb_geom, bert_geom], axis=1)[0])
rsa_rand_verb_dist.append(spearmanr([rand_verb_geom, bert_geom], axis=1)[0])
# Run Tests
print(f'RSA Subj: {np.mean(rsa_subj_dist)} STD: {np.std(rsa_subj_dist)}')
print(f'RSA Obj: {np.mean(rsa_obj_dist)} STD: {np.std(rsa_obj_dist)}')
print(f'RSA Verb: {np.mean(rsa_verb_dist)} STD: {np.std(rsa_verb_dist)}')
print(f'RSA Random Verb: {np.mean(rsa_rand_verb_dist)} STD: {np.std(rsa_rand_verb_dist)}')
print(f'Sign Test Subj vs. Obj: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_obj_dist))[1]}')
print(f'Sign Test Subj vs. Verb: {sign_test(np.array(rsa_subj_dist) - np.array(rsa_verb_dist))[1]}')
print(f'Sign Test Verb vs. Obj: {sign_test(np.array(rsa_obj_dist) - np.array(rsa_verb_dist))[1]}')
print(f'Sign Test Verb vs. Random Verb: {sign_test(np.array(rsa_rand_verb_dist) - np.array(rsa_verb_dist))[1]}')
| 2.421875 | 2 |
tests/conftest.py | alexanderrichards/ProductionSystem | 0 | 12791817 | <filename>tests/conftest.py
"""Define necessary setup fixtures."""
import pytest
import pkg_resources
from productionsystem.config import ConfigSystem
@pytest.fixture(scope="session", autouse=True)
def config():
"""Set up the config entrypoint map."""
config_instance = ConfigSystem.setup(None) # pylint: disable=no-member
config_instance.entry_point_map = pkg_resources.get_entry_map('productionsystem')
return config_instance
| 2.15625 | 2 |
universalwrapper/__init__.py | Basdbruijne/UniversalWrapper | 4 | 12791818 | import sys
import universalwrapper.universal_wrapper as universalwrapper
sys.modules[__name__] = universalwrapper
| 1.21875 | 1 |
index/utils/webuserinfo.py | importer/widen | 0 | 12791819 | # coding=utf-8
import logging
from widen import settings
import requests
from index.models import *
class WeiXinLogin():
def __init__(self, code, state):
self.code = code
self.state = state
self.appid = settings.APP_ID
self.appsecret = settings.APP_SECRET
self.access_token = ''
self.refresh_token = ''
self.openid = ''
self.is_expires = 1
self.detail = {}
    # To keep it easy to read, everything is written in one method
def get_access_token(self):
        # 2. Exchange the authorization code for the web-authorization access_token
if self.refresh_token:
url = u'https://api.weixin.qq.com/sns/oauth2/refresh_token'
            params = {
                'appid': self.appid,
                'grant_type': 'refresh_token',
                'refresh_token': self.refresh_token
            }
res = requests.get(url, params=params).json()
if res.get('errcode', None):
logging.info(res.get('errmsg'))
return res.get('errmsg')
self.access_token = res.get("access_token")
self.openid = res.get("openid")
self.refresh_token = res.get('refresh_token')
logging.info(
'access_token:%s ;openid:%s ;refresh_token:%s' % (
self.access_token, self.openid, self.refresh_token))
return True
url = u'https://api.weixin.qq.com/sns/oauth2/access_token'
params = {
'appid': self.appid,
'secret': self.appsecret,
'code': self.code,
'grant_type': 'authorization_code'
}
res = requests.get(url, params=params).json()
if res.get('errcode', None):
logging.info(res.get('errmsg'))
return res.get('errmsg')
self.access_token = res.get("access_token")
self.openid = res.get("openid")
self.refresh_token = res.get('refresh_token')
Token_get = Token(**res)
Token_get.save()
logging.info(
'access_token:%s ;openid:%s ;refresh_token:%s' % (
self.access_token, self.openid, self.refresh_token))
return True
def token_expires(self):
        # Check whether the current access_token has expired
url = u'https://api.weixin.qq.com/sns/auth'
        params = {
            'access_token': self.access_token,
            'openid': self.openid,
        }
res = requests.get(url, params=params).json()
if res.get('errcode'):
self.is_expires = 1
logging.info('is_expires:%s' % self.is_expires)
else:
self.is_expires = 0
return True
def get_info(self):
        # 4. Fetch the user info
user_info_url = u'https://api.weixin.qq.com/sns/userinfo'
params = {
'access_token': self.access_token,
'openid': self.openid,
'lang': 'zh_CN',
}
res = requests.get(user_info_url, params=params).json()
if res.get('errcode'):
return res.get('errmsg')
# decode response content
logging.info('Get user detail openid:' + res.get('openid'))
for key, value in res.items():
self.detail[key] = value.encode('iso8859-1').decode('utf-8') if isinstance(value, str) else value
WxUser = Wxuser(**self.detail)
WxUser.save()
logging.info('Save%s to db' % self.detail.get('openid'))
return True
def get_detail(self):
self.token_expires()
if self.is_expires == 1:
self.get_access_token()
self.get_info()
return self.detail
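def _example_login(code, state):
    # Usage sketch: ``code`` and ``state`` come from the WeChat OAuth2 redirect
    # callback. get_detail() obtains or refreshes the access_token as needed and
    # returns the fetched user-info dict (which is also saved to the Wxuser model).
    login = WeiXinLogin(code, state)
    return login.get_detail()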
| 2.140625 | 2 |
tests/test_datamodel.py | qinfeng2011/wltp | 0 | 12791820 | <reponame>qinfeng2011/wltp
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2019 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
import sys
import unittest
from unittest.case import skip
import pandas as pd
from wltp import datamodel
import wltp
from wltp.experiment import Experiment
from .goodvehicle import goodVehicle
class Test(unittest.TestCase):
def testGoodVehicle(self):
mdl = goodVehicle()
exp = Experiment(mdl)
mdl = exp._model
defwot = datamodel.upd_default_load_curve({})["wot"]
self.assertTrue(
pd.DataFrame(mdl["wot"][["n_norm", "p_norm"]]).equals(pd.DataFrame(defwot))
)
@skip("Cascade-models disabled") ##TODO: Re-enable when pandel works.
def testOverlayOnInit(self):
mdl = goodVehicle()
nval = 6000
mdl2 = {"n_rated": nval}
exp = Experiment(mdl, mdl2)
mdl = exp._model
self.assertEqual(mdl["n_rated"], nval)
def test_get_class_parts_limits_sorted(self):
classes = datamodel.get_wltc_data()["classes"]
class_limits = {
cls: datamodel.get_class_parts_limits(cls, edges=True)
for cls in classes.keys()
}
for (cls, l) in class_limits.items():
self.assertSequenceEqual(l, sorted(l), "Class(%s): Unsorted!" % cls)
def test_get_class_parts_limits_with_edges(self):
classes = datamodel.get_wltc_data()["classes"]
class_limits = {
cls: datamodel.get_class_parts_limits(cls, edges=True)
for cls in classes.keys()
}
for (cls, l) in class_limits.items():
self.assertEqual(l[0], 0, "Class(%s): Left-edge not 0!" % cls)
for (cls, l) in class_limits.items():
self.assertEqual(
l[-1],
len(classes[cls]["v_cycle"]),
"Class(%s): Section Right-edge not len(cycle)!" % cls,
)
def test_get_class_pmr_limits(self):
l = datamodel.get_class_pmr_limits()
self.assertSequenceEqual(l, [22, 34])
def test_get_class_pmr_limits_with_edges(self):
pmr_limits = datamodel.get_class_pmr_limits(edges=True)
self.assertEqual(pmr_limits[0], 0, "Left-edge not 0!")
self.assertEqual(pmr_limits[-1], float("inf"), "PMR-limit: Right-edge not INF!")
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 1.90625 | 2 |
postfix_expression.py | GoddessLuBoYan/postfix-expression | 0 | 12791821 | """
作者(github账号):GoddessLuBoYan
内容:将四则运算的中缀表达式转换成后缀表达式并计算
要求:每个数字必须都是一位数,我没有考虑多位数和小数的情况
"""
# Basic container definitions
class Container:
def __init__(self, iterator=None):
self._list = []
if iterator:
for i in iterator:
self.push(i)
def __len__(self):
return self._list.__len__()
def __str__(self):
return self._list.__str__()
def __repr__(self):
return self._list.__repr__()
def isempty(self):
return self._list.__len__() == 0
def push(self, u):
pass
def get(self):
pass
def pop(self):
pass
class Stack(Container):
def push(self, u):
self._list = [u] + self._list
def get(self):
return self._list[0] if not self.isempty() else None
def pop(self):
return self._list.pop(0) if not self.isempty() else None
class Queue(Container):
def push(self, u):
self._list = self._list + [u]
def get(self):
return self._list[-1] if not self.isempty() else None
def pop(self):
return self._list.pop(0) if not self.isempty() else None
# Parse the infix expression and convert it to postfix
ex = "5+9-8*7+6*(5-4+(3*2))"
def level(v):
if v in '$':
return 0
elif v in '+-':
return 1
elif v in '*/':
return 2
elif v in '()':
return 0
raise Exception(v)
def compare(v1, v2):
return level(v1) - level(v2)
def parse(ex):
ops = Stack("$")
exp = Queue()
for c in (ex + "$"):
if c in '1234567890':
exp.push(c)
elif c == '(':
ops.push('(')
elif c == ')':
while True:
top = ops.pop()
if top == '(':
break
if top == '$':
raise
exp.push(top)
        elif c in '+-*/$':
top = ops.get()
l1 = level(c)
l2 = level(top)
while l1 <= l2:
if c == '$' and top == '$':
return exp, ops
exp.push(ops.pop())
top = ops.get()
l2 = level(top)
ops.push(c)
print(ops)
print(exp)
print()
exp, ops = parse(ex)
# Evaluate the postfix expression, then cross-check the original infix expression with eval
curr = Stack()
while not exp.isempty():
t = exp.pop()
if t in "123456789":
curr.push(t)
elif t in '()':
continue
else:
t2 = curr.pop()
t1 = curr.pop()
result = eval(str(t1) + str(t) + str(t2))
print(str(t1) + str(t) + str(t2), '=', result)
curr.push(result)
print(curr)
print(eval(ex)) | 4.21875 | 4 |
test_nagplug.py | aslafy-z/nagplug | 3 | 12791822 | import unittest
from nagplug import Plugin, Threshold, ArgumentParserError
from nagplug import OK, WARNING, CRITICAL, UNKNOWN
class TestParsing(unittest.TestCase):
def test_parse(self):
plugin = Plugin()
plugin.add_arg('-e', '--test', action='store_true')
args = plugin.parser.parse_args(['-e'])
self.assertTrue(args.test)
def test_parse_threshold_string(self):
plugin = Plugin()
plugin.add_arg('-w', '--warning-threshold')
plugin.add_arg('-c', '--critical-threshold')
args = plugin.parse_args(['-w', '10:20', '-c', '0:40'])
self.assertEqual(OK, plugin.check_threshold(15,
args.warning_threshold,
args.critical_threshold))
def test_parse_threshold_native(self):
plugin = Plugin()
plugin.add_arg('-w', '--warning-threshold', type=Threshold)
plugin.add_arg('-c', '--critical-threshold', type=Threshold)
args = plugin.parse_args(['-w', '10:20', '-c', '0:40'])
self.assertEqual(OK, plugin.check_threshold(15,
args.warning_threshold,
args.critical_threshold))
def test_parse_exceptions(self):
plugin = Plugin()
plugin.add_arg('test')
self.assertRaises(ArgumentParserError, plugin.parse_args, [])
    def test_parse_threshold_exceptions(self):
plugin = Plugin()
plugin.add_arg('threshold', type=Threshold)
self.assertRaises(ArgumentParserError, plugin.parse_args, [])
class TestThreshold(unittest.TestCase):
def test_threshold_parseerror(self):
self.assertRaises(ValueError, Threshold, ("helloworld"))
def test_threshold_valueerror(self):
self.assertRaises(ValueError, Threshold, ("10:2"))
def test_theshold_simple_neg(self):
self.assertFalse(Threshold("10").check(-1))
def test_theshold_simple_over(self):
self.assertFalse(Threshold("10").check(11))
def test_theshold_simple_zero(self):
self.assertTrue(Threshold("10").check(0))
def test_theshold_simple_upperbound(self):
self.assertTrue(Threshold("10").check(10))
def test_theshold_simple_inside(self):
self.assertTrue(Threshold("10").check(5))
def test_threshold_range_one(self):
self.assertTrue(Threshold("10:10").check(10))
def test_threshold_range_lowerbound(self):
self.assertTrue(Threshold("10:20").check(10))
def test_threshold_range_inside(self):
self.assertTrue(Threshold("10:20").check(15))
def test_threshold_range_upperbound(self):
self.assertTrue(Threshold("10:20").check(20))
def test_threshold_range_lower(self):
self.assertFalse(Threshold("10:20").check(9))
def test_threshold_range_upper(self):
self.assertFalse(Threshold("10:20").check(21))
def test_threshold_invert_bound(self):
self.assertFalse(Threshold("@10").check(10))
def test_threshold_invert_range(self):
self.assertFalse(Threshold("@10:20").check(10))
def test_threshold_invert_upper(self):
self.assertFalse(Threshold("@:20").check(10))
def test_threshold_openrange_simple(self):
self.assertTrue(Threshold("10:").check(20))
def test_threshold_openrange_inside(self):
self.assertTrue(Threshold(":10").check(5))
def test_threshold_openrange_over(self):
self.assertFalse(Threshold(":10").check(20))
def test_threshold_openrange_neg(self):
self.assertTrue(Threshold("~:10").check(-1))
def test_threshold_openrange_neg_over(self):
self.assertFalse(Threshold("~:10").check(11))
class TestCode(unittest.TestCase):
def test_simple_default(self):
plugin = Plugin()
self.assertEqual(plugin.get_code(), UNKNOWN)
def test_simple_ok(self):
plugin = Plugin()
plugin.add_result(OK, 'OK')
self.assertEqual(plugin.get_code(), OK)
def test_simple_warning(self):
plugin = Plugin()
plugin.add_result(WARNING, 'WARNING')
self.assertEqual(plugin.get_code(), WARNING)
def test_simple_critical(self):
plugin = Plugin()
plugin.add_result(CRITICAL, 'CRITICAL')
self.assertEqual(plugin.get_code(), CRITICAL)
def test_simple_owc(self):
plugin = Plugin()
plugin.add_result(OK, 'OK')
plugin.add_result(WARNING, 'WARNING')
plugin.add_result(CRITICAL, 'CRITICAL')
self.assertEqual(plugin.get_code(), CRITICAL)
def test_simple_ow(self):
plugin = Plugin()
plugin.add_result(OK, 'OK')
plugin.add_result(WARNING, 'WARNING')
self.assertEqual(plugin.get_code(), WARNING)
def test_simple_cw(self):
plugin = Plugin()
plugin.add_result(CRITICAL, 'OK')
plugin.add_result(WARNING, 'WARNING')
plugin.add_result(WARNING, 'WARNING')
plugin.add_result(WARNING, 'WARNING')
plugin.add_result(WARNING, 'UNKNOWN')
self.assertEqual(plugin.get_code(), CRITICAL)
class TestMessage(unittest.TestCase):
def test_simple_default(self):
plugin = Plugin()
self.assertEqual(plugin.get_message(), '')
def test_simple_ok(self):
plugin = Plugin()
plugin.add_result(OK, 'OK')
self.assertEqual(plugin.get_message(), 'OK')
def test_simple_owc(self):
plugin = Plugin()
plugin.add_result(OK, 'OK')
plugin.add_result(WARNING, 'WARNING')
plugin.add_result(CRITICAL, 'CRITICAL')
self.assertEqual(plugin.get_message(joiner=', '),
', '.join(['OK', 'WARNING', 'CRITICAL']))
def test_simple_owc_level(self):
plugin = Plugin()
plugin.add_result(OK, 'OK')
plugin.add_result(WARNING, 'WARNING')
plugin.add_result(CRITICAL, 'CRITICAL')
self.assertEqual(plugin.get_message(joiner=', ', msglevels=[WARNING]),
', '.join(['WARNING']))
class TestExtData(unittest.TestCase):
def test_simple(self):
plugin = Plugin()
plugin.add_extdata('OK')
plugin.add_extdata('hey!')
plugin.add_extdata('STUFF')
self.assertEqual(plugin.get_extdata(),
'\n'.join(['OK', 'hey!', 'STUFF']))
if __name__ == '__main__':
unittest.main()
| 2.703125 | 3 |
get-xkcd.py | zimolzak/get-xkcd | 0 | 12791823 | <reponame>zimolzak/get-xkcd
import json
import urllib.request
import ssl
from sys import argv
unverified = ssl._create_unverified_context()
start = 614
end = 617
# last comic with a reliable transcript is roughly 1608?
# Last with any might be 1677.
# But 1677 really contains transcript from 1674.
# someone noticed this: http://forums.xkcd.com/viewtopic.php?t=113433
if len(argv) == 3:
s = int(argv[1])
e = int(argv[2])
assert s < e
start = s
end = e + 1
def get_comic_by_num(n):
url = 'https://xkcd.com/' + str(n) + '/info.0.json'
response = urllib.request.urlopen(url, context = unverified)
json_data = str(response.read(), encoding='utf-8')
return json.loads(json_data)
print("comic_num,panels,words")
for comic_to_get in range(start, end):
if comic_to_get == 404:
        continue
python_obj = get_comic_by_num(comic_to_get)
n = python_obj['num']
assert comic_to_get == n
T = python_obj['transcript']
panels = len(T.split('\n\n')) - 1
words = len(T.split())
print(str(n) + ',' + str(panels) + ',' + str(words))
| 3.015625 | 3 |
paypal_transactions_wrapper/transaction.py | Ori-Roza/paypal_transactions_wrapper | 0 | 12791824 | from paypal_transactions_wrapper.exceptions import TransactionPropertyNotFound
class Transaction:
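    # Thin attribute-style wrapper around a single PayPal NVP transaction record;
    # KEY_MAP documents the meaning of the raw field names. Usage sketch with
    # illustrative values:
    #   t = Transaction({"TRANSACTIONID": "ABC123", "AMT": "10.00"})
    #   t.TRANSACTIONID  -> "ABC123"; unknown attributes raise TransactionPropertyNotFound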
KEY_MAP = {
"TIMESTAMP": "date",
"TIMEZONE": "timezone",
"TYPE": "type",
"EMAIL": "costumer_email",
"NAME": "costumer_name",
"TRANSACTIONID": "id",
"STATUS": "status",
"AMT": "amount",
"CURRENCYCODE": "currency",
}
def __init__(self, transaction):
self._transaction = transaction
def __str__(self):
return str(self._transaction)
def __getattr__(self, item):
if item in self._transaction:
return self._transaction[item]
        raise TransactionPropertyNotFound("%s property not found" % item) | 2.859375 | 3 |
cracking-the-coding-interview/ch9-recursion-and-dynamic-programming/9.5-permutations.py | joeghodsi/interview-questions | 1 | 12791825 | <filename>cracking-the-coding-interview/ch9-recursion-and-dynamic-programming/9.5-permutations.py
'''
Problem: Return all permutations of a given string
Solution: Iterate over the elements and lock the current elem in as the ith elem in a partial
permutation then recurse with the remaining values sans the ith element. Eventually there will
be no more remaining values and you will have a single complete permutation. When the entire
call is complete, it will have made all permutations. While I'm storing the perms outside the
function to limit memory usage, this is not a DP solution as it isn't relying on the previous
results
- O(n!) time and space
total time: 45mins :)
'''
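# Worked example: for 'abc' the recursion locks 'a', 'b', or 'c' in as the first
# character and permutes the remaining two characters, giving the six results
# {'abc', 'acb', 'bac', 'bca', 'cab', 'cba'}.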
_permutations = []
def all_permutations(string):
def _fill_permutations(partial_permutation, remaining_characters):
if len(remaining_characters) == 0:
_permutations.append(partial_permutation)
return
for index, value in enumerate(remaining_characters):
new_partial_permutation = partial_permutation + value
new_remaining_values = remaining_characters[:index] + remaining_characters[index + 1:]
_fill_permutations(new_partial_permutation, new_remaining_values)
_fill_permutations('', string)
return set(_permutations)
string = 'abc'
permutations = all_permutations(string)
print(permutations)
print(len(permutations))
| 4.15625 | 4 |
chapter_09/04_unique_words.py | SergeHall/Tony-Gaddis-Python-4th | 2 | 12791826 | <filename>chapter_09/04_unique_words.py
# unique words
# 4. Unique words. Write a program that opens the given text file
# (text_file_9.4.txt) and then displays a list of all the unique words
# in the file. (Hint: store the words as elements of a set.)
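# Example: for the words "Are are are you" the resulting set is
# {'Are', 'are', 'you'} (the comparison is case-sensitive).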
def main():
my_str = "Welcome! Are are are you completely new to programming? " \
"'If not then we presume you will be looking for information " \
"about why and how to get started with Python. Fortunately an " \
"experienced programmer in any programming language " \
"(whatever it may be) can pick up Python very quickly. " \
"It's also easy for beginners to use and learn, so jump in!"
print()
print("Unique words are below: ")
print(creat_file(my_str))
print("Total unique words in the text =", len(creat_file(my_str)))
# Write the text literal to a file, stripping special characters from the
# beginning and end of each word. Function that reads the file and collects
# the unique words it contains.
def creat_file(str_literal):
# creat file text_file_9.4.txt.
# Where will all the unique words be written
new_file = open('text_file_9.4.txt', 'w')
content = str_literal.split()
    # empty set to collect the values; len() of it later gives the count
content_clear = set([])
    # Loop over the words, strip the following characters from the beginning
    # and end of each word, and write the cleaned word to the file:
    # ? ! , . { } ( ) " '
for i in content:
if i.endswith('.') or i.endswith(',') \
or i.endswith('!') or i.endswith(':') \
or i.endswith(')') or i.endswith('?') \
or i.endswith('}') or i.endswith('"'):
new_file.write(i[:-1] + '\n')
content_clear.update([i[:-1]])
continue
if i.startswith('.') or i.startswith(',') \
or i.startswith('!') or i.startswith(':') \
or i.startswith('(') or i.startswith('?') \
or i.startswith('{') or i.startswith("'") or \
i.startswith('"'):
new_file.write(i[1:] + '\n')
content_clear.update([i[1:]])
continue
else:
new_file.write(i + '\n')
content_clear.update([i])
new_file.close()
return content_clear
main()
| 4.25 | 4 |
src/peachyprinter/api/test_print_api.py | Createcafe3d/YXE3Dtools | 23 | 12791827 | <filename>src/peachyprinter/api/test_print_api.py<gh_stars>10-100
import inspect
from peachyprinter.domain.layer_generator import LayerGenerator
from peachyprinter.infrastructure import print_test_layer_generators
class TestPrintAPI(object):
'''Api used for getting test prints
Typical usage:
API = TestPrintAPI()
selected_print = API.test_print_names()[0]
height = 30
width = 30
layer_height = 0.01
test_print = API.get_test_print(selected_print,height,width,layer_height)
'''
def __init__(self):
self.test_prints = self._get_test_prints()
def test_print_names(self):
'''Returns list of test prints by name'''
return self.test_prints.keys()
def get_test_print(self, name, height, width, layer_height, speed=100):
'''Gets the layer generator for a print with the name with height, width, and layer height'''
return self.test_prints[name](height, width, layer_height, speed)
def _get_test_prints(self):
available_prints = {}
for name in dir(print_test_layer_generators):
obj = getattr(print_test_layer_generators, name)
if inspect.isclass(obj):
if issubclass(obj, LayerGenerator):
if hasattr(obj, 'name'):
available_prints[obj.name] = obj
return available_prints
| 2.5 | 2 |
demo-spiral.py | rougier/JCGT-2014a | 5 | 12791828 | <filename>demo-spiral.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (C) 2013 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY NICOLAS P. ROUGIER ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL NICOLAS P. ROUGIER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Nicolas P. Rougier.
# -----------------------------------------------------------------------------
import numpy as np
import OpenGL.GL as gl
from transforms import ortho
# -------------------------------------
def on_display():
gl.glClearColor(1,1,1,1)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
collection.draw(uniforms= {'u_projection': u_projection,
'u_model' : u_model,
'u_view' : u_view})
glut.glutSwapBuffers()
# -------------------------------------
def on_reshape(width, height):
gl.glViewport(0, 0, width, height)
u_projection[...] = ortho(0,width,0,height,-1,1)
# collection.scale = min(width, height)
# -------------------------------------
def on_keyboard(key, x, y):
if key == '\033': sys.exit()
# -------------------------------------
def on_timer(fps):
collection.dash_phase -= 0.05
collection.rotate -= 0.25*np.pi/180.0
glut.glutTimerFunc(1000/fps, on_timer, fps)
glut.glutPostRedisplay()
# -------------------------------------
if __name__ == '__main__':
import sys
import OpenGL.GLUT as glut
from dash_lines_2D import DashLines
glut.glutInit(sys.argv)
# HiDPI support for retina display
# This requires glut from http://iihm.imag.fr/blanch/software/glut-macosx/
if sys.platform == 'darwin':
import ctypes
from OpenGL import platform
try:
glutInitDisplayString = platform.createBaseFunction(
'glutInitDisplayString', dll=platform.GLUT, resultType=None,
argTypes=[ctypes.c_char_p],
doc='glutInitDisplayString( ) -> None',
argNames=() )
text = ctypes.c_char_p("rgba stencil double samples=8 hidpi")
glutInitDisplayString(text)
except:
pass
glut.glutInitDisplayMode(glut.GLUT_DOUBLE | glut.GLUT_RGB | glut.GLUT_DEPTH)
glut.glutInitWindowSize(1000, 1000)
glut.glutCreateWindow("Dashed rotating spiral")
glut.glutDisplayFunc(on_display)
glut.glutReshapeFunc(on_reshape)
glut.glutKeyboardFunc(on_keyboard)
fps = 60
glut.glutTimerFunc(1000/fps, on_timer, fps)
# Some init
gl.glBlendFunc( gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA )
gl.glDisable( gl.GL_DEPTH_TEST )
gl.glEnable( gl.GL_BLEND )
gl.glClearColor(1.0,1.0,1.0,1.0)
u_projection = np.eye(4).astype( np.float32 )
u_view = np.eye(4).astype( np.float32 )
u_model = np.eye(4).astype( np.float32 )
collection = DashLines()
lw = 20
x0,y0 = 500.0, 500.0
coils = 12
rho_max = 450.
theta_max = coils * 2 * np.pi
rho_step = rho_max / theta_max
P=[]
chord = 1
theta = 1 + chord / rho_step
while theta <= theta_max:
rho = rho_step * theta
x = rho * np.cos( theta )
y = rho * np.sin( theta )
P.append( (x,y) )
theta += chord / rho
chord += .05
collection.append(P, translate=(x0,y0),
color=(0,0,0,1), linewidth=lw+2, dash_pattern = 'solid')
collection.append(P, translate=(x0,y0),
color=(1,1,1,.95), linewidth=lw+1, dash_pattern = 'solid')
collection.append(P, translate=(x0,y0),
color=(.65,.65,.65,1), linewidth=lw, dash_pattern = 'dashed')
glut.glutMainLoop()
| 1.742188 | 2 |
mmdeploy/backend/sdk/__init__.py | aegis-rider/mmdeploy | 1 | 12791829 | <reponame>aegis-rider/mmdeploy
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os
import sys
lib_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../../build/lib'))
sys.path.insert(0, lib_dir)
_is_available = False
if importlib.util.find_spec('mmdeploy_python') is not None:
from .wrapper import SDKWrapper
__all__ = ['SDKWrapper']
_is_available = True
def is_available() -> bool:
return _is_available
| 1.84375 | 2 |
deliravision/torch/models/gans/context_conditional/__init__.py | delira-dev/vision_torch | 4 | 12791830 | <reponame>delira-dev/vision_torch<filename>deliravision/torch/models/gans/context_conditional/__init__.py
from deliravision.models.gans.context_conditional.context_cond_gan import \
ContextConditionalGAN
| 1.046875 | 1 |
prodj/core/prodj.py | beauburrows/python-prodj-link | 66 | 12791831 | import socket
import logging
from threading import Thread
from select import select
from enum import Enum
from prodj.core.clientlist import ClientList
from prodj.core.vcdj import Vcdj
from prodj.data.dataprovider import DataProvider
from prodj.network.nfsclient import NfsClient
from prodj.network.ip import guess_own_iface
from prodj.network import packets
from prodj.network import packets_dump
class OwnIpStatus(Enum):
notNeeded = 1,
waiting = 2,
acquired = 3
class ProDj(Thread):
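  # Usage sketch (assumes Pioneer Pro DJ Link devices on the local network;
  # callbacks receive the ClientList and the changed player number, see the
  # set_*_callback helpers at the end of this class):
  #
  #   prodj = ProDj()
  #   prodj.set_client_change_callback(lambda cl, player: print("update from", player))
  #   prodj.start()
  #   ...
  #   prodj.stop()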
def __init__(self):
super().__init__()
self.cl = ClientList(self)
self.data = DataProvider(self)
self.vcdj = Vcdj(self)
self.nfs = NfsClient(self)
self.keepalive_ip = "0.0.0.0"
self.keepalive_port = 50000
self.beat_ip = "0.0.0.0"
self.beat_port = 50001
self.status_ip = "0.0.0.0"
self.status_port = 50002
self.need_own_ip = OwnIpStatus.notNeeded
self.own_ip = None
def start(self):
self.keepalive_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.keepalive_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.keepalive_sock.bind((self.keepalive_ip, self.keepalive_port))
logging.info("Listening on {}:{} for keepalive packets".format(self.keepalive_ip, self.keepalive_port))
self.beat_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.beat_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.beat_sock.bind((self.beat_ip, self.beat_port))
logging.info("Listening on {}:{} for beat packets".format(self.beat_ip, self.beat_port))
self.status_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.status_sock.bind((self.status_ip, self.status_port))
logging.info("Listening on {}:{} for status packets".format(self.status_ip, self.status_port))
self.socks = [self.keepalive_sock, self.beat_sock, self.status_sock]
self.keep_running = True
self.data.start()
self.nfs.start()
super().start()
def stop(self):
self.keep_running = False
self.nfs.stop()
self.data.stop()
self.vcdj_disable()
self.join()
self.keepalive_sock.close()
self.beat_sock.close()
def vcdj_set_player_number(self, vcdj_player_number=5):
logging.info("Player number set to {}".format(vcdj_player_number))
self.vcdj.player_number = vcdj_player_number
#self.data.dbc.own_player_number = vcdj_player_number
def vcdj_enable(self):
self.vcdj_set_iface()
self.vcdj.start()
def vcdj_disable(self):
self.vcdj.stop()
self.vcdj.join()
def vcdj_set_iface(self):
if self.own_ip is not None:
self.vcdj.set_interface_data(*self.own_ip[1:4])
def run(self):
logging.debug("starting main loop")
while self.keep_running:
rdy = select(self.socks,[],[],1)[0]
for sock in rdy:
if sock == self.keepalive_sock:
data, addr = self.keepalive_sock.recvfrom(128)
self.handle_keepalive_packet(data, addr)
elif sock == self.beat_sock:
data, addr = self.beat_sock.recvfrom(128)
self.handle_beat_packet(data, addr)
elif sock == self.status_sock:
data, addr = self.status_sock.recvfrom(256)
self.handle_status_packet(data, addr)
self.cl.gc()
logging.debug("main loop finished")
def handle_keepalive_packet(self, data, addr):
#logging.debug("Broadcast keepalive packet from {}".format(addr))
try:
packet = packets.KeepAlivePacket.parse(data)
except Exception as e:
logging.warning("Failed to parse keepalive packet from {}, {} bytes: {}".format(addr, len(data), e))
packets_dump.dump_packet_raw(data)
return
# both packet types give us enough information to store the client
if packet["type"] in ["type_ip", "type_status", "type_change"]:
self.cl.eatKeepalive(packet)
if self.own_ip is None and len(self.cl.getClientIps()) > 0:
self.own_ip = guess_own_iface(self.cl.getClientIps())
if self.own_ip is not None:
logging.info("Guessed own interface {} ip {} mask {} mac {}".format(*self.own_ip))
self.vcdj_set_iface()
packets_dump.dump_keepalive_packet(packet)
def handle_beat_packet(self, data, addr):
#logging.debug("Broadcast beat packet from {}".format(addr))
try:
packet = packets.BeatPacket.parse(data)
except Exception as e:
logging.warning("Failed to parse beat packet from {}, {} bytes: {}".format(addr, len(data), e))
packets_dump.dump_packet_raw(data)
return
if packet["type"] in ["type_beat", "type_mixer"]:
self.cl.eatBeat(packet)
packets_dump.dump_beat_packet(packet)
def handle_status_packet(self, data, addr):
#logging.debug("Broadcast status packet from {}".format(addr))
try:
packet = packets.StatusPacket.parse(data)
except Exception as e:
logging.warning("Failed to parse status packet from {}, {} bytes: {}".format(addr, len(data), e))
packets_dump.dump_packet_raw(data)
return
self.cl.eatStatus(packet)
packets_dump.dump_status_packet(packet)
# called whenever a keepalive packet is received
# arguments of cb: this clientlist object, player number of changed client
def set_client_keepalive_callback(self, cb=None):
self.cl.client_keepalive_callback = cb
# called whenever a status update of a known client is received
# arguments of cb: this clientlist object, player number of changed client
def set_client_change_callback(self, cb=None):
self.cl.client_change_callback = cb
# called when a player media changes
# arguments of cb: this clientlist object, player_number, changed slot
def set_media_change_callback(self, cb=None):
self.cl.media_change_callback = cb
| 2.1875 | 2 |
ProtonBeamTherapy/macrotools.py | dmitryhits/ProtonBeamTherapy | 0 | 12791832 | <gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_macrotools.ipynb (unless otherwise specified).
__all__ = ['MacroWriter', 'create_all', 'run_macro', 'Ek']
# Cell
import subprocess
from os import path
from time import strftime
class MacroWriter:
"""Main class for creating a macro file to be run by `gate`
"""
def __init__(self, macro_folder='../mac', results_folder='../results', logs_folder='../logs', system_thickness=1,
system_y_loc=0, sensor_material='Silicon', physics_list='QGSP_BERT_EMV'):
self.macro = macro_folder
self.results = results_folder
self.logs = logs_folder
self.sensor_material = sensor_material
self.physics_list = physics_list
        self.macro_dict = dict.fromkeys(['geometry', 'physics', 'actors', 'initialisation', 'beam', 'start_beam'])
self.geometry_dict = {}
self.physics_dict = {}
self.actor_dict = {}
self.system_y_loc = system_y_loc
self.system_thickness = system_thickness
self.results_files = {key:[] for key in ['trackers', 'hits', 'dose']}
self.timestamp = strftime("%Y%b%d_%H%M%S")
self.n_phantom_layers = 0
self.beam_created = False
self.no_system = True
def print_info(self):
print(f'Info for the system created on {self.timestamp}...')
print(f'Number of phantom layers: {self.n_phantom_layers}')
for layer in range(self.n_phantom_layers):
print()
def create_sensor(self, n=0, x_length=200, z_length=200, thickness=0.1, x_loc=0, y_loc=0, z_loc=0,
system='scanner'):
"""Compose a GATE macro for creating a sensor
In the current implementation sensor is a flat plane perpendicular to the beam
the beam is along y direction
all dimensions are in mm"""
# move starting point from the center to the top_surface of the system
# print(f'system thickness: {self.system_thickness}')
y_loc = self.system_thickness / 2 - thickness / 2 + y_loc
# print(f'y location of sensor {n} is: {y_loc}')
# create system fisrt but only once
geometry_lines = ''
if self.no_system:
# print(f'system created with: thickness: {self.system_thickness} at loc: {self.system_y_loc}')
geometry_lines += self.create_system(thickness = self.system_thickness, y_loc=self.system_y_loc)
self.no_system = False
# then add sensor lines
geometry_lines += f"""
#sensor
        /gate/{system}/daughters/name sensor{n}
/gate/{system}/daughters/insert box
/gate/sensor{n}/geometry/setXLength {x_length} mm
/gate/sensor{n}/geometry/setYLength {round(thickness,3)} mm
/gate/sensor{n}/geometry/setZLength {z_length} mm
/gate/sensor{n}/setMaterial {self.sensor_material}
/gate/sensor{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm
/gate/sensor{n}/vis/setVisible 1
/gate/sensor{n}/vis/setColor magenta
/gate/systems/{system}/level1/attach sensor{n}
/gate/sensor{n}/attachCrystalSD
"""
physics_lines = f"""
"""
self.geometry_dict[f'sensor{n}'] = geometry_lines
self.physics_dict[f'sensor{n}'] = physics_lines
def create_system(self, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0,
system='scanner'):
system_lines = f'''
/gate/world/daughters/name {system}
/gate/world/daughters/insert box
/gate/scanner/geometry/setXLength {x_length} mm
/gate/scanner/geometry/setYLength {thickness} mm
/gate/scanner/geometry/setZLength {z_length} mm
/gate/scanner/placement/setTranslation {x_loc} {y_loc} {z_loc} mm
/gate/scanner/setMaterial Air
/gate/scanner/vis/setVisible 1
/gate/scanner/vis/setColor cyan
/gate/scanner/vis/setLineStyle dashed
'''
return system_lines
def create_phantom_layer(self, n=0, x_length=200, z_length=200, thickness=1, x_loc=0, y_loc=0, z_loc=0,
material='Water', color='blue'):
'''
Compose a GATE macro for creating a phantom box.
which in the current implementation is a flat box perpendicular to beam which is in y direction
all dimensions are in cm
two actors are currently added to this volume
PhaseSpaceActor and DoseActor
'''
tracker_results_path = path.join(self.results, f'tracker_{self.timestamp}_{n}.root')
dose_results_path = path.join(self.results, f'dose_{self.timestamp}_{n}.txt')
self.results_files['trackers'].append(tracker_results_path)
self.results_files['dose'].append(dose_results_path)
if material == 'Skull':
color = 'yellow'
geometry_lines = f"""
#phatom box {n}
/gate/world/daughters/name box{n}
/gate/world/daughters/insert box
/gate/box{n}/geometry/setXLength {x_length} mm
/gate/box{n}/geometry/setYLength {thickness} mm
/gate/box{n}/geometry/setZLength {z_length} mm
/gate/box{n}/placement/setTranslation {x_loc} {y_loc} {z_loc} mm
/gate/box{n}/setMaterial {material}
/gate/box{n}/vis/setVisible 1
/gate/box{n}/vis/setColor {color}
"""
physics_lines = f"""#*************************************************************
# Physics (infrared divergence) cuts for the layer{n}
#*************************************************************
/gate/physics/Gamma/SetCutInRegion box{n} 0.1 mm
/gate/physics/Electron/SetCutInRegion box{n} 0.1 mm
/gate/physics/Positron/SetCutInRegion box{n} 0.1 mm
"""
actor_lines = f"""#*************************************************************
# attached actor to the box{n}
#***************************************************************
/gate/actor/addActor PhaseSpaceActor tracker{n}
/gate/actor/tracker{n}/save {tracker_results_path}
/gate/actor/tracker{n}/attachTo box{n}
/gate/actor/tracker{n}/enableNuclearFlag true
/gate/actor/tracker{n}/enableProductionProcess false
#/gate/actor/tracker{n}/useVolumeFrame true
/gate/actor/tracker{n}/storeOutgoingParticles true
/gate/actor/addActor DoseActor dose{n}
/gate/actor/dose{n}/save {dose_results_path}
/gate/actor/dose{n}/attachTo box{n}
/gate/actor/dose{n}/stepHitType random
/gate/actor/dose{n}/setResolution 1 10 1
/gate/actor/dose{n}/enableDose true
#/gate/actor/dose{n}/enableUncertaintyDose true
#/gate/actor/dose{n}/enableNumberOfHits true
"""
self.geometry_dict[f'layer{n}'] = geometry_lines
self.physics_dict[f'layer{n}'] = physics_lines
self.actor_dict[f'layer{n}'] = actor_lines
def create_physics(self):
physics_lines = f'''/gate/physics/addPhysicsList {self.physics_list}
/gate/physics/Gamma/SetCutInRegion world 1 mm
/gate/physics/Electron/SetCutInRegion world 1 mm
/gate/physics/Positron/SetCutInRegion world 1 mm
/gate/physics/Gamma/SetCutInRegion scanner 0.1 mm
/gate/physics/Electron/SetCutInRegion scanner 0.1 mm
/gate/physics/Positron/SetCutInRegion scanner 0.1 mm
/gate/physics/SetMaxStepSizeInRegion world 1 mm
/gate/physics/ActivateStepLimiter proton
/gate/physics/ActivateStepLimiter deuteron
/gate/physics/ActivateStepLimiter triton
/gate/physics/ActivateStepLimiter alpha
/gate/physics/ActivateStepLimiter GenericIon
/gate/physics/displayCuts
#/gate/physics/addProcess LowEnergyHadronIonisation
/gate/physics/addProcess HadronIonisation proton
'''
# add lines from phantom and sensors
for item in self.physics_dict.values():
physics_lines += item
# write to file and mark it with current timestamp
macro_name = f'physics{self.timestamp}.mac'
self.macro_dict['physics'] = macro_name
with open(f'{self.macro}/{macro_name}', 'w') as f:
f.write(physics_lines)
def create_geometry(self):
geometry_lines = f'''
/gate/geometry/setMaterialDatabase ../data/GateMaterials.db
# World
/gate/world/geometry/setXLength 1000 mm
/gate/world/geometry/setYLength 1000 mm
/gate/world/geometry/setZLength 1000 mm
/gate/world/setMaterial Air
'''
for item in self.geometry_dict.values():
geometry_lines += item
macro_name = f'geometry{self.timestamp}.mac'
self.macro_dict['geometry'] = macro_name
with open(f'{self.macro}/{macro_name}', 'w') as f:
f.write(geometry_lines)
def create_actors(self):
actor_lines = f'''
/gate/actor/addActor ProtonNuclearInformationActor myNuclearInfoActor
/gate/actor/myNuclearInfoActor/attachTo world
'''
for item in self.actor_dict.values():
actor_lines += item
macro_name = f'actor{self.timestamp}.mac'
self.macro_dict['actors'] = macro_name
with open(f'{self.macro}/{macro_name}', 'w') as f:
f.write(actor_lines)
def create_initialization(self):
lines = f'''
/gate/run/initialize
# Enable the following lines to display available and enabled processes
# /gate/physics/processList Available
# /gate/physics/processList Enabled
'''
macro_name = f'initialise{self.timestamp}.mac'
        self.macro_dict['initialisation'] = macro_name
with open(f'{self.macro}/{macro_name}', 'w') as f:
f.write(lines)
def create_beam(self, energy=250, sigma_energy=1.0, position={'x':0, 'y':250, 'z':0}):
self.beam_created = True
lines = f'''
#=====================================================
# BEAMS
#=====================================================
/gate/source/addSource PBS PencilBeam
/gate/source/PBS/setParticleType proton
/gate/source/PBS/setEnergy {energy} MeV
/gate/source/PBS/setSigmaEnergy {sigma_energy} MeV
/gate/source/PBS/setPosition {position['x']} {position['y']} {position['z']} mm
/gate/source/PBS/setSigmaX 2 mm
/gate/source/PBS/setSigmaY 2 mm
/gate/source/PBS/setSigmaTheta 3 mrad
/gate/source/PBS/setSigmaPhi 3 mrad
/gate/source/PBS/setEllipseXThetaEmittance 15 mm*mrad
#/gate/source/PBS/setEllipseXThetaRotationNorm negative
/gate/source/PBS/setEllipseYPhiEmittance 15 mm*mrad
/gate/source/PBS/setEllipseYPhiRotationNorm negative
/gate/source/PBS/setRotationAxis 1 0 0
/gate/source/PBS/setRotationAngle 90 deg
#/gate/application/setTotalNumberOfPrimaries 10000
'''
macro_name = f"beam{energy}.mac"
self.macro_dict['beam'] = macro_name
with open(f'{self.macro}/{macro_name}', 'w') as f:
f.write(lines)
def create_start_beams(self, n_primaries=10000):
lines = f"""
#=====================================================
# START BEAMS
#=====================================================
# JamesRandom Ranlux64 MersenneTwister
/gate/random/setEngineName MersenneTwister
/gate/random/setEngineSeed 123456
# /gate/random/verbose 1
# /gate/source/verbose 0
# to check Steplimiter
#/tracking/verbose 1
#/gate/application/noGlobalOutput
/gate/application/setTotalNumberOfPrimaries {n_primaries}
"""
macro_name = f'start_beam{n_primaries}.mac'
self.macro_dict['start_beam'] = macro_name
with open(f'{self.macro}/{macro_name}', 'w') as f:
f.write(lines)
def create_output(self):
results_path = path.join(self.results, f'TrackerHits{self.timestamp}')
self.results_files['hits'].append(results_path + '.root')
out = f"""
/gate/output/root/enable
/gate/output/root/setFileName {results_path}
/gate/output/root/setRootHitFlag 1
/gate/output/root/setRootSinglesFlag 0
#/gate/output/root/setRootCoincidencesFlag 0
/gate/output/root/setRootNtupleFlag 0
/gate/output/root/setRootOpticalFlag 0
"""
return out
def create_macro_file(self):
"""creates the main macro file
output: name the macro file, a dictionary containing the list of root file
dictionary keys are 'trackers', 'hits', 'dose'
"""
self.create_geometry()
self.create_physics()
self.create_actors()
self.create_geometry()
self.create_initialization()
if not self.beam_created:
self.create_beam()
self.create_start_beams()
lines = f"""
#=====================================================
# GEOMETRY
#=====================================================
/control/execute {self.macro}/{self.macro_dict["geometry"]}
#=====================================================
# PHYSICS
#=====================================================
/control/execute {self.macro}/{self.macro_dict["physics"]}
#================================================================
# ACTORS
#================================================================
/control/execute {self.macro}/{self.macro_dict["actors"]}
#=====================================================
# INITIALISATION
#=====================================================
/control/execute {self.macro}/{self.macro_dict["intialisation"]}
#=====================================================
# BEAMS
#=====================================================
/control/execute {self.macro}/{self.macro_dict["beam"]}
#=====================================================
# START BEAMS
#=====================================================
/control/execute {self.macro}/{self.macro_dict["start_beam"]}
#===============================================
# OUTPUT SETTINGS
#===============================================
{self.create_output()}
/gate/application/start
exit
"""
macro_name = f'{self.macro}/main_macro{self.timestamp}.mac'
with open(macro_name, 'w') as f:
f.write(lines)
return macro_name, self.results_files, self.timestamp
# Cell
def create_all(n_phantom_layers = 21, phantom_layer_thickness = [1]*21, phantom_material = 'Water', beam_energy = 250,
distance_to_system = 1, system_thickness = 1, n_sensors = 1, sensor_pitch = 1, sensor_thickness=0.5,
roc_thickness=0.5):
"""sets parameters for phantom and system geometries"""
phantom_thickness = sum(phantom_layer_thickness)
system_thickness = (sensor_thickness + sensor_pitch) * n_sensors
# initialize an instance of MacroWriter
my_macro = MacroWriter(system_y_loc=(-1)*phantom_thickness - distance_to_system,
system_thickness=system_thickness)
# create phantom layers
y_loc = 10
for layer in range(n_phantom_layers):
phantom_material = 'Water'
# the parameters of the particles are recorded at the exit from the layer
# the Air layer is added to get parameters at the entrance to the real phantom layer
if layer == 0:
phantom_material = 'Air'
# set material to Skull for the first and the last layer
elif layer == 1 or layer == n_phantom_layers - 1:
phantom_material = 'Skull'
# layers start from 10 and extend in the negative y direction
y_loc -= phantom_layer_thickness[layer]
my_macro.create_phantom_layer(n=layer, thickness=phantom_layer_thickness[layer], y_loc=y_loc,
material=phantom_material)
# create system with sensors and readout chips
for i_sensor in range(n_sensors):
sensor_loc = -(sensor_pitch + sensor_thickness) * i_sensor
roc_loc = sensor_loc - sensor_thickness/2 - roc_thickness/2
print(f'sensor {sensor_loc} - roc: {roc_loc}')
my_macro.create_sensor(n=i_sensor, y_loc= sensor_loc,
thickness=sensor_thickness)
my_macro.create_sensor(n=i_sensor + 100, y_loc= roc_loc,
thickness=roc_thickness)
my_macro.create_beam(energy=beam_energy)
return my_macro.create_macro_file()
# Cell
def run_macro(macroname, log_folder='../logs'):
"""runs macro file the log file is time stamped"""
log_file_name = f'gate_stdout_err_{strftime("%Y%b%d_%H%M%S")}.log'
log_path = path.join(log_folder, log_file_name)
with open(log_path,'a+') as f_stdout:
subprocess.run(['Gate', macroname], stdout=f_stdout, stderr=subprocess.STDOUT)
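def _example_run():
    # Usage sketch (not part of the exported __all__, never called automatically):
    # builds a macro set with illustrative beam/sensor parameters and launches
    # Gate on it. Requires a working Gate installation plus the ../mac,
    # ../results and ../logs folders.
    macro_path, result_files, timestamp = create_all(beam_energy=150, n_sensors=2)
    run_macro(macro_path)
    print(timestamp, result_files["trackers"])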
# Cell
import math
def Ek(mass, momentum):
'''Helpfull function that converts momentum to kinetic energy'''
return math.sqrt(mass**2 + momentum**2) - mass | 2.125 | 2 |
tests/test_read_requirements.py | benbenbang/Req2Toml | 10 | 12791833 | <reponame>benbenbang/Req2Toml<filename>tests/test_read_requirements.py
# standard library
from unittest import TestCase, expectedFailure
# req2toml plugin
from req2toml.utils import read_requirments
from tests.mixin import CaseMixin, TempfileMixin
class TestReadRequirmentsFormattedContent(CaseMixin, TempfileMixin, TestCase):
def test_read_req1_from_right_path_with_txt(self):
self.set_read_requirements_cases()
self.tmpPath = self._create_tempfile(
self.right_path_txt, self.formatted_content
)
reqs = read_requirments(self.ctx, self.tmpPath)
self.assertEqual(len(reqs), 3)
for req in reqs:
for r in req.split(";"):
self.assertIn(r, self.formatted_content)
class TestReadRequirmentsUnformattedContent(CaseMixin, TempfileMixin, TestCase):
def test_read_req2_from_right_path_with_txt(self):
self.set_read_requirements_cases()
self.tmpPath = self._create_tempfile(
self.right_path_txt, self.unformatted_content
)
reqs = read_requirments(self.ctx, self.tmpPath)
self.assertEqual(len(reqs), 4)
for req in reqs:
for r in req.split(";"):
self.assertIn(r, self.unformatted_content)
class TestReadRequirmentsMessyContent(CaseMixin, TempfileMixin, TestCase):
def test_read_req3_from_right_path_with_txt(self):
self.set_read_requirements_cases()
self.tmpPath = self._create_tempfile(self.right_path_txt, self.messy_content)
reqs = read_requirments(self.ctx, self.tmpPath)
self.assertEqual(len(reqs), 5)
for req in reqs:
for r in req.split(";"):
self.assertIn(r, self.messy_content)
class TestReadRequirmentsRstExt(CaseMixin, TempfileMixin, TestCase):
def test_read_req_from_right_path_with_rst(self):
self.set_read_requirements_cases()
self.tmpPath = self._create_tempfile(
self.support_fmt_file, self.formatted_content
)
reqs = read_requirments(self.ctx, self.tmpPath)
self.assertEqual(len(reqs), 3)
for req in reqs:
for r in req.split(";"):
self.assertIn(r, self.formatted_content)
@expectedFailure
class TestReadRequirmentsFileNotFound(CaseMixin, TempfileMixin, TestCase):
def test_read_req_from_wrong_path_with_txt(self):
self.set_read_requirements_cases()
self.tmpPath = self._create_tempfile(self.wrong_path_txt, self.messy_content)
read_requirments(self.ctx, self.wrong_path_txt)
@expectedFailure
class TestReadRequirmentsMarkdownExt(CaseMixin, TempfileMixin, TestCase):
def test_read_req_from_right_path_with_rst(self):
self.set_read_requirements_cases()
self.tmpPath = self._create_tempfile(
self.unsupport_fmt_file, self.formatted_content
)
read_requirments(self.ctx, self.tmpPath)
| 2.609375 | 3 |
grep/file_helper.py | florianbegusch/simple_grep | 0 | 12791834 | <reponame>florianbegusch/simple_grep
"""Supplies relevant files for grep.py."""
import os
import sys
def get_next_file(caller_dir, is_recursive):
"""Generates next file to be searched."""
assert type(caller_dir) is str
assert type(is_recursive) is bool
for root, dirs, files in os.walk(caller_dir):
for f in files:
# Environment specific file paths.
file_path = os.path.normpath('{0}/{1}'.format(root, f))
# Check if it is an actual file on disk.
if os.path.isfile(file_path):
yield file_path
if is_recursive is False:
break
def is_binary_file(file_path, block_size=512):
"""
    If a file can't be decoded as ASCII or contains NULL ('\x00') bytes,
    assume it is a binary file.
"""
assert type(file_path) == str
try:
with open(file_path, 'rb') as f:
block = f.read(block_size)
if b'\x00' in block:
return True # Consider files containing null bytes binary
elif not block:
return False # Consider an empty file a text file
try:
block.decode('ascii')
return False
except UnicodeDecodeError:
return True
except IOError as io_error:
return False
def with_read(file_path):
def wrapper(func):
with open(file_path, 'r') as f:
return func(f)
return wrapper
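# Hedged usage sketch (not part of the original module): combine get_next_file and
# is_binary_file to yield only text files; the default directory "." is an assumption.
def iter_text_files(caller_dir=".", is_recursive=True):
    """Yield file paths under caller_dir that do not look binary."""
    for file_path in get_next_file(caller_dir, is_recursive):
        if not is_binary_file(file_path):
            yield file_path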
| 3.109375 | 3 |
patent_search.py | f--f/nano-patents | 0 | 12791835 | import requests
import bs4
import sqlite3
# Relevant API Documentation:
# USPTO: https://developer.uspto.gov/ibd-api-docs/
# Google Maps: https://developers.google.com/maps/documentation/geocoding/intro
USPTO_API = "https://developer.uspto.gov/ibd-api/v1/patent/application"
MAPS_API = "https://maps.googleapis.com/maps/api/geocode/json"
# Link to individual USPTO search page by patent number:
USPTO_PAGE = "http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HIT\
OFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1={}.PN."
SQLITE_DB = "db.sqlite"
def get_latlon(location):
"""Gets latitude and longitude corresponding to a place using Google Maps
API."""
result = requests.get(MAPS_API, params={"address": location})
return result.json()['results'][0]['geometry']['location']
def scrape_patent_web(patent_num):
"""Returns BS4/HTML of USPTO patent search for a patent entry. Contains
extra information (location, text) not available through API."""
patent_html = USPTO_PAGE.format(patent_num)
return bs4.BeautifulSoup(requests.get(patent_html).content, "lxml")
def get_location(patent_html):
"""Gets location of company associated with patent entry (dict)."""
# Grab metadata table
ass_loc = patent_html.find(text="Assignee:").find_next()
# Split tag contents so that only first assignee location is retrieved
ass_text = ass_loc.text.split('\n\n')[0].replace('\n', '')
lind = ass_text.find("(")
rind = ass_text.rfind(")")
return ass_text[lind + 1:rind]
def get_title(patent_html):
"""Gets title of patent entry (dict). Avoids case mangling (MRI -> Mri)
associated with the API results."""
return ' '.join(patent_html.find_all('font')[-1].text.split())
def get_abstract(patent_html):
"""Gets abstract of company associated with patent entry (dict)."""
return ' '.join(patent_html.p.contents[0].split())
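# Hedged sketch (not part of the original script): scrape one USPTO patent page and
# collect the fields used below; the default patent number is an arbitrary placeholder.
def example_patent_fields(patent_num="9000000"):
    """Sketch only: return title, assignee location and abstract for one patent."""
    html = scrape_patent_web(patent_num)
    return {
        "title": get_title(html),
        "location": get_location(html),
        "abstract": get_abstract(html),
    }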
if __name__ == '__main__':
# Search for successful (granted) patent applications in nanotechnology
search_params = {"searchText": "nano", "applicationType": "UTILITY",
"documentType": "grant", "rows": 100, 'sortOrder': 'desc'}
response = requests.get(USPTO_API, params=search_params)
# Check if request went through successfully (status code 200)
if response.status_code == 200:
# Get list of results
patents = response.json()['response']['docs']
# Populate a new SQLite database
db = sqlite3.connect(SQLITE_DB)
# Overwrite old data
db.execute("DROP TABLE IF EXISTS patents")
db.execute("""CREATE TABLE patents
(id INTEGER PRIMARY KEY, title TEXT, year INTEGER, assignee TEXT,
city TEXT, abstract TEXT, lat REAL, lng REAL)""")
for pat in patents:
html = scrape_patent_web(pat['patentNumber'])
pat['title'] = get_title(html)
print(pat['patentNumber'], pat['title'])
# Skip patent if there's no company listed.
if "assignee" not in pat:
print("No company assigned to patent - skipping.")
continue
try:
city = get_location(html)
loc = get_latlon(city)
print(city, loc)
except (IndexError, KeyError):
print("Can't grab location information - skipping.")
continue
abstr = get_abstract(html)
db.execute("INSERT INTO patents VALUES (?,?,?,?,?,?,?,?)",
(int(pat['patentNumber']), pat['title'], int(pat['year']),
pat['assignee'][0], city, abstr, loc['lat'], loc['lng']))
db.commit()
db.close()
else:
print("Unexpected response code:", response.status_code)
| 2.96875 | 3 |
cryptoprice.py | Marto32/cryptoprice | 0 | 12791836 | #! /usr/bin/env python
import argparse
import datetime
import json
import time
import logging
import pandas as pd
import requests
from pathlib import Path
from retrying import retry
AVAILABLE_CURRENCY_PAIRS = ['BTC_AMP', 'BTC_ARDR', 'BTC_BCH', 'BTC_BCN', 'BTC_BCY', 'BTC_BELA',
'BTC_BLK', 'BTC_BTCD', 'BTC_BTM', 'BTC_BTS', 'BTC_BURST', 'BTC_CLAM',
'BTC_CVC', 'BTC_DASH', 'BTC_DCR', 'BTC_DGB', 'BTC_DOGE', 'BTC_EMC2',
'BTC_ETC', 'BTC_ETH', 'BTC_EXP', 'BTC_FCT', 'BTC_FLDC', 'BTC_FLO', 'BTC_GAME',
'BTC_GAS', 'BTC_GNO', 'BTC_GNT', 'BTC_GRC', 'BTC_HUC', 'BTC_LBC', 'BTC_LSK',
'BTC_LTC', 'BTC_MAID', 'BTC_NAV', 'BTC_NEOS', 'BTC_NMC', 'BTC_NXC', 'BTC_NXT',
'BTC_OMG', 'BTC_OMNI', 'BTC_PASC', 'BTC_PINK', 'BTC_POT', 'BTC_PPC', 'BTC_RADS',
'BTC_SC', 'BTC_STEEM', 'BTC_STORJ', 'BTC_STR', 'BTC_STRAT', 'BTC_SYS',
'BTC_VIA', 'BTC_VRC', 'BTC_VTC', 'BTC_XBC', 'BTC_XCP', 'BTC_XEM', 'BTC_XMR',
'BTC_XPM', 'BTC_XRP', 'BTC_XVC', 'BTC_ZEC', 'BTC_ZRX', 'ETH_BCH', 'ETH_CVC',
'ETH_ETC', 'ETH_GAS', 'ETH_GNO', 'ETH_GNT', 'ETH_LSK', 'ETH_OMG', 'ETH_REP',
'ETH_STEEM', 'ETH_ZEC', 'ETH_ZRX', 'USDT_BCH', 'USDT_BTC', 'USDT_DASH',
'USDT_ETC', 'USDT_ETH', 'USDT_LTC', 'USDT_NXT', 'USDT_REP', 'USDT_STR',
'USDT_XMR', 'USDT_XRP', 'USDT_ZEC', 'XMR_BCN', 'XMR_BLK', 'XMR_BTCD', 'XMR_DASH',
'XMR_LTC', 'XMR_MAID', 'XMR_NXT', 'XMR_ZEC', 'BTC_REP', 'BTC_RIC', 'BTC_SBD',]
class CryptoData(object):
"""
Poloneix Documentation: https://poloniex.com/support/api/
## returnChartData
Returns candlestick chart data. Required GET parameters are "currencyPair",
"period" (candlestick period in seconds; valid values are 300, 900, 1800, 7200,
14400, and 86400), "start", and "end". "Start" and "end" are given in UNIX
timestamp format and used to specify the date range for the data returned. Sample output:
[{"date":1405699200,"high":0.0045388,"low":0.00403001,"open":0.00404545,"close":0.00427592,"volume":44.11655644,
"quoteVolume":10259.29079097,"weightedAverage":0.00430015}, ...]
"""
def __init__(self, currency_pair='USDT_BTC', start_date='2015-01-01', end_date=None,
period=14400, destination=None, api='returnChartData', logger=None):
self.currency_pair = currency_pair.upper()
self.start_timestamp = self.get_timestamp(date_string=start_date)
if not end_date:
self.end_timestamp = 9999999999
else:
self.end_timestamp = self.get_timestamp(date_string=end_date)
        self.period = period
self.api = api
self.destination = destination
self.data = None
self.logger = logger
        self.url = f'https://poloniex.com/public?command={self.api}&currencyPair' \
                   f'={self.currency_pair}&start={self.start_timestamp}&end=' \
                   f'{self.end_timestamp}&period={self.period}'
def get_timestamp(self, date_string=None, date_format='%Y-%m-%d'):
if date_string is None:
return int(time.mktime(datetime.datetime.utcnow().timetuple()))
else:
return int(time.mktime(datetime.datetime.strptime(date_string, date_format).timetuple()))
def get_api_data(self):
response = requests.get(self.url)
return response
def parse_api_data_text(self, response):
parsed_data = json.loads(response.text)
if isinstance(parsed_data, dict) and 'error' in parsed_data.keys():
if parsed_data['error'] == 'Invalid currency pair.':
raise Exception(f'{self.currency_pair} is not a valid currency pair. ' \
f'You must use one of: \n{AVAILABLE_CURRENCY_PAIRS}')
else:
raise Exception(f'API Error: {parsed_data["error"]}')
return parsed_data
def build_dataframe(self, parsed_data):
data = pd.DataFrame(parsed_data)
data['datetime'] = data['date'].apply(datetime.datetime.utcfromtimestamp)
data.sort_values('datetime', inplace=True)
data['datetime_utc'] = data['datetime']
cols = ['datetime_utc', 'open', 'high', 'low', 'close', 'quoteVolume', 'volume',
'weightedAverage']
self.data = data[cols]
def save_data(self, dataframe):
dataframe.to_csv(self.destination, index=False)
return self
@retry(stop_max_attempt_number=7, wait_random_min=1000, wait_random_max=2000)
def run(self, save=True):
if self.data is None:
response = self.get_api_data()
self.build_dataframe(self.parse_api_data_text(response))
if save:
self.save_data(self.data)
return self
else:
return self.data
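# Hedged usage sketch (not part of the original module): pull candles for one pair into
# a pandas DataFrame without writing a CSV; the pair and dates are illustrative.
def example_pull(pair='USDT_BTC', start='2021-01-01', end='2021-02-01'):
    """Sketch only: return a DataFrame of 4-hour candles for the given pair."""
    client = CryptoData(currency_pair=pair, start_date=start, end_date=end,
                        period=14400, destination=None)
    return client.run(save=False)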
if __name__ == '__main__':
DESCRIPTION = """
A simple tool to pull price data from Poloneix's API. The data
can be saved down as a csv or used in memory as a pandas DataFrame.
Poloneix Documentation: https://poloniex.com/support/api/
"""
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('--currency-pair', dest='currency_pair', default='USDT_LTC',
type=str, help='A poloneix currency pair. Use --pairs to view pairs')
    parser.add_argument('--period', dest='period', default=14400, help='The timeframe to use '
'when pulling data in seconds. Defaults to 14400. Available options' \
' 300, 900, 1800, 7200, 14400, 86400.', type=int)
parser.add_argument('--dest', dest='dest', type=str, default=None, help='The full path to which '
'the output file should be saved. Defaults to the home directory.')
parser.add_argument('--start-date', dest='start_date', type=str,
                        default=(datetime.datetime.utcnow() + datetime.timedelta(-30)).strftime('%Y-%m-%d'),
help='The start date for the data pull in the format YYYY-MM-DD. Defaults ' \
'to 30 days ago.')
parser.add_argument('--end-date', dest='end_date', type=str, default=None,
help='The end date for the data pull in the format YYYY-MM-DD. Defaults ' \
'to now.')
parser.add_argument('--pairs', dest='pairs', action='store_true',
default=False, help='A flag used to view currency pairs.')
args = parser.parse_args()
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
if args.pairs:
chunks = [AVAILABLE_CURRENCY_PAIRS[x:x + 3] for x in range(0, len(AVAILABLE_CURRENCY_PAIRS), 3)]
setup = [[str(e) for e in row] for row in chunks]
lens = [max(map(len, col)) for col in zip(*setup)]
fmt = '\t'.join('{{:{}}}'.format(x) for x in lens)
table = [fmt.format(*row) for row in setup]
print('\n'.join(table))
CURRENCY_PAIR = args.currency_pair
SAVE = True
PERIOD = args.period
_dest = args.dest
if SAVE and _dest is None:
home_dir = str(Path.home())
DESTINATION = f'{home_dir}/{CURRENCY_PAIR}_{PERIOD}.csv'
else:
DESTINATION = _dest
START_DATE = args.start_date
END_DATE = args.end_date
client = CryptoData(
currency_pair=CURRENCY_PAIR,
destination=DESTINATION,
period=PERIOD,
start_date=START_DATE,
end_date=END_DATE,
logger=logger
)
client.run(save=SAVE)
| 1.8125 | 2 |
second/pytorch/annotations_process.py | lpj0822/pointpillars_train | 1 | 12791837 | <reponame>lpj0822/pointpillars_train<filename>second/pytorch/annotations_process.py
import os
import sys
sys.path.insert(0, os.getcwd() + "/.")
import math
import numpy as np
import json
# import pcl
from second.pytorch.show_3dbox import mayavi_show_3dbox
def getFileData(dataFilePath):
with open(dataFilePath, 'r') as file:
for line in file:
if line.strip():
yield line.strip()
return
def read_pcd_points(pcd_path):
pcl_points = pcl.load_XYZI(pcd_path)
points = []
for point in pcl_points:
x = point[0]
y = point[1]
z = point[2]
if abs(x) < 1e-1 and abs(y) < 1e-1 and abs(z) < 1e-1:
continue
points.append([point[0], point[1], point[2], point[3]])
numpy_points = np.array(points)
return numpy_points
def read_bin_points(bin_path):
points = np.fromfile(
str(bin_path), dtype=np.float32,
count=-1).reshape([-1, 4])
return points
def read_annotations_data(annotation_path):
my_file = open(annotation_path, encoding='utf-8')
result = json.load(my_file)
object_list = result['objects']['rect3DObject']
box_names = []
box_locs = []
for box_value in object_list:
if box_value['class'].strip() != 'DontCare':
yaw = -box_value['yaw'] # inverse clockwise
box = [box_value['centerX'],
box_value['centerY'],
box_value['centerZ'],
box_value['width'],
box_value['length'],
box_value['height'],
yaw]
if (box[0] >= -1.5) and (box[0] <= 1.5) and \
(box[1] >= -2.5) and (box[1] <= 2.5):
continue
if (box[0] >= -41) and (box[0] <= 41) and \
(box[1] >= -81) and (box[1] <= 41):
box_names.append(box_value['class'].strip())
box_locs.append(box)
gt_boxes = np.array(box_locs).astype(np.float32)
gt_names = np.array(box_names)
return gt_boxes, gt_names
def get_image_and_label_list(train_path):
result = []
annotation_post = ".json"
path, _ = os.path.split(train_path)
pcd_dir = os.path.join(path, "../pcds")
annotation_dir = os.path.join(path, "../Annotations")
for filename_and_post in getFileData(train_path):
filename, post = os.path.splitext(filename_and_post)
annotation_filename = filename + annotation_post
annotation_path = os.path.join(annotation_dir, annotation_filename)
pcd_path = os.path.join(pcd_dir, filename_and_post)
# print(pcd_path)
if os.path.exists(annotation_path) and \
os.path.exists(pcd_path):
result.append((annotation_path, pcd_path))
else:
print("%s or %s not exist" % (annotation_path, pcd_path))
return result
def show_annotations(info_path):
cloud_and_label_list = get_image_and_label_list(info_path)
print("remain number of infos:", len(cloud_and_label_list))
for annotation_path, pcd_path in cloud_and_label_list:
print(pcd_path)
points = read_bin_points(pcd_path)
gt_boxes, gt_names = read_annotations_data(annotation_path)
mayavi_show_3dbox(points, gt_boxes, gt_names)
if __name__ == '__main__':
show_annotations("/home/lpj/github/data/my_point_cloud/ali_dataset/ImageSets/Pedestrian_train.txt")
| 2.3125 | 2 |
python/ros_ws/src/recorder/nodes/relay.py | JonathanCamargo/Eris | 0 | 12791838 | #!/usr/bin/env python
# Relay node takes a list of topics and republish prepending /record namespace
import rospy
import rostopic
import signal
import sys
QUEUE_SIZE=1000 #Make sure we don't miss points
def signal_handler(sig,frame):
print('Ctrl+c')
sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
def echo(pub,msg):
    ''' echoes the message to a publisher '''
pub.publish(msg)
rospy.init_node('talker', anonymous=True)
# Get the list of topics to relay from rosparam
publishers=[]
subscribers=[]
# Manually list the topics to Relay
topics=['/emg']
for topic in topics:
#relay
(topicClass,topicName,c)=rostopic.get_topic_class(topic,blocking=True)
print("Relay for "+topicName+" with class "+str(topicClass))
pub = rospy.Publisher("/record"+topicName, topicClass, queue_size=QUEUE_SIZE)
callback=lambda msg: echo(pub,msg)
sub = rospy.Subscriber(topic, topicClass,callback)
publishers.append(pub)
subscribers.append(sub)
rospy.spin()
| 2.96875 | 3 |
humanreadable/_base.py | thombashi/humanreadable | 12 | 12791839 | """
.. codeauthor:: <NAME> <<EMAIL>>
"""
import abc
import re
from decimal import Decimal
from typepy import RealNumber, String
from .error import ParameterError, UnitNotFoundError
_BASE_ATTRS = ("name", "regexp")
_RE_NUMBER = re.compile(r"^[-\+]?[0-9\.]+$")
def _get_unit_msg(text_units):
return ", ".join([", ".join(values) for values in text_units.values()])
class HumanReadableValue(metaclass=abc.ABCMeta):
@abc.abstractproperty
def _text_units(self): # pragma: no cover
pass
@abc.abstractproperty
def _units(self): # pragma: no cover
pass
@abc.abstractmethod
def get_as(self, unit): # pragma: no cover
pass
def __init__(self, readable_value, default_unit=None):
self._default_unit = self._normalize_unit(default_unit)
self._number, self._from_unit = self.__preprocess(readable_value)
def __repr__(self):
items = [str(self._number)]
if self._from_unit.name:
items.append(self._from_unit.name)
return " ".join(items)
def _normalize_unit(self, unit):
if unit is None:
return None
for u in self._text_units:
if u.regexp.search(unit):
return u
raise ValueError("unit not found: {}".format(unit))
def __split_unit(self, readable_value):
if RealNumber(readable_value).is_type():
if self._default_unit is None:
raise UnitNotFoundError(
"unit not found",
value=readable_value,
available_units=_get_unit_msg(self._text_units),
)
return (readable_value, self._default_unit)
if not String(readable_value).is_type():
raise TypeError("readable_value must be a string")
for unit in self._units:
try:
if unit.regexp.search(readable_value):
number = unit.regexp.split(readable_value)[0]
if not RealNumber(number).is_type():
continue
return (number, unit)
except TypeError:
continue
raise UnitNotFoundError(
"unit not found", value=readable_value, available_units=_get_unit_msg(self._text_units),
)
def __preprocess(self, readable_value):
if readable_value is None:
raise TypeError("readable_value must be a string")
number, from_unit = self.__split_unit(readable_value)
if number is not None:
number = self.__to_number(number)
if from_unit is None:
raise UnitNotFoundError(
"unit not found",
value=readable_value,
available_units=_get_unit_msg(self._text_units),
)
return (number, from_unit)
def __to_number(self, readable_num):
match = _RE_NUMBER.search(readable_num)
if not match:
raise ParameterError(
"human-readable value should only include a number", value=readable_num
)
return Decimal(match.group())
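# Hedged sketch (not part of this package): a minimal concrete subclass showing how
# _text_units, _units and get_as are expected to fit together; the unit definitions
# and conversion factors below are assumptions for illustration only.
from collections import namedtuple
_ExampleUnit = namedtuple("_ExampleUnit", "name regexp factor")
class _ExampleDistance(HumanReadableValue):
    _KM = _ExampleUnit("km", re.compile(r"\s*km$|\s*kilometers?$", re.IGNORECASE), 1000)
    _M = _ExampleUnit("m", re.compile(r"\s*m$|\s*meters?$", re.IGNORECASE), 1)
    @property
    def _text_units(self):
        # unit -> accepted spellings, used only for error messages
        return {self._KM: ["km", "kilometers"], self._M: ["m", "meters"]}
    @property
    def _units(self):
        return (self._KM, self._M)
    def get_as(self, unit):
        unit = self._normalize_unit(unit)
        return self._number * self._from_unit.factor / unit.factor
# e.g. _ExampleDistance("3 km").get_as("m") -> Decimal("3000")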
| 2.984375 | 3 |
photo/views_show.py | shagun30/djambala-2 | 0 | 12791840 | <filename>photo/views_show.py
# -*- coding: utf-8 -*-
"""
/dms/photo/views_show.py
.. displays the contents of a photo
Django content Management System
<NAME>
<EMAIL>
The programs of the dms system may be used freely and adapted
to specific needs.
0.01  29.10.2007  Work started
0.02  31.10.2007  Display of the image
"""
from django.shortcuts import render_to_response
from django.template.loader import get_template
from django.template import Context
from django.utils.translation import ugettext as _
from dms.utils import show_link
from dms.utils_form import get_item_vars_show
from dms.views_comment import item_comment
from dms.file.utils import get_file_url
from dms.gallery.utils import get_exibition_url
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
def photo_show(request,item_container):
""" zeigt den Inhalt eines Photos """
def get_photo_name_middle(item_container):
""" ..liefert die Namen der normalen Bilder """
file_name = get_file_url(item_container)
ext_pos = file_name.rfind('.')
return file_name[:ext_pos] + '_middle' + file_name[ext_pos:]
app_name = 'photo'
parent = item_container.get_parent()
if parent.item.has_comments:
comments = item_comment(request, item_container=item_container)
else:
comments = ''
vars = get_item_vars_show(request, item_container, app_name)
vars['comments'] = comments
vars['image_url'] = get_photo_name_middle(item_container)
vars['image_url_big'] = show_link(get_file_url(item_container), _(u'Originalphoto'), True)
tItem = get_template('app/photo/show_photo.html')
vars['full_name'] = item_container.item.string_1
vars['email'] = item_container.item.string_2
vars['exibition_url'] = get_exibition_url(item_container)
vars['name'] = item_container.item.name
vars['text_more'] = tItem.render(Context(vars))
vars['text'] = ''
vars['image_url'] = ''
return render_to_response ( 'base-full-width.html', vars )
| 2.171875 | 2 |
animation_and_polting.py | SABS-R3-projects/PARA_PDE | 0 | 12791841 | import matplotlib.pyplot as plt
import time
from matplotlib import animation
def animate(U, timeSteps: int, positionSteps, timeStepSize: float):
    """Animate the solution U: one frame per time step, plotted against positionSteps."""
    fig = plt.figure()
    ims = []
    for i in range(timeSteps):
        im = plt.plot(positionSteps, U[:, i], animated=True, color='red')
        ims.append(im)
    ani = animation.ArtistAnimation(fig, ims, interval=10, blit=True, repeat_delay=500)
    plt.show()
#animation(u, trial.k_N, trial.x_range, trial.k) | 3.125 | 3 |
tests/TestPortfolio.py | patchan/potatofy | 2 | 12791842 | import unittest
from decimal import Decimal
from Broker import Broker
from Portfolio import Account, Portfolio
from tests.MockAuthenticator import MockAuthenticator, load_test_balance, \
load_test_positions
class TestPortfolio(unittest.TestCase):
def setUp(self):
self.broker = Broker()
self.broker.set_authenticator(MockAuthenticator())
self.portfolio = Portfolio(self.broker)
self.portfolio.load_accounts()
def test_load_accounts(self):
self.portfolio.load_accounts()
result = self.portfolio.list_accounts()
expected = ['111111', '222222']
self.assertEqual(result, expected)
def test_get_all_positions(self):
expected = {}
for account in self.portfolio.list_accounts():
positions = load_test_positions(account)
for position in positions:
if position['symbol'] not in expected:
expected[position['symbol']] = Decimal(
position['currentMarketValue']).quantize(
Decimal('0.00'))
else:
expected[position['symbol']] += Decimal(
position['currentMarketValue']).quantize(
Decimal('0.00'))
result = self.portfolio.get_all_positions()
self.assertEqual(result, expected)
def test_get_total_holdings(self):
expected = 0
for account in self.portfolio.list_accounts():
expected += Decimal(
load_test_balance(account)['combinedBalances'][0][
'marketValue']).quantize(Decimal('0.00'))
result = self.portfolio.get_total_holdings()
self.assertEqual(result, expected)
def test_get_cash(self):
expected = 0
for account in self.portfolio.list_accounts():
expected += Decimal(
load_test_balance(account)['combinedBalances'][0][
'cash']).quantize(Decimal('0.00'))
result = self.portfolio.get_cash()
self.assertEqual(result, expected)
class TestAccount(unittest.TestCase):
def setUp(self):
self.account_id = '111111'
self.account = Account(load_test_balance(self.account_id),
load_test_positions(self.account_id))
def test_get_balance(self):
expected = load_test_balance(self.account_id)
result = self.account.get_balance()
self.assertEqual(result, expected)
def test_get_total_holdings(self):
expected = Decimal(
load_test_balance(self.account_id)['combinedBalances'][0][
'marketValue']).quantize(Decimal('0.00'))
result = self.account.get_total_holdings()
self.assertEqual(result, expected)
def test_get_positions(self):
positions = load_test_positions(self.account_id)
expected = {}
for position in positions:
expected[position['symbol']] = Decimal(
position['currentMarketValue']).quantize(Decimal('0.00'))
result = self.account.get_positions()
self.assertEqual(result, expected)
def test_get_cash(self):
balance = load_test_balance(self.account_id)
expected = Decimal(balance['combinedBalances'][0]['cash']).quantize(
Decimal('0.00'))
result = self.account.get_cash()
self.assertEqual(result, expected)
if __name__ == '__main__':
unittest.main()
| 2.671875 | 3 |
tests/unit/baskerville_tests/models_tests/pipeline_task_tests/tests_task_base.py | deflect-ca/baskerville | 2 | 12791843 | <reponame>deflect-ca/baskerville<gh_stars>1-10
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from unittest import mock
from baskerville.models.config import BaskervilleConfig
from tests.unit.baskerville_tests.helpers.spark_testing_base import \
SQLTestCaseLatestSpark
from tests.unit.baskerville_tests.helpers.utils import test_baskerville_conf
class TestTask(SQLTestCaseLatestSpark):
def setUp(self):
super().setUp()
self.test_conf = test_baskerville_conf
self.baskerville_config = BaskervilleConfig(self.test_conf).validate()
def _helper_task_set_up(self, steps=()):
from baskerville.models.pipeline_tasks.tasks_base import Task
self.task = Task(
self.baskerville_config, steps
)
def test_initialize(self):
self._helper_task_set_up()
step_one = mock.MagicMock()
step_two = mock.MagicMock()
self.task.steps = [step_one, step_two]
with mock.patch.object(
self.task.service_provider, 'initialize_db_tools_service'
) as mock_initialize_db_tools_service:
with mock.patch.object(
self.task.service_provider, 'initialize_spark_service'
) as mock_initialize_spark_service:
self.task.initialize()
mock_initialize_db_tools_service.assert_called_once()
mock_initialize_spark_service.assert_called_once()
step_one.initialize.assert_called_once()
step_two.initialize.assert_called_once()
def test_run(self):
step_one = mock.MagicMock()
step_two = mock.MagicMock()
mock_steps = [step_one, step_two]
self._helper_task_set_up(mock_steps)
self.task.run()
for step in mock_steps:
step.set_df.assert_called_once()
step.set_df.return_value.run.assert_called_once()
self.assertTrue(len(self.task.remaining_steps) == 0)
def test_finish_up(self):
self._helper_task_set_up()
with mock.patch.object(
self.task.service_provider, 'finish_up'
) as mock_finish_up:
self.task.finish_up()
mock_finish_up.assert_called_once()
def test_reset(self):
self._helper_task_set_up()
with mock.patch.object(
self.task.service_provider, 'reset'
) as mock_reset:
self.task.reset()
mock_reset.assert_called_once()
class TestCacheTask(SQLTestCaseLatestSpark):
def setUp(self):
super().setUp()
self.test_conf = test_baskerville_conf
self.baskerville_config = BaskervilleConfig(self.test_conf).validate()
def _helper_task_set_up(self, steps=()):
from baskerville.models.pipeline_tasks.tasks_base import CacheTask
self.task = CacheTask(
self.baskerville_config, steps
)
def test_initialize(self):
self._helper_task_set_up()
step_one = mock.MagicMock()
step_two = mock.MagicMock()
self.task.steps = [step_one, step_two]
with mock.patch.object(
self.task.service_provider, 'initialize_db_tools_service'
) as mock_initialize_db_tools_service:
with mock.patch.object(
self.task.service_provider, 'initialize_spark_service'
) as mock_initialize_spark_service:
with mock.patch.object(
self.task.service_provider,
'initialize_request_set_cache_service'
) as mock_initialize_request_set_cache_service:
self.task.initialize()
mock_initialize_db_tools_service.assert_called_once()
mock_initialize_spark_service.assert_called_once()
mock_initialize_request_set_cache_service.\
assert_called_once()
step_one.initialize.assert_called_once()
step_two.initialize.assert_called_once()
class TestMLTask(SQLTestCaseLatestSpark):
def setUp(self):
super().setUp()
self.test_conf = test_baskerville_conf
self.baskerville_config = BaskervilleConfig(self.test_conf).validate()
def _helper_task_set_up(self, steps=()):
from baskerville.models.pipeline_tasks.tasks_base import MLTask
self.task = MLTask(
self.baskerville_config, steps
)
def test_initialize(self):
self._helper_task_set_up()
step_one = mock.MagicMock()
step_two = mock.MagicMock()
self.task.steps = [step_one, step_two]
self.task.service_provider = mock.MagicMock()
self.task.initialize()
self.task.service_provider.initialize_db_tools_service\
.assert_called_once()
self.task.service_provider\
.initialize_spark_service.assert_called_once()
self.task.service_provider.initialize_request_set_cache_service. \
assert_called_once()
self.task.service_provider.initalize_ml_services.assert_called_once()
step_one.initialize.assert_called_once()
step_two.initialize.assert_called_once()
| 2.046875 | 2 |
src/BiGruSelfattention.py | keigotak/COVID19Tweet | 0 | 12791844 | <gh_stars>0
import torch
import torch.nn as nn
from AbstractModel import AbstractModel
from Attention import Attention
class BiGruSelfattention(AbstractModel):
def __init__(self, device='cpu', hyper_params=None):
sup = super()
sup.__init__(device=device, hyper_params=hyper_params)
self.embeddings = nn.ModuleList([sup.get_embeddings(key=key, device=device) for key in self.hyper_params['embeddings']])
emb_dim = sum([item.embedding_dim for item in self.embeddings])
self.hidden_size = emb_dim
self.f_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True)
self.b_gru1 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True)
self.f_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True)
self.b_gru2 = nn.GRU(input_size=emb_dim, hidden_size=emb_dim, batch_first=True)
self.num_head = hyper_params['num_head']
self.attention = nn.ModuleList([Attention(dimensions=emb_dim) for _ in range(self.num_head)])
self.dropout = nn.Dropout(hyper_params['dropout_ratio'])
self.pooling = nn.AdaptiveAvgPool1d(1)
self.output = nn.Linear(emb_dim, hyper_params['num_class'])
self.to(device)
def forward(self, batch_sentence):
embeddings = [embedding(batch_sentence) for embedding in self.embeddings]
embeddings = torch.cat(embeddings, dim=2)
max_len = embeddings.shape[1]
outf1, hidf1 = self.f_gru1(self.dropout(embeddings))
resf1 = outf1 + embeddings
rev_resf1 = resf1[:,torch.arange(max_len-1, -1, -1),:] # reversed
outb1, hidb1 = self.b_gru1(self.dropout(rev_resf1))
resb1 = outb1 + rev_resf1
rev_resb1 = resb1[:,torch.arange(max_len-1, -1, -1),:] # not reversed
outf2, hidf2 = self.f_gru2(self.dropout(rev_resb1))
resf2 = outf2 + rev_resb1
rev_resf2 = resf2[:,torch.arange(max_len-1, -1, -1),:] # reversed
outb2, hidb2 = self.b_gru2(self.dropout(rev_resf2))
resb2 = outb2 + rev_resf2
rev_resb2 = resb2[:,torch.arange(max_len-1, -1, -1),:] # not reversed
drop_output = self.dropout(rev_resb2)
seq_logits, attention_weights = [], []
for i in range(self.num_head):
l, w = self.attention[i](query=drop_output, context=drop_output)
seq_logits.append(l)
attention_weights.append(w)
avg_seq_logits = None
for l in seq_logits:
if avg_seq_logits is None:
avg_seq_logits = l
else:
avg_seq_logits = avg_seq_logits + l
avg_seq_logits = avg_seq_logits / self.num_head
pooled_logits = self.pooling(avg_seq_logits.transpose(2, 1)).transpose(2, 1).squeeze()
output = self.output(pooled_logits)
return output
'''
- with stanford twitter embedding 200d
2020.07.09 18:22:50|epoch: 9|train loss: 388.35|valid loss: 99.32|valid f1: 82.573|valid precision: 80.894|valid recall: 84.322|valid accuracy: 83.200|valid tp: 398|valid fp: 94|valid fn: 74|valid tn: 434
- with stanford twitter embedding 100d
2020.07.09 15:38:28|epoch: 9|train loss: 496.90|valid loss: 103.71|valid f1: 81.206|valid precision: 77.247|valid recall: 85.593|valid accuracy: 81.300|valid tp: 404|valid fp: 119|valid fn: 68|valid tn: 409
- with ntua twitter embedding
2020.07.09 14:18:49|epoch: 17|train loss: 311.18|valid loss: 102.94|valid f1: 83.452|valid precision: 80.117|valid recall: 87.076|valid accuracy: 83.700|valid tp: 411|valid fp: 102|valid fn: 61|valid tn: 426
- apply ekphrasis
2020.07.09 02:02:37|epoch: 21|train loss: 253.24|valid loss: 146.71|valid f1: 81.186|valid precision: 78.458|valid recall: 84.110|valid accuracy: 81.600|valid tp: 397|valid fp: 109|valid fn: 75|valid tn: 419
- add Tweet normalizer
2020.07.03 20:04:02|epoch: 20|train loss: 319.42|valid loss: 140.92|valid f1: 81.307|valid precision: 78.501|valid recall: 84.322|valid accuracy: 81.700|valid tp: 398|valid fp: 109|valid fn: 74|valid tn: 419
- with multi head
2020.07.02 00:32:48|epoch: 12|train loss: 523.66|valid loss: 123.62|valid f1: 79.959|valid precision: 77.273|valid recall: 82.839|valid accuracy: 80.400|valid tp: 391|valid fp: 115|valid fn: 81|valid tn: 413
'''
| 2.296875 | 2 |
src/level/level.py | fish-face/agutaywusyg | 0 | 12791845 | <filename>src/level/level.py
# encoding=utf-8
### Levels define the terrain making up an area
TEST_LEVEL = (
"##########",
"#....#...#",
"#....#...#",
"#.####...#",
"#........#",
"#........#",
"##########")
class TerrainInfo:
def __init__(self, char, name, index, block_move, block_sight, **kwargs):
self.char = char
self.name = name
self.tiletype = 0
self.tileindex = index
self.block_move = block_move
self.block_sight = block_sight
for key in kwargs:
setattr(self, key, kwargs[key])
def bumped(self, other):
return False
wall = TerrainInfo('#', 'wall', (0,0), True, True)
floor = TerrainInfo(u'·', 'floor', (1,0), False, False)
TERRAINS = {'#' : wall, '.' : floor}
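# Hedged illustration (not part of the original module): TerrainInfo forwards extra
# keyword arguments to attributes, e.g. a water tile that blocks movement but not sight;
# the tile index and the swimmable flag are invented for this example.
water = TerrainInfo('~', 'water', (2, 0), True, False, swimmable=True)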
class Level:
mult = [[1, 0, 0, -1, -1, 0, 0, 1],
[0, 1, -1, 0, 0, -1, 1, 0],
[0, 1, 1, 0, 0, -1, -1, 0],
[1, 0, 0, 1, -1, 0, 0, -1]]
def __init__(self, world, width, height):
self.world = world
self.terraintypes = TERRAINS
self.objects = set()
self.map = []
self.regions = []
self.set_cursor(0,0)
self.map = []
for y in xrange(height):
self.map.append([])
for x in xrange(width):
self.map[-1].append([])
self.width = width
self.height = height
#self.setup()
#self.done_setup()
def setup(self):
"""Clear objects and initialize map. Override with level generation."""
self.set_cursor(0, 0)
for obj in self.objects.copy():
obj.destroy()
for y in xrange(self.height):
for x in xrange(self.width):
                self.map[y][x] = [floor]
self[self.world.player.location].append(self.world.player)
self.regions = []
def done_setup(self):
"""Things to do after level generation"""
for obj in self.objects:
obj.level_setup_finished()
knowledge = [f for facts in [obj.get_facts() for obj in self.objects] for f in facts]
for npc in [obj for obj in self.objects if isinstance(obj, Villager)]:
for i in range(random.randrange(100,101)):
if not knowledge:
break
fact = random.choice(knowledge)
npc.knowledge.add(fact)
def set_cursor(self, x, y):
"""Set the level's origin; all terrain-drawing will be translated by this amount"""
self.x = x
self.y = y
def translate(self, x, y):
"""Like set_cursor but relative"""
self.x += x
self.y += y
def add_region(self, region, translate=True):
"""Add a region and translate by our cursor"""
if translate:
region.points = [(x+self.x, y+self.y) for x, y in region.points]
self.regions.append(region)
region.update()
#self.regions.append(Region(name, self, [(x+self.x, y+self.y) for x, y in points]))
def get_regions(self, location):
"""Get regions containing the given location"""
return [region for region in self.regions if location in region]
def get_regions_of(self, obj):
"""Get regions containing given object or its container"""
return self.get_regions(self.get_coords_of(obj))
def get_coords_of(self, obj):
"""Get coordinates of given object or its container"""
if not obj.container:
return obj.location
return self.get_coords_of(obj.container)
def set_terrain(self, p, terrain):
x = p[0] + self.x
y = p[1] + self.y
if callable(terrain):
terrain = terrain(p)
if x < 0 or y < 0 or x >= self.width or y >= self.height:
return
if self.map[y][x]:
self.map[y][x][0] = terrain
else:
self.map[y][x] = [terrain]
# TODO: Nothing specifies that there must be exactly one terrain
# per tile, or even where it is in the tile's list.
def block_sight(self, p):
"""Return whether the tile at p blocks sight"""
for thing in self[p]:
if thing is None or thing.block_sight:
return True
return False
def get_fov(self, location):
"""Get the set of locations that can be seen from the given location"""
light = set((location,))
light = {location: 1}
radius = 20
for oct in range(8):
self._cast_light(
location[0], location[1], 1, 1.0, 0.0, radius,
self.mult[0][oct], self.mult[1][oct],
self.mult[2][oct], self.mult[3][oct], 0, light)
return light
def _cast_light(self, cx, cy, row, start, end, radius, xx, xy, yx, yy, id, light):
"""Recursive lightcasting function, obtained from
http://www.roguebasin.com/index.php?title=Python_shadowcasting_implementation"""
if start < end:
return
radius_squared = radius*radius
for j in range(row, radius+1):
dx, dy = -j-1, -j
blocked = False
while dx <= 0:
dx += 1
# Translate the dx, dy coordinates into map coordinates:
p = cx + dx * xx + dy * xy, cy + dx * yx + dy * yy
if p[0] < 0 or p[0] >= self.width or p[1] < 0 or p[1] >= self.height:
continue
# l_slope and r_slope store the slopes of the left and right
# extremities of the square we're considering:
l_slope, r_slope = (dx-0.5)/(dy+0.5), (dx+0.5)/(dy-0.5)
if start < r_slope:
continue
elif end > l_slope:
break
else:
# Our light beam is touching this square; light it:
dist_squared = dx**2 + dy**2
if dist_squared < radius_squared:
light[p] = dist_squared
if blocked:
# we're scanning a row of blocked squares:
if self.block_sight(p):
new_start = r_slope
continue
else:
blocked = False
start = new_start
else:
if self.block_sight(p) and j < radius:
# This is a blocking square, start a child scan:
blocked = True
self._cast_light(cx, cy, j+1, start, l_slope,
radius, xx, xy, yx, yy, id+1, light)
new_start = r_slope
# Row is scanned; do next row unless last square was blocked:
if blocked:
break
def is_connected(self, points):
if not points:
return False
connected = []
self.get_flood(points[0][0], points[0][1], set(points), connected)
if len(set(connected)) == len(set(points)):
return True
else:
return False
def get_flood(self, x, y, points, connected):
if (x,y) in points and (x,y) not in connected:
connected.append((x,y))
else:
return
self.get_flood(x+1, y, points, connected)
self.get_flood(x-1, y, points, connected)
self.get_flood(x, y+1, points, connected)
self.get_flood(x, y-1, points, connected)
def add_object(self, obj):
"""Add object to the level's list of objects"""
if obj in self.objects:
return
self.objects.add(obj)
#Translate by our cursor coords - this should only happen during level generation.
if obj.location:
x, y = obj.location
self[(x,y)].append(obj)
obj.location = (x+self.x, y+self.y)
def remove_object(self, obj):
"""Should only be called from obj.destroy()"""
if obj not in self.objects:
return
obj.location = None
self.objects.remove(obj)
def move_object(self, obj, location):
"""Should only be called from obj.move"""
if obj.location:
self[obj.location].remove(obj)
if location:
self[location].append(obj)
def get_tile(self, x, y):
"""Return all the stuff at the given location"""
try:
return self.map[y][x]
except (KeyError, IndexError):
return None
def get_tiles(self, x1, y1, x2, y2):
"""Iterator for all the tiles in the given rectangle"""
for y in xrange(y1, y2):
for x in xrange(x1, x2):
yield (x, y, self.map[y][x])
def __getitem__(self, location):
return self.get_tile(location[0], location[1]) if location else None
def __contains__(self, other):
return other in self.objects
class Region:
def __init__(self, name, level, points):
self.name = name
self.level = level
self.points = points
self.update()
def update(self):
"""Recalculate derivable properties of the region"""
if self.points:
x = sum((p[0] for p in self.points))/len(self.points)
y = sum((p[1] for p in self.points))/len(self.points)
else:
x = None
y = None
self.centre = (x, y)
self.area = len(self.points)
def __str__(self):
return self.name
def __contains__(self, p):
return p in self.points
from village import VillageGenerator
from castle import CastleGenerator
from fortress import FortressGenerator
from actor import Villager
import random
grass = TerrainInfo('v', 'road', (0,1), False, False)
class TestLevel(Level):
def setup(self):
Level.setup(self)
self.set_cursor(100,100)
#VillageGenerator(self).generate()
#CastleGenerator(self).generate()
FortressGenerator(self).generate()
self.set_cursor(0,0)
| 3.125 | 3 |
online_doctor/prescription/admin.py | zawad2221/online-doctor-server | 0 | 12791846 | from django.contrib import admin
from .models import Medicine, MedicineForm, Suggestion, Instruction, Company, Prescription, PrescribedMedicine, PatientHistory
admin.site.register(Medicine)
admin.site.register(MedicineForm)
admin.site.register(Suggestion)
admin.site.register(Instruction)
admin.site.register(Company)
admin.site.register(Prescription)
admin.site.register(PrescribedMedicine)
admin.site.register(PatientHistory)
| 1.25 | 1 |
packages/testcases/input/nameprep/extract-tests.py | taarushv/ethers.js | 4,494 | 12791847 | import json
import re
output = ""
for line in file("test-vectors-00.txt"):
line = line.strip()
if line == "" or line[0:1] == "#":
continue
if line.startswith("Josefsson") or line.startswith("Internet-Draft"):
continue
output += line.replace("\n", "")
Tests = [ ]
def get_byte(v):
if len(v) == 1:
return ord(v)
return int(v[2:4], 16)
def get_string(value):
value = value.strip()
if value[0] == '"' and value[-1] == '"':
return map(get_byte, re.findall("(\\\\x[0-9a-fA-F]{2}|.)", value[1:-1].replace('""', '')))
if value.lower() == "null":
return None
raise Exception("unhandled")
Tests = [ ]
matches = re.findall("({(?:.|\n)*?})", output)
for m in matches:
comps = m[1:-1].split(",")
test = dict(
comment = comps[0].strip()[1:-1],
input = get_string(comps[1]),
output = get_string(comps[2])
)
if len(comps) >= 4:
test["profile"] = get_string(comps[3])
if len(comps) >= 5:
test["flags"] = comps[4].strip()
if len(comps) >= 6:
test["rc"] = comps[5].strip()
Tests.append(test)
print json.dumps(Tests)
| 2.828125 | 3 |
log_analyzer.py | timvoet/log_parser | 1 | 12791848 | <reponame>timvoet/log_parser
#!/usr/bin/env python
from orm.model import LogEvent
import optparse
import time
import re
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
supported_DBS = ['sqlite3']
date_pattern = re.compile(r'(\d\d\d\d\-\d\d\-\d\d.*)(INFO|WARN|DEBUG|ERROR|FATAL)')
level_pattern = re.compile(r'.*(DEBUG|ERROR|INFO|WARN|FATAL).*')
class_name_pattern = re.compile(r'.*\[([a-zA-Z0-9]*\.[a-zA-Z0-9\.]*).*')
message_pattern = re.compile(r'.*\[.*\]\s-(.*)')
sessionId_pattern = re.compile(r'.*\[(sessionId:[A-Z0-9]*)\].*')
meetingId_pattern = re.compile(r'.*\[(meetingId:[A-Z0-9]*)\].*')
def createEngine(options):
"""
Create the engine for use
"""
LogEvent_table = LogEvent.__table__
metadata = LogEvent.metadata
if ( options.dbType == "sqlite3"):
engine = create_engine('sqlite:///%s' % options.dataStorePath, echo=options.verbose)
Session = sessionmaker(bind=engine)
session = Session()
metadata.create_all(engine)
return session
def processFile(f, options):
"""
Process the file and insert into the DB
"""
session = createEngine(options)
multiLineMessage = False
for line in f:
line = line.strip()
if ( not lineStartsWithDate( line ) ):
pass
else:
logEntry = processLine( line )
session.add(logEntry)
if options.verbose:
print logEntry
"Commit at end of file"
session.commit()
def lineStartsWithDate(line):
"""
checks to see if the line starts with a date
"""
match = re.search("\d\d\d\d\-\d\d\-\d\d", line )
if (re.search("\d\d\d\d\-\d\d\-\d\d", line ) ):
return True
else:
return False
def processLine(line):
"""
Parse the line and create the entry to log
"""
date_match = date_pattern.match(line)
if date_match:
date = date_match.group(1)
else:
date = "Invalid pattern"
level_match = level_pattern.match(line)
if level_match:
level = level_match.group(1)
else:
level = "UNKNOWN"
class_name_match = class_name_pattern.match(line)
if class_name_match:
class_name = class_name_match.group(1)
else:
class_name = "UNKNOWN"
message_match = message_pattern.match(line)
if message_match:
message = message_match.group(1)
else:
message = "CAN'T PARSE"
sessionId_match = sessionId_pattern.match(line)
if sessionId_match:
sessionId = sessionId_match.group(1)
pos = sessionId.index(":")
sessionId = sessionId[pos+1:]
else:
sessionId = None
meetingId_match = meetingId_pattern.match(line)
if meetingId_match:
meetingId = meetingId_match.group(1)
pos = meetingId.index(":")
meetingId = meetingId[pos+1:]
else:
meetingId = None
le = LogEvent(date_time=date.strip(),level=level.strip(),class_name=class_name.strip(), message=message.strip(), session_id=sessionId, meeting_id = meetingId)
return le
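# Hedged illustration (not part of the original script): the regexes above expect
# log4j-style lines; the sample line here is invented to show what processLine extracts.
def exampleParse():
    sample = ("2013-05-01 12:00:00,123 INFO [com.example.MeetingService] "
              "[sessionId:ABC123] [meetingId:XYZ789] - meeting started")
    return processLine(sample)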
def main():
"""
Main entry point.
"""
p = optparse.OptionParser(description="Parses the log files to generate a DB for analysis.", prog="log_analyzer",version="0.1", usage="%prog --filePath <path_to_file>")
p.add_option("--filePath","-f",action="store",help="specifies the log file to analyze")
p.add_option("--dataStorePath","-s", action="store", help="The path to store the db file", default="tomcat_stats.db")
p.add_option("--dbType", "-d", action="store", default="sqlite3", help="Database format, currently supports " + ' '.join(supported_DBS ))
p.add_option("--verbose", "-v", action="store_true", default=False, help="Enables verbose output")
options,arguments = p.parse_args()
if ( not options.filePath ):
p.print_help()
return
if ( not options.dbType in supported_DBS ):
p.print_help()
return
try:
f = open(options.filePath)
except IOError:
print "No such file: %s" % options.filePath
raw_input("Press Enter to close window")
return
processFile(f, options)
if __name__ == "__main__":
main()
| 2.3125 | 2 |
tests/unit/outputs/cli/ci/system/utils/sorting/test_jobs.py | rhos-infra/cibyl | 3 | 12791849 | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from unittest import TestCase
from unittest.mock import Mock
from cibyl.outputs.cli.ci.system.utils.sorting.jobs import SortJobsByName
class TestSortJobsByName(TestCase):
"""Tests for :class:`SortJobsByName`.
"""
def test_names_are_equal(self):
"""Checks that two jobs are equal if they share the same name.
"""
job1 = Mock()
job1.name.value = 'job'
job2 = Mock()
job2.name.value = 'job'
comparator = SortJobsByName()
self.assertEqual(
0,
comparator.compare(job1, job2)
)
def test_alphabetical_order(self):
"""Checks that the comparator will sort jobs in alphabetical order.
"""
job1 = Mock()
job1.name.value = 'A'
job2 = Mock()
job2.name.value = 'B'
comparator = SortJobsByName()
self.assertEqual(
-1,
comparator.compare(job1, job2)
)
self.assertEqual(
1,
comparator.compare(job2, job1)
)
| 3.4375 | 3 |
src/utils.py | aemccoy/GrayCode-QubitEncoding | 4 | 12791850 | <reponame>aemccoy/GrayCode-QubitEncoding
import numpy as np
from openfermion.ops import QubitOperator
from itertools import product
from functools import reduce
mats = {"I" : np.eye(2),
"X" : np.array(([[0, 1], [1, 0]])),
"Y" : np.array(([[0, -1j], [1j, 0]])),
"Z" : np.array(([[1, 0], [0, -1]])),
"P0" : np.array(([[1, 0], [0, 0]])),
"P1" : np.array(([[0, 0], [0, 1]]))}
def gray_code(N):
""" Generate a Gray code for traversing the N qubit states. """
if N <= 0 or type(N) is not int:
raise ValueError("Input for gray code construction must be a positive integer.")
if N == 1: # Base case
return ["0", "1"]
else:
sub_code = gray_code(N-1)
return ["0" + x for x in sub_code] + ["1" + x for x in sub_code[::-1]]
def find_flipped_bit(s1, s2):
""" For two adjacent elements in a gray code, determine which bit is the
one that was flipped.
"""
if len(s1) == 0 or len(s2) == 0:
raise ValueError("Empty string inputted.")
if len(s1) != len(s2):
raise ValueError("Strings compared in gray code must have the same length.")
if any([x != "0" and x != "1" for x in s1]) or any([x != "0" and x != "1" for x in s2]):
raise ValueError(f"One of inputs {s1}, {s2} is not a valid binary string.")
# Sum the strings elementwise modulo 2; the sum will be 1 only in the slot
# where we flipped a bit
string_sums = [(int(s1[i]) + int(s2[i])) % 2 for i in range(len(s1))]
if string_sums.count(1) == 0:
raise ValueError(f"Strings {s1} and {s2} are the same.")
elif string_sums.count(1) > 1:
raise ValueError(f"Strings {s1} and {s2} are not ordered in a gray code.")
return string_sums.index(1)
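# Illustrative sketch (not part of the original module): adjacent Gray-code strings
# differ in exactly one bit, and find_flipped_bit recovers that bit's index.
def _gray_code_demo(N=3):
    """Sketch only: return (s1, s2, flipped_index) for each adjacent pair of gray_code(N)."""
    code = gray_code(N)
    return [(a, b, find_flipped_bit(a, b)) for a, b in zip(code, code[1:])]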
def expand_projector_sequence(seq):
# Take a list of projectors, e.g. ["P0", "P1", "X"] and expand it in terms of Paulis
# return an openfermion QubitOperator
# Copy the sequence before making replacements
    substitution_seq = list(seq)
if len(seq) <= 0:
raise ValueError(f"Cannot expand empty projector sequence.")
if any([x not in mats.keys() for x in seq]):
raise ValueError(f"Sequence {seq} contains elements that are not Paulis or P0/P1 projectors.")
prefactor = 1 / (2 ** (substitution_seq.count("P0") + substitution_seq.count("P1")))
# First, replace P0 and P1 with 0.5 (1 +- Z)
for item_idx in range(len(seq)):
if seq[item_idx] == "P0":
substitution_seq[item_idx] = ["I", "Z"]
elif seq[item_idx] == "P1":
substitution_seq[item_idx] = ["I", "mZ"]
qubit_operators = []
# Expand out the term into individual Paulis
for pauli in product(*substitution_seq):
pauli_string = "".join(pauli)
# Extract the sign and remove the m indicators
sign = (-1) ** pauli_string.count("m")
pauli_string = pauli_string.replace("m", "")
# Remove identities and label Paulis with their qubit indices
qubit_operator_string = ""
for qubit_idx in range(len(pauli_string)):
if pauli_string[qubit_idx] != "I":
qubit_operator_string += f"{pauli_string[qubit_idx]}{qubit_idx} "
qubit_operators.append(QubitOperator(qubit_operator_string, sign*prefactor))
full_operator = QubitOperator()
for term in qubit_operators:
full_operator += term
return full_operator
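# Hedged example (not part of the original module): expanding ["P0", "X"] yields
# 0.5 [X1] + 0.5 [Z0 X1], since P0 on qubit 0 is replaced by (I + Z)/2.
def _projector_expansion_demo():
    """Sketch only: expand one projector/Pauli sequence into a QubitOperator."""
    return expand_projector_sequence(["P0", "X"])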
def pauli_generators(N, x_loc=None):
""" Construct a list of strings of Pauli generators on N qubits. If x_loc is set
to an integer, then we will construct the generators on N qubits where the x_loc qubit
is set to X and the remaining qubits contain the generators of N - 1 qubits.
For example,
pauli_generators(4) = ['ZIII', 'IZII', 'IIZI', 'IIIZ']
    pauli_generators(4, 2) = ['ZIXI', 'IZXI', 'IIXZ']
"""
if N < 1:
raise ValueError("Number of Paulis must be >= 1 to construct generators.")
if x_loc is None:
return ["I" * idx + "Z" + "I" * (N - idx - 1) for idx in range(N)]
else:
if x_loc < 0 or x_loc > N:
raise ValueError(f"Invalid placement ({x_loc}) X in {N}-qubit Pauli.")
base_generators = [list("I" * idx + "Z" + "I" * (N - idx - 2)) for idx in range(N - 1)]
# If we have two qubits, need to add I to the generator list
if N == 2:
base_generators.append(["I"])
for idx in range(len(base_generators)):
base_generators[idx].insert(x_loc, "X")
return ["".join(gen) for gen in base_generators]
def get_pauli_matrix(pauli):
""" Take a Pauli string and compute its matrix representation.
Parameters:
pauli (string): A string indicating the Pauli whose expectation value
we want to compute, e.g. "ZZIZZ". Tensor products are computed
from left to right here.
"""
pauli_list = list(pauli)
if any([op not in ['I', 'X', 'Y', 'Z'] for op in pauli_list]):
raise ValueError("Pauli string must consist only of I, X, Y, or Z.")
return reduce(np.kron, [mats[sigma_idx] for sigma_idx in pauli_list])
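# Hedged example (not part of the original module): "ZZ" expands, left to right, to
# np.kron(Z, Z), i.e. the 4x4 diagonal matrix diag(1, -1, -1, 1).
def _pauli_matrix_demo():
    """Sketch only: return the matrix representation of the two-qubit Pauli ZZ."""
    return get_pauli_matrix("ZZ")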
def pauli_expectation_value(pauli, meas_results):
""" Compute and return the expectation value of a given Pauli
based on the measurement outcomes observed in result.
Parameters:
pauli (string): A string indicating the Pauli whose expectation value
we want to compute, e.g. "ZZIZZ"
meas_results (Dict): A dictionary containing the results of an experiment run on qiskit.
The key value pairs are computational basis states and number of times that
state was observed, e.g. {'1001' : 24, '1000' : 36}, etc.
Returns:
The expectation value of pauli.
"""
pauli_list = list(pauli)
n_qubits = len(pauli_list)
n_shots = sum(meas_results.values())
if any([op not in ['I', 'X', 'Y', 'Z'] for op in pauli_list]):
raise ValueError("Pauli string must consist only of I, X, Y, or Z.")
# Determine whether the computational basis states in meas_results are +1 or -1
# eigenstates of the Pauli in question.
eigenvalues = {}
for basis_state in meas_results.keys():
num_z_and_1 = [-1 if (basis_state[bit_idx] == '1' and pauli_list[bit_idx] != 'I') else 1 for bit_idx in range(n_qubits)]
eigenvalues[basis_state] = reduce(lambda x, y: x*y, num_z_and_1)
# Count number of +1 and -1 outcomes, i.e. 0 and 1
num_0_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == 1])
num_1_outcomes = sum([meas_results[key] for key in eigenvalues.keys() if eigenvalues[key] == -1])
return (num_0_outcomes - num_1_outcomes) / n_shots | 3.296875 | 3 |