max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
model/report.py | CJBriers/orange-river-thesis | 0 | 12790751 | """Report gap, histogram, and sampling statistics for station time series."""
import sys
import math
import pandas as pd
import numpy as np
import csv
def calc_gaps(station):
"""Calculate gaps in time series"""
df = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
df = df.set_index(['Date'])
df.index = pd.to_datetime(df.index)
dates = df.index.values
first_date = dates[0]
last_date = dates[-1]
print('Data from {0} to {1}'.format(first_date, last_date))
total_range = last_date - first_date
total_range_seconds = total_range / np.timedelta64(1, 's')
last_read_date = first_date
gaps = []
total_gap = 0
for d in dates:
diff = d - last_read_date
seconds = diff / np.timedelta64(1, 's')
hours = diff / np.timedelta64(1, 'h')
if hours > 72: # met stations
# if hours > 24: # flow stations
total_gap = total_gap + seconds
gaps.append(seconds)
last_read_date = d
print('Number of gaps {0}'.format(len(gaps)))
years = math.floor(total_gap / 3600 / 24 / 365.25)
days = math.floor((total_gap / 3600 / 24 % 365.25))
print('Total gap {0} years'.format(total_gap / 3600 / 24 / 365.25))
print('Total gap {0} years {1} days'.format(years, days))
total_left = total_range_seconds - total_gap
years_left = math.floor(total_left / 3600 / 24 / 365.25)
days_left = math.floor((total_left / 3600 / 24 % 365.25))
print('Total left {0} years'.format(total_left / 3600 / 24 / 365.25))
print('Total left {0} years {1} days'.format(years_left, days_left))
# gap_file = '{0}-gaps.txt'.format(station)
# np.savetxt(gap_file, gaps, delimiter=',', fmt="%s")
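# Worked example of the conversion in calc_gaps (numbers are illustrative): a total gap of
# 94,608,000 seconds is 94,608,000 / 3600 / 24 = 1,095 days, or about 3.0 years, which the
# floor/modulo arithmetic above reports as 2 years and 364 days.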
def calc_histogram(station):
"""Get histogram"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
total_count = df.count()
i0 = df[(df['Value'] == 0)].count()
i1 = df[(df['Value'] > 0) & (df['Value'] <= 10)].count()
i2 = df[(df['Value'] > 10) & (df['Value'] <= 50)].count()
i3 = df[(df['Value'] > 50) & (df['Value'] <= 100)].count()
i4 = df[(df['Value'] > 100) & (df['Value'] <= 200)].count()
i5 = df[(df['Value'] > 200) & (df['Value'] <= 300)].count()
i6 = df[(df['Value'] > 300) & (df['Value'] <= 400)].count()
i7 = df[(df['Value'] > 400) & (df['Value'] <= 500)].count()
i8 = df[(df['Value'] > 500) & (df['Value'] <= 1000)].count()
i9 = df[(df['Value'] > 1000)].count()
print('Total count: {0}'.format(total_count['Value']))
print(' 0: {0}'.format(i0['Value']/total_count['Value']))
print(' 0 - 10: {0}'.format(i1['Value']/total_count['Value']))
print(' 10 - 50: {0}'.format(i2['Value']/total_count['Value']))
print(' 50 - 100: {0}'.format(i3['Value']/total_count['Value']))
print('100 - 200: {0}'.format(i4['Value']/total_count['Value']))
print('200 - 300: {0}'.format(i5['Value']/total_count['Value']))
print('300 - 400: {0}'.format(i6['Value']/total_count['Value']))
print('400 - 500: {0}'.format(i7['Value']/total_count['Value']))
print('500 - 1000: {0}'.format(i8['Value']/total_count['Value']))
print(' > 1000: {0}'.format(i9['Value']/total_count['Value']))
def calc_histogram4(station1, station2):
"""Get histogram"""
raw1 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station1), parse_dates=['Date'])
raw1 = raw1.set_index(['Date'])
raw1.index = pd.to_datetime(raw1.index)
df1 = raw1.resample('1H').mean()
raw2 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station2), parse_dates=['Date'])
raw2 = raw2.set_index(['Date'])
raw2.index = pd.to_datetime(raw2.index)
df2 = raw2.resample('1H').mean()
df1['Total'] = df1['Value'] + df2['Value']
total_count = df1.count()
i0 = df1[(df1['Total'] == 0)].count()
i1 = df1[(df1['Total'] > 0) & (df1['Total'] <= 10)].count()
i2 = df1[(df1['Total'] > 10) & (df1['Total'] <= 50)].count()
i3 = df1[(df1['Total'] > 50) & (df1['Total'] <= 100)].count()
i4 = df1[(df1['Total'] > 100) & (df1['Total'] <= 200)].count()
i5 = df1[(df1['Total'] > 200) & (df1['Total'] <= 300)].count()
i6 = df1[(df1['Total'] > 300) & (df1['Total'] <= 400)].count()
i7 = df1[(df1['Total'] > 400) & (df1['Total'] <= 500)].count()
i8 = df1[(df1['Total'] > 500) & (df1['Total'] <= 1000)].count()
i9 = df1[(df1['Total'] > 1000)].count()
print('Total count: {0}'.format(total_count['Total']))
print(' 0: {0}'.format(i0['Total']/total_count['Total']))
print(' 0 - 10: {0}'.format(i1['Total']/total_count['Total']))
print(' 10 - 50: {0}'.format(i2['Total']/total_count['Total']))
print(' 50 - 100: {0}'.format(i3['Total']/total_count['Total']))
print('100 - 200: {0}'.format(i4['Total']/total_count['Total']))
print('200 - 300: {0}'.format(i5['Total']/total_count['Total']))
print('300 - 400: {0}'.format(i6['Total']/total_count['Total']))
print('400 - 500: {0}'.format(i7['Total']/total_count['Total']))
print('500 - 1000: {0}'.format(i8['Total']/total_count['Total']))
print(' > 1000: {0}'.format(i9['Total']/total_count['Total']))
def calc_histogram3(station1, station2, station3):
"""Get histogram"""
raw1 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station1), parse_dates=['Date'])
raw1 = raw1.set_index(['Date'])
raw1.index = pd.to_datetime(raw1.index)
df1 = raw1.resample('1H').mean()
raw2 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station2), parse_dates=['Date'])
raw2 = raw2.set_index(['Date'])
raw2.index = pd.to_datetime(raw2.index)
df2 = raw2.resample('1H').mean()
raw3 = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station3), parse_dates=['Date'])
raw3 = raw3.set_index(['Date'])
raw3.index = pd.to_datetime(raw3.index)
df3 = raw3.resample('1H').mean()
df1['Total'] = df1['Value'] + df2['Value'] + df3['Value']
total_count = df1.count()
i0 = df1[(df1['Total'] == 0)].count()
i1 = df1[(df1['Total'] > 0) & (df1['Total'] <= 10)].count()
i2 = df1[(df1['Total'] > 10) & (df1['Total'] <= 50)].count()
i3 = df1[(df1['Total'] > 50) & (df1['Total'] <= 100)].count()
i4 = df1[(df1['Total'] > 100) & (df1['Total'] <= 200)].count()
i5 = df1[(df1['Total'] > 200) & (df1['Total'] <= 300)].count()
i6 = df1[(df1['Total'] > 300) & (df1['Total'] <= 400)].count()
i7 = df1[(df1['Total'] > 400) & (df1['Total'] <= 500)].count()
i8 = df1[(df1['Total'] > 500) & (df1['Total'] <= 1000)].count()
i9 = df1[(df1['Total'] > 1000)].count()
print('Total count: {0}'.format(total_count['Total']))
print(' 0: {0}'.format(i0['Total']/total_count['Total']))
print(' 0 - 10: {0}'.format(i1['Total']/total_count['Total']))
print(' 10 - 50: {0}'.format(i2['Total']/total_count['Total']))
print(' 50 - 100: {0}'.format(i3['Total']/total_count['Total']))
print('100 - 200: {0}'.format(i4['Total']/total_count['Total']))
print('200 - 300: {0}'.format(i5['Total']/total_count['Total']))
print('300 - 400: {0}'.format(i6['Total']/total_count['Total']))
print('400 - 500: {0}'.format(i7['Total']/total_count['Total']))
print('500 - 1000: {0}'.format(i8['Total']/total_count['Total']))
print(' > 1000: {0}'.format(i9['Total']/total_count['Total']))
def calc_histogram2(station):
"""Get histogram"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
total_count = df.count()
i0 = df[(df['Value'] == 0)].count()
i1 = df[(df['Value'] > 0) & (df['Value'] <= 5)].count()
i2 = df[(df['Value'] > 5) & (df['Value'] <= 10)].count()
i3 = df[(df['Value'] > 10) & (df['Value'] <= 20)].count()
i4 = df[(df['Value'] > 20) & (df['Value'] <= 50)].count()
i5 = df[(df['Value'] > 50) & (df['Value'] <= 100)].count()
i6 = df[(df['Value'] > 100)].count()
print('Total count: {0}'.format(total_count['Value']))
print(' 0: {0}'.format(i0['Value']/total_count['Value']))
print(' 0 - 5: {0}'.format(i1['Value']/total_count['Value']))
print(' 5 - 10: {0}'.format(i2['Value']/total_count['Value']))
print(' 10 - 20: {0}'.format(i3['Value']/total_count['Value']))
print(' 20 - 50: {0}'.format(i4['Value']/total_count['Value']))
print(' 50 - 100: {0}'.format(i5['Value']/total_count['Value']))
print(' > 100: {0}'.format(i6['Value']/total_count['Value']))
def median_sampling_rate(station):
"""Get median over year sampling rate"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('Y').count()
df.to_csv('{0}_sample_count.csv'.format(station))
def resample(station):
"""Resample station data"""
raw = pd.read_csv('../data/all_clean/{0}-clean.txt'.format(station), parse_dates=['Date'])
raw = raw.set_index(['Date'])
raw.index = pd.to_datetime(raw.index)
df = raw.resample('1H').mean()
df = df.round({'Value': 0})
df.to_csv('{0}_resampled.csv'.format(station))
if __name__ == '__main__':
station = sys.argv[1]
calc_gaps(station)
#calc_histogram(station)
#calc_histogram2(station)
#calc_histogram3('D7H014Z', 'D7H015Z', 'D7H016Z')
#calc_histogram4('D7H008', 'D7H017PLUS')
#median_sampling_rate(station)
#resample(station)
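# Example invocation (the station ID is illustrative, borrowed from the commented calls above):
#   python report.py D7H008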
| 3.40625 | 3 |
web_app/infinite_trivia.py | 4398TempleSpring2020/cscapstoneproject-infinitetrivia | 1 | 12790752 | from app import app, socketio
if __name__ == '__main__':
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
socketio.run(app)
| 1.421875 | 1 |
m3.py | bshishov/DeepForecasting | 4 | 12790753 | import numpy as np
import matplotlib.pyplot as plt
import keras
import keras.layers as klayers
import time_series as tsutils
import processing
import metrics
class ModelBase(object):
# Required 'context' information for a model
input_window = None
# How many points the model can predict for a single given context
output_window = None
# How the output is shifted w.r.t. the input window
offset = 1
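# Illustrative reading of the contract above (an interpretation, not stated in the source): with
# input_window=5 and output_window=1, the model sees 5 consecutive points as context and predicts
# the point that follows; ``offset`` describes how far the output window is shifted relative to
# the input window.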
class Model(ModelBase):
def __init__(self,
input_shape: tuple = (5, 1),
outputs: int = 1):
self.input_window = input_shape[0]
self.output_window = outputs
self.offset = outputs
model = keras.Sequential()
model.add(klayers.Conv1D(10, input_shape=input_shape, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Conv1D(10, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Conv1D(10, padding='same', kernel_size=3, activation='relu'))
model.add(klayers.Flatten())
model.add(klayers.Dense(outputs))
#model.add(klayers.Dense(10, input_shape=input_shape))
#model.add(klayers.Dense(outputs))
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
self.model = model
def predict(self, x, *args, **kwargs):
return self.model.predict(x, *args, **kwargs)
def train(self, x, y, *args, **kwargs):
self.model.fit(x, y, *args, **kwargs)
def main():
path = 'D:\\data\\M3\\M3Other\\N2836.csv'
data = np.genfromtxt(path)
print('Data len: {0}'.format(len(data)))
predict_points = 8
model = Model()
ts = tsutils.TimeSeries(data, test_size=predict_points, scaler=processing.StandardScaler())
x_train, y_train, t_train = ts.train_data(input_window=model.input_window, output_window=model.output_window, expand=True)
model.train(x_train, y_train, epochs=200)
#x_test, y_test, t_test = ts.train_data(input_window=model.input_window, output_window=model.output_window)
ctx = np.expand_dims(ts.get_test_context(model.input_window, expand=True), axis=0)
y_pred = tsutils.free_run_batch(model.predict, ctx, predict_points, ts, batch_size=1)
y_true = ts.get_test_data()
y_pred_flat = ts.inverse_y(np.squeeze(y_pred))
y_true_flat = ts.inverse_y(np.squeeze(y_true))
print(metrics.evaluate(y_true_flat, y_pred_flat, metrics=('smape', 'mae', 'umbrae')))
'''
x_all, y_all, t_all = ts.train_data(input_window=model.input_window, output_window=model.output_window)
y_all_pred = model.predict(x_all)
t_all_flat = ts.inverse_y(np.squeeze(t_all))
y_all_flat = ts.inverse_y(np.squeeze(y_all))
y_pred_pred_flat = ts.inverse_y(np.squeeze(y_all_pred))
plt.plot(t_all_flat, y_all_flat)
plt.plot(t_all_flat, y_pred_pred_flat)
plt.show()
'''
#y_free_run_flat = np.squeeze(predictions)
#plt.plot(np.reshape(y_all, (-1, )))
#plt.plot(np.concatenate((y_pred_flat, y_free_run_flat)))
#plt.show()
if __name__ == '__main__':
main()
| 2.765625 | 3 |
0451 Sort Characters By Frequency.py | MdAbedin/leetcode | 4 | 12790754 | from collections import Counter, defaultdict
class Solution:
def frequencySort(self, s: str) -> str:
counts = Counter(s)
chars = defaultdict(list)
for char in counts:
chars[counts[char]].append(char)
ans = ''
for count in sorted(chars.keys(),reverse=True):
for char in chars[count]:
ans += char*count
return ans
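# Illustrative example (added for clarity): for s = "tree", the counts are {'t': 1, 'r': 1, 'e': 2},
# so this returns "eetr"; any ordering of equal-frequency characters (e.g. "eert") is also valid.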
| 3.671875 | 4 |
Back-End/Python/Basics/Part -3- Hash Maps/02- Dictionaries/some-dict-code/04_API_difference.py | ASHISHKUMAR2411/Programming-CookBook | 25 | 12790755 | <reponame>ASHISHKUMAR2411/Programming-CookBook
n1 = {'employees': 100, 'employee': 5000, 'users': 10, 'user': 100}
n2 = {'employees': 250, 'users': 23, 'user': 230}
n3 = {'employees': 150, 'users': 4, 'login': 1000}
dict_union = n1.keys() | n2.keys() | n3.keys()
dict_intersection = n1.keys() & n2.keys() & n3.keys()
dict_differ = dict_union - dict_intersection
print('Union |', dict_union)
print('Intersection &', dict_intersection)
print('Difference -', dict_differ)
set_differ = (set(n1) | set(n2) | set(n3)) - (set(n1) & set(n2) & set(n3))
print(set_differ)
# Union | {'login', 'user', 'employee', 'users', 'employees'}
# Intersection & {'users', 'employees'}
# Difference - {'login', 'employee', 'user'}
def nodes(node1, node2, node3):
node_order = (node1, node2, node3)
def get_node():
return (set(node1) | set(node2) | set(node3)) - (set(node1) & set(node2) & set(node3))
node = get_node()
return {i: (node1.get(i, 0), node2.get(i, 0), node3.get(i, 0)) for i in node}
my_nodes = nodes(n1, n2, n3)
print('my_nodes -->', my_nodes) # {'login', 'employee', 'user'}
# zip(n1.get(i, 0), n2.get(i, 0), n3.get(i, 0))
def identify(node1, node2, node3):
union = node1.keys() | node2.keys() | node3.keys()
intersection = node1.keys() & node2.keys() & node3.keys()
relevant = union - intersection
result = {key: (node1.get(key, 0),
node2.get(key, 0),
node3.get(key, 0))
for key in relevant}
return result
result = identify(n1, n2, n3)
for k, v in result.items():
print(f'{k}: {v}')
#
# login: (0, 0, 1000)
# user: (100, 230, 0)
# employee: (5000, 0, 0)
#
| 3.96875 | 4 |
hello/forms.py | choudharykartik1717/python-docs-hello-django | 0 | 12790756 | <gh_stars>0
from django import forms
from django.db import models
from django.db.models import fields
from .models import *
class AddForm(forms.ModelForm):
class Meta:
model = Post
fields = ('title','desc')
| 1.914063 | 2 |
spookyconsole/gui/style.py | FRCTeam7170/spooky-console | 0 | 12790757 |
"""
Contains styling utilities for tkinter widgets.
Some features include:
- a hierarchical styling system for the non-ttk widgets;
- a collection of colour constants;
- and reasonable cross-platform named fonts.
"""
import tkinter as tk
import tkinter.font as tkfont
from contextlib import contextmanager
# Colour constants:
GRAY_SCALE_0 = "#000000"
GRAY_SCALE_1 = "#111111"
GRAY_SCALE_2 = "#222222"
GRAY_SCALE_3 = "#333333"
GRAY_SCALE_4 = "#444444"
GRAY_SCALE_5 = "#555555"
GRAY_SCALE_6 = "#666666"
GRAY_SCALE_7 = "#777777"
GRAY_SCALE_8 = "#888888"
GRAY_SCALE_9 = "#999999"
GRAY_SCALE_A = "#AAAAAA"
GRAY_SCALE_B = "#BBBBBB"
GRAY_SCALE_C = "#CCCCCC"
GRAY_SCALE_D = "#DDDDDD"
GRAY_SCALE_E = "#EEEEEE"
GRAY_SCALE_F = "#FFFFFF"
MUTE_BLUE = "#333355"
MUTE_GREEN = "#335533"
MUTE_RED = "#663333"
MUTE_YELLOW = "#888833"
MUTE_TURQUOISE = "#335555"
MUTE_PURPLE = "#553377"
MUTE_PINK = "#663366"
MUTE_ORANGE = "#774433"
RED_TEAM = "#992222"
BLUE_TEAM = "#222299"
UNKNOWN_TEAM = GRAY_SCALE_B
BOOL_TRUE = MUTE_GREEN
BOOL_FALSE = MUTE_RED
# Named fonts:
FONT_MONOSPACE_TITLE = None
"""
Tkinter named font with these properties:
- size: 10
- family: "Courier"
- weight: BOLD
``init_fonts`` must be called to initialize this font.
"""
FONT_MONOSPACE_NORMAL = None
"""
Tkinter named font with these properties:
- size: 8
- family: "Courier"
``init_fonts`` must be called to initialize this font.
"""
FONT_SERIF_TITLE = None
"""
Tkinter named font with these properties:
- size: 10
- family: "Times"
- weight: BOLD
``init_fonts`` must be called to initialize this font.
"""
FONT_SERIF_NORMAL = None
"""
Tkinter named font with these properties:
- size: 8
- family: "Times"
``init_fonts`` must be called to initialize this font.
"""
FONT_SANS_SERIF_TITLE = None
"""
Tkinter named font with these properties:
- size: 10
- family: "Helvetica"
- weight: BOLD
``init_fonts`` must be called to initialize this font.
"""
FONT_SANS_SERIF_NORMAL = None
"""
Tkinter named font with these properties:
- size: 8
- family: "Helvetica"
``init_fonts`` must be called to initialize this font.
"""
# Backup copies of "normal" tkinter widgets:
_tkButton = tk.Button
_tkCanvas = tk.Canvas
_tkCheckbutton = tk.Checkbutton
_tkEntry = tk.Entry
_tkFrame = tk.Frame
_tkLabel = tk.Label
_tkLabelFrame = tk.LabelFrame
_tkListbox = tk.Listbox
_tkMenu = tk.Menu
_tkPanedWindow = tk.PanedWindow
_tkRadiobutton = tk.Radiobutton
_tkScale = tk.Scale
_tkScrollbar = tk.Scrollbar
_tkSpinbox = tk.Spinbox
_tkText = tk.Text
_tkToplevel = tk.Toplevel
_global_style = None
"""A global ``Style`` object used by the ``stylize`` context manager."""
class StyleableMixin:
"""
Mixin class used to make a widget "styleable". This class works in cooperation with the ``Style`` class. Styleable
widgets should never use their ``widget.configure`` method to set styles in their ``StyleableMixin.STYLED_OPTS``;
``StyleableMixin.apply_style`` should be used instead. (Although configuring "functional" styles through
``widget.configure`` is perfectly fine.)
There are four sources of style options and they work on a priority system (higher number means higher priority):
1. ``StyleableMixin.TK_DEFAULT_STYLES``
2. ``StyleableMixin.DEFAULT_STYLES``
3. A given ``Style`` instance
4. A given dictionary of overrides
"""
STYLED_OPTS = []
"""
A list of strings specifying all the widget options (i.e. the ones that would normally be passed to
``widget.configure(...)``) to be considered for styling; any other options encountered are considered "functional"
and hence won't be regulated in any way by this class. Subclasses should define this.
"""
TK_DEFAULT_STYLES = None
"""
A dictionary of default (platform-specific) styles to revert to if an initially explicitly given style option is
revoked. This dictionary is lazily built for each unique styled class (i.e. a style is added to this dictionary the
first time it changes from its default).
"""
DEFAULT_STYLES = None
"""
A dictionary of default user-defined styles for a given class. Subclasses may define this. One may also set this at
runtime through ``StyleableMixin.set_defaults``, but any changes made won't take effect on instances of
that class until ``StyleableMixin.update_style`` is called.
"""
def __init__(self, master=None, cnf={}, *args, style=None, **overrides):
"""
:param master: The master tkinter widget.
:param cnf: A dictionary of configuration options. This is here to mimic the tkinter widget constructors.
:type cnf: dict
:param args: Additional args for the widget constructor.
:param style: An initial style to employ.
:type style: Style
:param overrides: Style overrides to use.
"""
super().__init__(master, cnf, *args)
self._style = None
"""The widget's current ``Style``."""
self._overrides = None
"""A dictionary of the widget's currently-overridden styles."""
self._assure_default_dicts_exist()
# Initialize the widget's style to the given style or the global style, which may be set by the stylize context
# manger.
self.apply_style(style or _global_style, **overrides)
def apply_style(self, style=None, *, keep_style=False, keep_overrides=False, **overrides):
"""
Apply the given style with the given overrides.
:param style: The style to employ, or None to clear the current style (if ``keep_style`` is False).
:type style: Style
:param keep_style: If ``style`` is None, setting this will keep the previous style. Does nothing if ``style`` is
given.
:type keep_style: bool
:param keep_overrides: Whether to append the given ``overrides`` to the already existing overridden styles, or
replace them.
:type keep_overrides: bool
:param overrides: Style overrides to use.
"""
# Sort out the functional options from the styled ones.
functional, styled = {}, {}
for k, v in overrides.items():
if k in self.STYLED_OPTS:
styled[k] = v
else:
functional[k] = v
# Directly apply the functional options
self.configure(functional)
if keep_overrides:
self._overrides.update(styled)
else:
self._overrides = styled
if style:
if self._style:
self._style.unregister_styleable(self)
self._style = style
style.register_styleable(self)
elif self._style and not keep_style:
self._style.unregister_styleable(self)
self._style = None
self.update_style()
def update_style(self):
"""Update this widget's styles."""
# Alias TK_DEFAULT_STYLES for conciseness.
tk_defaults = self.__class__.TK_DEFAULT_STYLES
# Start off the styles_dict with a copy of the tk_defaults, since those styles are of lowest priority (1). We
# will update the dict with increasing style priority so that lower priority styles will get overridden.
styles_dict = tk_defaults.copy()
# Update the dict with the class-specific user-provided defaults. (Priority 2)
styles_dict.update(self.__class__.DEFAULT_STYLES)
if self._style:
# Update the dict with the styles from the Style object. (Priority 3)
styles_dict.update(self._style.get_relevant_styles(self))
# Update the dict with the overridden styles. (Priority 4)
styles_dict.update(self._overrides)
# Before we actually configure the widget, save any of the styles set to this widget by default so we may return
# to them if an explicit style option on this widget is removed.
tk_defaults.update((k, self.cget(k)) for k in styles_dict if k not in tk_defaults)
self.configure(styles_dict)
@classmethod
def _assure_default_dicts_exist(cls):
"""
Make sure that this class's ``StyleableMixin.TK_DEFAULT_STYLES`` and ``StyleableMixin.DEFAULT_STYLES`` are
defined (every class needs its own version of these; if they were initialized to an empty dict in
``StyleableMixin`` then all classes would share the same dictionaries).
"""
if cls.TK_DEFAULT_STYLES is None:
cls.TK_DEFAULT_STYLES = {}
if cls.DEFAULT_STYLES is None:
cls.DEFAULT_STYLES = {}
@classmethod
def set_defaults(cls, keep_existing=True, **defaults):
"""
Convenience method to update the default styles for this class.
:param keep_existing: Whether to keep the already existing default styles, or replace them.
:type keep_existing: bool
:param defaults: A dictionary of default styles.
"""
cls._assure_default_dicts_exist()
if keep_existing:
cls.DEFAULT_STYLES.update(defaults)
else:
cls.DEFAULT_STYLES = defaults
# TODO: Styleable Tk (root)?
class Button(StyleableMixin, tk.Button):
"""Styleable version of ``tkinter.Button``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "overrelief", "justify"]
class Canvas(StyleableMixin, tk.Canvas):
"""Styleable version of ``tkinter.Canvas``."""
STYLED_OPTS = ["bg", "bd", "selectbackground", "selectborderwidth", "selectforeground", "highlightcolor",
"highlightbackground", "highlightthickness", "relief"]
class Checkbutton(StyleableMixin, tk.Checkbutton):
"""Styleable version of ``tkinter.Checkbutton``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "overrelief", "justify",
"indicatoron", "offrelief", "selectcolor"]
class Entry(StyleableMixin, tk.Entry):
"""Styleable version of ``tkinter.Entry``."""
STYLED_OPTS = ["font", "bg", "disabledbackground", "fg", "disabledforeground", "readonlybackground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "justify",
"selectbackground", "selectborderwidth", "selectforeground"]
class Frame(StyleableMixin, tk.Frame):
"""Styleable version of ``tkinter.Frame``."""
STYLED_OPTS = ["bg", "bd", "highlightcolor", "highlightbackground", "highlightthickness", "relief"]
class Label(StyleableMixin, tk.Label):
"""Styleable version of ``tkinter.Label``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "justify"]
class LabelFrame(StyleableMixin, tk.LabelFrame):
"""Styleable version of ``tkinter.LabelFrame``."""
STYLED_OPTS = ["font", "bg", "fg", "bd", "highlightcolor", "highlightbackground", "highlightthickness", "relief",
"labelanchor"]
class Listbox(StyleableMixin, tk.Listbox):
"""Styleable version of ``tkinter.Listbox``."""
STYLED_OPTS = ["font", "bg", "activestyle", "fg", "disabledforeground", "bd", "relief", "highlightcolor",
"highlightbackground", "highlightthickness", "selectbackground", "selectborderwidth",
"selectforeground"]
class Menu(StyleableMixin, tk.Menu):
"""Styleable version of ``tkinter.Menu``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"selectcolor", "relief", "activeborderwidth"]
class PanedWindow(StyleableMixin, tk.PanedWindow):
"""Styleable version of ``tkinter.PanedWindow``."""
STYLED_OPTS = ["bg", "bd", "relief", "sashrelief", "showhandle"]
class Radiobutton(StyleableMixin, tk.Radiobutton):
"""Styleable version of ``tkinter.Radiobutton``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "activeforeground", "disabledforeground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "overrelief", "justify",
"indicatoron", "offrelief", "selectcolor"]
class Scale(StyleableMixin, tk.Scale):
"""Styleable version of ``tkinter.Scale``."""
STYLED_OPTS = ["font", "bg", "activebackground", "fg", "bd", "showvalue", "sliderrelief", "troughcolor",
"highlightcolor", "highlightbackground", "highlightthickness", "relief"]
class Scrollbar(StyleableMixin, tk.Scrollbar):
"""Styleable version of ``tkinter.Scrollbar``."""
STYLED_OPTS = ["bg", "activebackground", "activerelief", "bd", "elementborderwidth", "troughcolor",
"highlightcolor", "highlightbackground", "highlightthickness", "relief"]
class Spinbox(StyleableMixin, tk.Spinbox):
"""Styleable version of ``tkinter.Spinbox``."""
STYLED_OPTS = ["font", "bg", "disabledbackground", "fg", "disabledforeground", "readonlybackground", "bd",
"highlightcolor", "highlightbackground", "highlightthickness", "relief", "justify",
"selectbackground", "selectborderwidth", "selectforeground", "buttonbackground",
"buttondownrelief", "buttonuprelief", "insertbackground", "insertborderwidth"]
class Text(StyleableMixin, tk.Text):
"""Styleable version of ``tkinter.Text``."""
STYLED_OPTS = ["font", "bg", "fg", "bd", "insertbackground", "insertborderwidth", "highlightcolor",
"highlightbackground", "highlightthickness", "relief", "selectbackground", "selectborderwidth",
"selectforeground"]
class Toplevel(StyleableMixin, tk.Toplevel):
"""Styleable version of ``tkinter.Toplevel``."""
STYLED_OPTS = ["bg", "bd", "highlightcolor", "highlightbackground", "highlightthickness", "relief"]
class Style:
"""
A dictionary proxy for tkinter widget styles. ``StyleableMixin``s register themselves to ``Style``s so that whenever
a ``Style`` is updated, any registered ``StyleableMixin``s are automatically updated to reflect the changes.
``Style``s employ a parent-child system in which a ``Style`` can have one or more parents to inherit styles from.
When a style is requested from a ``Style`` and cannot be found in said ``Style``'s own styles, the style is looked
for in its ancestors, prioritizing the first ones specified in the constructor. When a ``Style`` is updated, all
child ``Style``s of the changed ``Style`` are recursively informed of the change.
"""
DEFAULTS = {}
"""Global default styles for all ``Style`` objects. This should be set through ``Style.set_defaults``."""
def __init__(self, *parents, **styles):
"""
:param parents: ``Style``s to inherit styles from.
:param styles: Styles to use.
"""
self._dict = styles
"""A dictionary of the styles specific to this ``Style``."""
self._styled = []
"""
A list of registered ``StyleableMixin``s. These are signaled of any changes to this ``Style`` in
``Style._signal_style_changed``.
"""
self._parents = parents
"""A list of this ``Style``'s parent ``Style``s."""
self._children = []
"""
A list of registered child ``Style``s. These are signaled of any changes to this ``Style`` in
``Style._signal_style_changed``.
"""
for parent in parents:
parent._register_child(self)
def register_styleable(self, styleable):
"""
Called by ``StyleableMixin`` objects to receive updates on whenever this style changes. This should not be
called by user code.
:param styleable: The styleable widget to register.
:type styleable: StyleableMixin
"""
self._styled.append(styleable)
def unregister_styleable(self, styleable):
"""
Called by ``StyleableMixin`` objects to stop receiving updates on whenever this style changes. This should not
be called by user code.
:param styleable: The styleable widget to unregister.
:type styleable: StyleableMixin
"""
# This will raise an error if the styleable is not already registered.
self._styled.remove(styleable)
def _register_child(self, style):
"""
Called by child ``Style``s to receive updates on whenever this style changes.
:param style: The child ``Style``.
:type style: Style
"""
self._children.append(style)
# Keep the same naming scheme as tkinter.
def configure(self, **kwargs):
"""
Configure this ``Style``'s styles.
:param kwargs: The styles to add/edit.
"""
self._dict.update(kwargs)
self._signal_style_changed()
config = configure
"""Alias for ``Style.configure``."""
def remove_styles(self, *styles):
"""
Remove the given styles from this ``Style``. This will raise a ``KeyError`` if any of the given style names are
not in this ``Style``.
:param styles: Style names to remove.
"""
for style in styles:
del self._dict[style]
self._signal_style_changed()
def _signal_style_changed(self):
"""
Internal method to update all the ``StyleableMixin`` widgets registered to this ``Style`` and its children.
"""
for s in self._styled:
s.update_style()
for child in self._children:
child._signal_style_changed()
def get_relevant_styles(self, widget):
"""
Determine and return all the styles in this ``Style`` recognized by the given tkinter widget.
:param widget: The tkinter widget to find styles for.
:return: All the styles recognized by the given tkinter widget.
"""
return {k: v for k, v in map(lambda k: (k, self.get_style(k)), widget.keys()) if v is not None}
def get_style(self, key):
"""
Return the style corresponding to the given style name, first checking this ``Style`` and its parents, then
resorting to the global default styles (``Style.DEFAULTS``).
:param key: The style name.
:type key: str
:return: The style corresponding to the given style name or None if it could not be found.
"""
return self._get_style(key) or self.__class__.DEFAULTS.get(key)
def _get_style(self, key):
"""
Attempt to retrieve the given style from this ``Style``'s ``Style._dict``. If that fails, recursively search
this widget's parents.
:param key: The style name.
:type key: str
:return: The style corresponding to the given style name or None if it could not be found.
"""
ret = self._dict.get(key)
if ret is None:
for p in self._parents:
ret = p._get_style(key)
if ret is not None:
break
return ret
@classmethod
def set_defaults(cls, keep_existing=True, **defaults):
"""
Convenience method to update the global default styles (``Style.DEFAULTS``).
:param keep_existing: Whether to keep the already existing default styles, or replace them.
:type keep_existing: bool
:param defaults: A dictionary of styles.
"""
if keep_existing:
cls.DEFAULTS.update(defaults)
else:
cls.DEFAULTS = defaults
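# Illustrative sketch (not part of the original module): how the parent-child ``Style`` system and a
# styleable widget described above might be wired together. Only names defined in this module and in
# tkinter are used; the widget text and colours are arbitrary.
def _example_style_usage(root):
    base = Style(bg=GRAY_SCALE_2, fg=GRAY_SCALE_E)                    # parent style
    accents = Style(base, bg=MUTE_BLUE)                               # child inherits fg, overrides bg
    button = Button(root, style=accents, text="Run", relief=tk.FLAT)  # per-widget override (priority 4)
    accents.configure(bg=MUTE_GREEN)                                  # registered widgets restyle automatically
    return button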
def patch_tk_widgets():
"""Monkey patch the tkinter widgets with their styleable equivalents."""
tk.Button = Button
tk.Canvas = Canvas
tk.Checkbutton = Checkbutton
tk.Entry = Entry
tk.Frame = Frame
tk.Label = Label
tk.LabelFrame = LabelFrame
tk.Listbox = Listbox
tk.Menu = Menu
tk.PanedWindow = PanedWindow
tk.Radiobutton = Radiobutton
tk.Scale = Scale
tk.Scrollbar = Scrollbar
tk.Spinbox = Spinbox
tk.Text = Text
tk.Toplevel = Toplevel
def unpatch_tk_widgets():
"""Revert the tkinter widgets back to their defaults after monkey patching them with ``patch_tk_widgets``."""
tk.Button = _tkButton
tk.Canvas = _tkCanvas
tk.Checkbutton = _tkCheckbutton
tk.Entry = _tkEntry
tk.Frame = _tkFrame
tk.Label = _tkLabel
tk.LabelFrame = _tkLabelFrame
tk.Listbox = _tkListbox
tk.Menu = _tkMenu
tk.PanedWindow = _tkPanedWindow
tk.Radiobutton = _tkRadiobutton
tk.Scale = _tkScale
tk.Scrollbar = _tkScrollbar
tk.Spinbox = _tkSpinbox
tk.Text = _tkText
tk.Toplevel = _tkToplevel
@contextmanager
def patch():
"""Context manager to temporarily monkey patch the tkinter widgets with their styleable equivalents."""
try:
patch_tk_widgets()
yield
finally:
unpatch_tk_widgets()
@contextmanager
def stylize(style, **overrides):
"""
Context manager to temporarily apply a global-level style and some overrides. This global-level style will only be
used by ``StyleableMixin``s if they're not explicitly given a ``Style`` object already.
:param style: The style to apply.
:type style: Style
:param overrides: Style overrides to use.
"""
global _global_style
try:
_global_style = Style(style, **overrides) if overrides else style
yield
finally:
_global_style = None
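# Illustrative sketch (not part of the original module): widgets created inside a ``stylize`` block
# pick up the temporary global style when no explicit ``style=`` argument is given.
def _example_stylize_usage(root):
    dark = Style(bg=GRAY_SCALE_1, fg=GRAY_SCALE_D)
    with stylize(dark, relief=tk.GROOVE):
        label = Label(root, text="status")  # falls back to the global style set by stylize()
    return label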
def init_fonts(root):
"""
Initialize all the named fonts. This must be called prior to attempting to use any of the named fonts.
:param root: The tkinter root widget.
:type root: tkinter.Tk
"""
global FONT_MONOSPACE_TITLE, FONT_MONOSPACE_NORMAL,\
FONT_SERIF_TITLE, FONT_SERIF_NORMAL,\
FONT_SANS_SERIF_TITLE, FONT_SANS_SERIF_NORMAL
FONT_MONOSPACE_TITLE = tkfont.Font(root, size=10,
name="FONT_MONOSPACE_TITLE",
family="Courier",
weight=tkfont.BOLD)
FONT_MONOSPACE_NORMAL = tkfont.Font(root, size=8,
name="FONT_MONOSPACE_NORMAL",
family="Courier")
FONT_SERIF_TITLE = tkfont.Font(root, size=10,
name="FONT_SERIF_TITLE",
family="Times",
weight=tkfont.BOLD)
FONT_SERIF_NORMAL = tkfont.Font(root, size=8,
name="FONT_SERIF_NORMAL",
family="Times")
FONT_SANS_SERIF_TITLE = tkfont.Font(root, size=10,
name="FONT_SANS_SERIF_TITLE",
family="Helvetica",
weight=tkfont.BOLD)
FONT_SANS_SERIF_NORMAL = tkfont.Font(root, size=8,
name="FONT_SANS_SERIF_NORMAL",
family="Helvetica")
| 2.984375 | 3 |
tests/test_engine/test_queries/test_queryop_logical_not.py | gitter-badger/MontyDB | 0 | 12790758 |
import re
from bson.regex import Regex
def test_qop_not_1(monty_find, mongo_find):
docs = [
{"a": 4},
{"x": 8}
]
spec = {"a": {"$not": {"$eq": 8}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 2
assert monty_c.count() == mongo_c.count()
def test_qop_not_2(monty_find, mongo_find):
docs = [
{"a": 6},
{"a": [6]}
]
spec = {"a": {"$not": {"$eq": 6}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 0
assert monty_c.count() == mongo_c.count()
def test_qop_not_3(monty_find, mongo_find):
docs = [
{"a": [{"b": 8}, {"b": 6}]},
]
spec = {"a.b": {"$not": {"$in": [6]}}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 0
assert monty_c.count() == mongo_c.count()
def test_qop_not_4(monty_find, mongo_find):
docs = [
{"a": "apple"},
]
spec = {"a": {"$not": Regex("^a")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 0
assert monty_c.count() == mongo_c.count()
def test_qop_not_5(monty_find, mongo_find):
docs = [
{"a": "apple"},
{"a": "banana"},
]
spec = {"a": {"$not": re.compile("^a")}}
monty_c = monty_find(docs, spec)
mongo_c = mongo_find(docs, spec)
assert mongo_c.count() == 1
assert monty_c.count() == mongo_c.count()
assert next(monty_c) == next(mongo_c)
| 2.515625 | 3 |
routine.py | Atari2/WhiteSnake | 0 | 12790759 | import asar
from rom import Rom
from patchexception import PatchException
import os
import re
class Routine:
incsrc = 'incsrc global_ow_code/defines.asm\nfreecode cleaned\n'
def __init__(self, file):
self.path = file
self.ptr = None
self.name = re.findall(r'\w+\.asm', file)[-1].replace('.asm', '')
with open(self.path, 'r') as r:
self.routine = f'print "$",pc\n{r.read()}\n\n'
def __str__(self):
return self.incsrc + 'parent:\n' + self.routine
def create_macro(self):
return f'macro {self.name}()\n\tJSL {self.ptr}\nendmacro\n'
def create_autoclean(self):
return f'autoclean {self.ptr}\n'
def patch_routine(self, rom: Rom):
with open(f'tmp_{self.name}.asm', 'w') as f:
f.write(str(self))
(success, rom_data) = asar.patch(f'tmp_{self.name}.asm', rom.data)
if success:
rom.data = rom_data
ptrs = asar.getprints()
self.ptr = ptrs[0]
print(f'Routine {self.name} was applied correctly')
else:
print(asar.geterrors())
raise PatchException(f'Routine {self.name} encountered an error while patching')
os.remove(f'tmp_{self.name}.asm')
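# Hypothetical usage sketch (the file name is illustrative, not from the repository): given an
# already-loaded ``Rom`` instance ``rom``:
#   routine = Routine('global_ow_code/example_routine.asm')
#   routine.patch_routine(rom)           # patches via asar and records the printed pointer
#   macro_text = routine.create_macro()  # emits a macro that JSLs to the patched routine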
| 2.34375 | 2 |
envio2/poisson.py | leonheld/INE5118 | 0 | 12790760 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import collections
import scipy
from scipy.stats import poisson
from scipy.stats import norm
from math import sqrt
sns.set(style = "darkgrid", context = "paper")
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
def poipmf(start, stop, lamb):
k = np.arange(start, stop, 1)
pmf = scipy.stats.poisson.pmf(k, lamb)
mu = lamb
sigma = sqrt(lamb)
x = np.linspace(mu - 5*sigma, mu + 5*sigma, 100)
plt.plot(x, scipy.stats.norm.pdf(x, mu, sigma), color = '#353131')
plt.stem(k, pmf, '--', bottom=0)
plt.title(r"Poisson Normal Appr. with $\lambda$ = %d" % (lamb))
plt.show()
lamb = [1, 5, 10, 200]
poipmf(0, 13, lamb[0])
poipmf(0, 17, lamb[1])
poipmf(0, 25, lamb[2])
poipmf(0, 200, lamb[3])
| 2.78125 | 3 |
test/test_dataset.py | ffrankies/tf_rnn | 1 | 12790761 | import pytest
from tf_rnn.dataset import *
INPUT_PLACEHOLDER = 1
OUTPUT_PLACEHOLDER = 2
NUM_BATCHES_SMALL = 10
NUM_ROWS_SMALL = 10
ROW_LEN_SMALL = 5
NUM_BATCHES_MEDIUM = 1000
NUM_ROWS_MEDIUM = 100
ROW_LEN_MEDIUM = 20
BATCHED_INPUTS_SMALL = [[[INPUT_PLACEHOLDER for timestep in range(ROW_LEN_SMALL)]
for row in range(NUM_ROWS_SMALL)]
for batch in range(NUM_BATCHES_SMALL)]
BATCHED_OUTPUTS_SMALL = [[[OUTPUT_PLACEHOLDER for timestep in range(ROW_LEN_SMALL)]
for row in range(NUM_ROWS_SMALL)]
for batch in range(NUM_BATCHES_SMALL)]
BATCHED_SIZES_SMALL_ = [[True] + [len(row) for row in batch] + [True] for batch in BATCHED_INPUTS_SMALL]
BATCHED_SIZES_SMALL = [[len(row) for row in batch] for batch in BATCHED_INPUTS_SMALL]
BATCHED_INPUTS_MEDIUM = [[[INPUT_PLACEHOLDER for timestep in range(ROW_LEN_MEDIUM)]
for row in range(NUM_ROWS_MEDIUM)]
for batch in range(NUM_BATCHES_MEDIUM)]
BATCHED_OUTPUTS_MEDIUM = [[[OUTPUT_PLACEHOLDER for timestep in range(ROW_LEN_MEDIUM)]
for row in range(NUM_ROWS_MEDIUM)]
for batch in range(NUM_BATCHES_MEDIUM)]
BATCHED_SIZES_MEDIUM_ = [[True] + [len(row) for row in batch] + [True] for batch in BATCHED_INPUTS_MEDIUM]
BATCHED_SIZES_MEDIUM = [[len(row) for row in batch] for batch in BATCHED_INPUTS_MEDIUM]
class TestDataPartition():
def test_should_correctly_calculate_the_batch_length(self):
partition = DataPartition(BATCHED_INPUTS_SMALL, BATCHED_OUTPUTS_SMALL, BATCHED_SIZES_SMALL_)
assert partition.x == BATCHED_INPUTS_SMALL
assert partition.y == BATCHED_OUTPUTS_SMALL
assert partition.sizes == BATCHED_SIZES_SMALL
assert partition.num_batches == NUM_BATCHES_SMALL
partition = DataPartition(BATCHED_INPUTS_MEDIUM, BATCHED_OUTPUTS_MEDIUM, BATCHED_SIZES_MEDIUM_)
assert partition.x == BATCHED_INPUTS_MEDIUM
assert partition.y == BATCHED_OUTPUTS_MEDIUM
assert partition.sizes == BATCHED_SIZES_MEDIUM
assert partition.num_batches == NUM_BATCHES_MEDIUM
| 2.703125 | 3 |
dxgb_bench/datasets/__init__.py | trivialfis/dxgb_bench | 2 | 12790762 | from .mortgage import Mortgage
from .taxi import Taxi
from .higgs import Higgs
from .year import YearPrediction
from .covtype import Covtype
from .airline import Airline
from .epsilon import Epsilon
from .generated import Generated
import argparse
from typing import Tuple
from dxgb_bench.utils import DataSet
def factory(name: str, args: argparse.Namespace) -> Tuple[DataSet, str]:
if name.startswith("mortgage"):
d: DataSet = Mortgage(args)
return d, d.task
elif name == "taxi":
d = Taxi(args)
return d, d.task
elif name == "higgs":
d = Higgs(args)
return d, d.task
elif name == "year":
d = YearPrediction(args)
return d, d.task
elif name == "covtype":
d = Covtype(args)
return d, d.task
elif name == "airline":
d = Airline(args)
return d, d.task
elif name == "epsilon":
d = Epsilon(args)
return d, d.task
elif name == "generated":
d = Generated(args)
return d, d.task
else:
raise ValueError("Unknown dataset:", name)
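# Hypothetical usage sketch: ``args`` is the argparse.Namespace produced by the benchmark CLI;
# each dataset class reads the fields it needs from it.
#   dataset, task = factory("higgs", args)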
| 2.4375 | 2 |
examples/iris.py | speedcell4/houttuynia | 1 | 12790763 | from pathlib import Path
import aku
from torch import nn, optim
from houttuynia.monitors import get_monitor
from houttuynia.schedules import EpochalSchedule
from houttuynia.nn import Classifier
from houttuynia import log_system, manual_seed, to_device
from houttuynia.datasets import prepare_iris_dataset
from houttuynia.schedule import Moment, Pipeline
from houttuynia.extensions import ClipGradNorm, CommitScalarByMean, Evaluation
from houttuynia.triggers import Periodic
from houttuynia.utils import ensure_output_dir, experiment_hash, options_dump
class IrisEstimator(Classifier):
def __init__(self, in_features: int, num_classes: int, hidden_features: int, dropout: float,
bias: bool, negative_slope: float) -> None:
self.dropout = dropout
self.in_features = in_features
self.num_classes = num_classes
self.hidden_features = hidden_features
self.negative_slope = negative_slope
super(IrisEstimator, self).__init__(estimator=nn.Sequential(
nn.Dropout(dropout),
nn.Linear(in_features, hidden_features, bias),
nn.LeakyReLU(negative_slope=negative_slope, inplace=True),
nn.Linear(hidden_features, hidden_features, bias),
nn.LeakyReLU(negative_slope=negative_slope, inplace=True),
nn.Linear(hidden_features, num_classes, bias),
))
app = aku.App(__file__)
@app.register
def train(hidden_features: int = 100, dropout: float = 0.05,
bias: bool = True, negative_slope: float = 0.05,
seed: int = 42, device: str = 'cpu', batch_size: int = 5, num_epochs: int = 50,
out_dir: Path = Path('../out_dir'), monitor: ('filesystem', 'tensorboard') = 'tensorboard'):
""" train iris classifier
Args:
hidden_features: the size of hidden layers
dropout: the dropout ratio
bias: whether or not use the bias in hidden layers
negative_slope: the ratio of negative part
seed: the random seed number
device: device id
batch_size: the size of each batch
num_epochs: the total numbers of epochs
out_dir: the root path of output
monitor: the type of monitor
"""
options = locals()
experiment_dir = out_dir / experiment_hash(**options)
ensure_output_dir(experiment_dir)
options_dump(experiment_dir, **options)
log_system.notice(f'experiment_dir => {experiment_dir}')
manual_seed(seed)
log_system.notice(f'seed => {seed}')
train, test = prepare_iris_dataset(batch_size)
estimator = IrisEstimator(
in_features=4, dropout=dropout, num_classes=3, hidden_features=hidden_features,
negative_slope=negative_slope, bias=bias
)
optimizer = optim.Adam(estimator.parameters())
monitor = get_monitor(monitor)(log_dir=experiment_dir)
to_device(device, estimator)
schedule = EpochalSchedule(estimator, optimizer, monitor)
schedule.register_extension(Periodic(Moment.AFTER_ITERATION, iteration=5))(CommitScalarByMean(
'criterion', 'acc', chapter='train',
))
schedule.register_extension(Periodic(Moment.AFTER_BACKWARD, iteration=1))(ClipGradNorm(max_norm=4.))
schedule.register_extension(Periodic(Moment.AFTER_EPOCH, epoch=1))(Pipeline(
Evaluation(data_loader=test, chapter='test'),
CommitScalarByMean('criterion', 'acc', chapter='test'),
))
return schedule.run(train, num_epochs)
if __name__ == '__main__':
app.run()
| 2.203125 | 2 |
tests/test_tools/test_aucote_ad/test_parsers/test_enum4linux_parser.py | Wolodija/aucote | 1 | 12790764 | <filename>tests/test_tools/test_aucote_ad/test_parsers/test_enum4linux_parser.py<gh_stars>1-10
from unittest import TestCase
from tools.acuote_ad.parsers.enum4linux_parser import Enum4linuxParser
from tools.acuote_ad.structs import Enum4linuxOS, Enum4linuxUser, Enum4linuxShare, Enum4linuxGroup, \
Enum4linuxPasswordPolicy
class Enum4linuxParserTest(TestCase):
from tests.test_tools.test_aucote_ad.test_parsers.test_enum4linux_parser_input import PASSWORD_POLICY, OUTPUT, \
LOCAL_GROUPS, GROUPS, DOMAIN_GROUPS, BUILTIN_GROUPS, SHARES, OS_INFORMATION, USERS
def setUp(self):
self.parser = Enum4linuxParser()
def test_parse_os_information(self):
result = self.parser.parse_os_information(self.OS_INFORMATION)
expected = Enum4linuxOS(domain='CS', os='Windows Server 2012 R2 Standard 9600',
server='Windows Server 2012 R2 Standard 6.3')
self.assertEqual(result, expected)
def test_parse_users(self):
result = self.parser.parse_users(self.USERS)
expected = [
Enum4linuxUser(index='0xf4d', rid='0x1f4', acb='0x00000010', account='Administrator', name=None,
desc='Built-in account for administering the computer/domain'),
Enum4linuxUser(index='0x101e', rid='0x451', acb='0x00000210', account='jkowalski', name='<NAME>',
desc=None)
]
self.assertEqual(result, expected)
def test_parse_shares(self):
result = self.parser.parse_shares(self.SHARES)
expected = [
Enum4linuxShare(name='ADMIN$', share_type='Disk', comment='Remote Admin'),
Enum4linuxShare(name='C$', share_type='Disk', comment='Default share'),
Enum4linuxShare(name='IPC$', share_type='IPC', comment='Remote IPC'),
Enum4linuxShare(name='NETLOGON', share_type='Disk', comment='Logon server share'),
Enum4linuxShare(name='SYSVOL', share_type='Disk', comment='Logon server share')
]
self.assertCountEqual(result, expected)
def test_parse_groups_list(self):
result = self.parser.parse_groups_list(self.LOCAL_GROUPS)
result_users = [group.users for group in result]
expected = [
Enum4linuxGroup(name='Cert Publishers', rid='0x205'),
Enum4linuxGroup(name='Denied RODC Password Replication Group', rid='0x23c')
]
expected_users = [set(), {'CS\krbtgt', 'CS\Domain Controllers', 'CS\Enterprise Admins',
'CS\Read-only Domain Controllers'}]
self.assertCountEqual(result, expected)
self.assertCountEqual(result_users, expected_users)
def test_parse_password_policy(self):
result = self.parser.parse_password_policy(self.PASSWORD_POLICY)
expected = Enum4linuxPasswordPolicy(min_length='7', complexity='1', history='24',
max_age='41 days 23 hours 52 minutes', min_age='1 day', cleartext='0',
no_anon_change='0', no_clear_change='0', lockout_admins='0',
reset_lockout='30 minutes', lockout_duration='30 minutes',
lockout_threshold='None', force_logoff_time='Not Set')
self.assertEqual(result, expected)
| 2.25 | 2 |
torch_basic/NMT.py | quoniammm/mine-pytorch | 3 | 12790765 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from nltk import FreqDist
import re
import jieba
import math
import time
from collections import Counter
import random
path = '../data/cmn-eng/'
SOS_token = 0
EOS_token = 1
MAX_LENGTH = 10
# Utility functions
def deal_en_sen(raw):
raw.strip()
letters_only = re.sub("[^a-zA-Z]", " ", raw)
words = letters_only.lower().split()
return (" ".join(words))
def deal_zh_sen(raw):
raw.strip()
letters_only = re.sub("[^\u4e00-\u9fa5]", "", raw)
return(letters_only)
def wordandindex(vocab):
return {word: i + 1 for i, word in enumerate(vocab)}, {i + 1: word for i, word in enumerate(vocab)}
def sen2index(sen, lang):
global word2index_en
global word2index_zh
if lang == 'en':
no_eos = [word2index_en[word] for word in sen.split(' ')]
else:
no_eos = [word2index_zh[word] for word in list(jieba.cut(sen))]
no_eos.append(0)
return no_eos
def as_minutes(s):
pass
def time_since(since, percent):
pass
# Data preprocessing
with open(path + 'cmn.txt') as f:
lines = f.readlines()
en_sens = [deal_en_sen(line.split('\t')[0]) for line in lines]
zh_sens = [deal_zh_sen(line.split('\t')[1]) for line in lines]
pairs = [[en, zh] for en, zh in zip (en_sens, zh_sens)]
en_max_len = max([len(x) for x in en_sens])
zh_max_len = max([len(x) for x in zh_sens])
# Use NLTK's FreqDist helper
en_word_counts = FreqDist(' '.join(en_sens).split(' '))
# zh_word_counts = FreqDist(list(jieba.cut(''.join(zh_sens))))
en_vocab = set(en_word_counts)
# zh_vocab = set(zh_word_counts)
zh_counts = Counter()
for sen in zh_sens:
for word in list(jieba.cut(sen)):
zh_counts[word] += 1
zh_vocab = set(zh_counts)
MAX_LENGTH = 7
filter_pairs = [pair for pair in pairs if len(pair[0].split(' ')) < MAX_LENGTH and len(list(jieba.cut(pair[1]))) < MAX_LENGTH]
word2index_en, index2word_en = wordandindex(en_vocab)
word2index_en['EOS'] = 0
index2word_en[0] = 'EOS'
word2index_zh, index2word_zh = wordandindex(zh_vocab)
word2index_zh['EOS'] = 0
index2word_zh[0] = 'EOS'
sen_vector = [[sen2index(pair[0], 'en'), sen2index(pair[1], 'zh')] for pair in filter_pairs]
# Model implementation
# seq2seq with attention
# np.array([sen_vector[2][1]]).shape
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=1, is_cuda=False):
super(EncoderRNN, self).__init__()
# input_size is actually the vocabulary size
self.input_size = input_size
self.hidden_size = hidden_size
self.n_layers = n_layers
self.is_cuda = is_cuda
# input (N, W) LongTensor N = mini-batch
# output (N, W, embedding_dim)
self.embedded = nn.Embedding(input_size, hidden_size)
# input (seq_len, batch, input_size)
# h_0 (num_layers * num_directions, batch, hidden_size)
# output (seq_len, batch, hidden_size * num_directions)
# h_n (num_layers * num_directions, batch, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, word_inputs, hidden):
seq_len = len(word_inputs[0])
embedded = self.embedded(word_inputs).view(seq_len, 1, -1)
output, hidden = self.gru(embedded, hidden)
return output, hidden
def init_hidden(self):
hidden = Variable(torch.zeros(self.n_layers, 1, self.hidden_size))
if self.is_cuda: hidden = hidden.cuda()
return hidden
# For simplicity, this implements the "general" scoring method of the attention mechanism
class Attn(nn.Module):
def __init__(self, hidden_size, max_length=MAX_LENGTH, is_cuda=False):
super(Attn, self).__init__()
self.hidden_size = hidden_size
self.is_cuda = is_cuda
# general method
self.attn = nn.Linear(self.hidden_size, hidden_size)
def forward(self, hidden, encoder_outputs):
seq_len = len(encoder_outputs)
attn_energies = Variable(torch.zeros(seq_len))
if self.is_cuda: attn_energies = attn_energies.cuda()
for i in range(seq_len):
attn_energies[i] = self.score(hidden.squeeze(0).squeeze(0), encoder_outputs[i].squeeze(0))
# Normalize energies to weights in range 0 to 1, resize to 1 x 1 x seq_len
# The returned attn_weights dimension stays consistent with encoder_outputs
return F.softmax(attn_energies).unsqueeze(0).unsqueeze(0)
def score(self, hidden, encoder_output):
energy = self.attn(encoder_output)
energy = hidden.dot(energy)
return energy
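# Note (added for clarity, not in the original): the "general" score above computes
# score(h_dec, h_enc) = h_dec · (W_a · h_enc), i.e. the decoder hidden state dotted with a linearly
# transformed encoder output, where W_a is the ``self.attn`` linear layer.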
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, n_layers=1, is_cuda=False):
super(DecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.is_cuda = is_cuda
# output_size is the size of the Chinese vocabulary
self.embedded = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size * 2, hidden_size)
# input => (N, *, in_features)
# output => (N, * , out_features)
self.out = nn.Linear(hidden_size * 2, output_size)
self.attn = Attn(hidden_size)
def forward(self, word_input, last_context, last_hidden, encoder_outputs):
# The original embedding output shape is (n, w, embedding_dim)
# [1, 1, 10]
embedded = self.embedded(word_input)
# print("decoder's embedded is {}".format(embedded))
# [1, 1, 20]
rnn_input = torch.cat((embedded,last_context), 2)
# print("decoder's rnn_input is {}".format(rnn_input))
# [1, 1, 10]
rnn_output, hidden = self.gru(rnn_input, last_hidden)
# print("decoder's rnn_output is {}".format(rnn_output))
# [1, 1, 3]
attn_weights = self.attn(rnn_output, encoder_outputs)
# print("decoder's attn_weights is {}".format(attn_weights))
# [1, 1, 3] bmm [1, 3, 10] ([3, 1, 10] before the transpose) => [1, 1, 10]
# print(type(attn_weights))
# print(type(encoder_outputs.transpose(0, 1)))
# print(attn_weights.size())
# print(encoder_outputs.transpose(0,1).size())
context = attn_weights.bmm(encoder_outputs.transpose(0, 1))
# print("decoder's context is {}".format(context))
#print("{}".format(self.out(torch.cat((rnn_output, context), 2)).size()))
output = F.log_softmax(self.out(torch.cat((rnn_output.squeeze(0), context.squeeze(0)), 1)))
#print("decoder's output is {}".format(output))
return output, context, hidden, attn_weights
# Training
# 500
hidden_size = 128
n_layers = 1
MAX_LENGTH = 7
# USE_CUDA = False
USE_CUDA = False
encoder = EncoderRNN(len(en_vocab), hidden_size, n_layers, False)
decoder = DecoderRNN(hidden_size, len(zh_vocab), n_layers, False)
if USE_CUDA:
encoder.cuda()
decoder.cuda()
lr = 1e-3
encoder_optimizer = optim.Adam(encoder.parameters(), lr=lr)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=lr)
criterion = nn.NLLLoss()
# Training function
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, is_cuda=False):
# Zero the gradients
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
loss = 0 # added onto for each word
# input_length = input_variable.size()[1]
target_length = target_variable.size()[1]
## encoder
# [num_layers * direc, batch_s, fea] [1, 1, 500]
encoder_hidden = encoder.init_hidden()
# Suppose the input is [1, 4, 5, 7], shape [1, 4]
# [4, 1, 500] [1, 1, 500]
encoder_outputs, encoder_hidden = encoder(input_variable, encoder_hidden)
## decoder
# [1, 1]
decoder_input = Variable(torch.LongTensor([[0]]))
# [1, 1, 500]
decoder_context = Variable(torch.zeros(1, 1, decoder.hidden_size))
# [1, 1, 500]
decoder_hidden = encoder_hidden
if is_cuda:
decoder_input = decoder_input.cuda()
decoder_context = decoder_context.cuda()
for i in range(target_length):
decoder_output, decoder_context, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)
# ???
# print(target_variable[i].size())
loss += criterion(decoder_output, target_variable[0][i])
topv, topi = decoder_output.data.topk(1)
max_similar_pos = topi[0][0]
decoder_input = Variable(torch.LongTensor([[max_similar_pos]]))
# print(decoder_input)
if USE_CUDA: decoder_input = decoder_input.cuda()
if max_similar_pos == 0: break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.data[0] / target_length
# Training settings
n_epochs = 500
print_every = 50
print_loss_total = 0
# Start training
for epoch in range(1, n_epochs + 1):
training_pair = random.choice(sen_vector)
input_variable = Variable(torch.LongTensor([training_pair[0]]))
# int(input_variable.size())
target_variable = Variable(torch.LongTensor([training_pair[1]]))
if USE_CUDA:
input_variable = input_variable.cuda()
target_variable = target_variable.cuda()
loss = train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, False)
print_loss_total += loss
if epoch % print_every == 0:
print_loss_avg = print_loss_total / epoch
print('epoch {}\'s avg_loss is {}'.format(epoch, print_loss_avg))
# Inspect the results
def evaluate(sentence, max_length=MAX_LENGTH, is_cuda=False):
input_sen2index = sen2index(sentence, 'en')
input_variable = Variable(torch.LongTensor(input_sen2index).view(1, -1))
input_length = input_variable.size()[1]
if is_cuda:
input_variable = input_variable.cuda()
encoder_hidden = encoder.init_hidden()
encoder_outputs, encoder_hidden = encoder(input_variable, encoder_hidden)
decoder_input = Variable(torch.LongTensor([[0]]))
decoder_context = Variable(torch.zeros(1, 1, decoder.hidden_size))
if is_cuda:
decoder_input = decoder_input.cuda()
decoder_context = decoder_context.cuda()
decoder_hidden = encoder_hidden
# Translation result
decoded_words = []
# this part is still not well understood
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_context, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
# EOS_token
if ni == 0:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(index2word_zh[ni])
decoder_input = Variable(torch.LongTensor([[ni]]))
if USE_CUDA: decoder_input = decoder_input.cuda()
return ''.join(decoded_words), decoder_attentions[:di+1, :len(encoder_outputs)]
print(evaluate('i love you')[0])
| 2.0625 | 2 |
predict.py | henryhust/Coreference-Chinese | 0 | 12790766 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import json
import tensorflow as tf
import util
except_name = ["公司", "本公司", "该公司", "贵公司", "贵司", "本行", "该行", "本银行", "该集团", "本集团", "集团",
"它", "他们", "他们", "我们", "该股", "其", "自身"]
def check_span(fla_sentences, span):
"""检查span对应词语是否符合要求"""
if "".join(fla_sentences[span[0] - 1]) in ["该", "本", "贵"]: # 对span进行补全
span = (span[0]-1, span[1])
return span
def flatten_sentence(sentences):
"""将多维列表展开"""
return [char for sentence in sentences for char in sentence]
def max_all_count(dict0):
"""获取字典当中的count最大值"""
a = max([(value, key) for key, value in dict0.items()])
return a[0]
def cluster_rule(example):
"""
对模型预测结果进行算法修正
:param example: 一条json数据
:return: 纠正后的predicted_cluster
"""
fla_sentences = flatten_sentence(example["sentences"])
res_clusters = []
com2cluster = {}
except_cluster = {}
for cluster in example["predicted_clusters"]:
res_cluster = []
span_count = {}
span2pos = {}
for span in cluster:
if "".join(fla_sentences[span[0]:span[1] + 1]) in ["公司", "集团"]: # 对缺失字符进行补充
span = check_span(fla_sentences, span)
if "#" in "".join(fla_sentences[span[0]:span[1] + 1]): # 对不合法单词进行过滤
continue
res_cluster.append(span)
word = "".join(fla_sentences[span[0]:span[1] + 1])
span_count[word] = span_count.get(word, 0) + 1
if span2pos.get(word, None) is not None:
span2pos[word].append(span)
else:
span2pos[word] = [span]
com_name = set(span_count.keys())
for ex in except_name:
com_name.discard(ex)
max_name = ""
max_count = 0
        for com in com_name:  # pick the most frequent (and longest) company name in the cluster
if span_count[com] > max_count:
max_count = span_count[com]
max_name = com
elif span_count[com] == max_count and len(com) > len(max_name):
max_count = span_count[com]
max_name = com
print("max_name:{}".format(max_name))
        for com in com_name:  # company names
            if com[:2] == max_name[:2]:  # same first two characters -> treat as the same company
continue
            elif len(com) < len(max_name) and com in max_name:  # one name contains the other -> treat as the same company
continue
elif len(com) > len(max_name) and max_name in com:
continue
else:
print(com)
# span2pos[com]
                except_cluster[com] = span2pos[com]  # spans of this mismatched company name
                for n in span2pos[com]:  # spans judged to be wrong predictions are removed
res_cluster.remove(n)
if com2cluster.get(max_name, None) is None:
com2cluster[max_name] = res_cluster
else:
print(res_cluster)
com2cluster[max_name].extend(res_cluster)
    for key, value in except_cluster.items():  # this step matters: try to reattach the removed spans
if com2cluster.get(key, None) is None:
print("该span将被彻底清除:{}".format(key))
continue
else:
print("{}重新融入别的cluster当中".format(key), value)
com2cluster[key].extend(value)
# res_clusters.append(res_cluster)
for v_cluster in com2cluster.values():
res_clusters.append(v_cluster)
return res_clusters
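# Minimal usage sketch (hypothetical data): `example` mirrors one line of the
# .jsonlines input, where `sentences` holds tokenized characters and
# `predicted_clusters` holds inclusive (start, end) token spans.
# example = {
#     "sentences": [["阿", "里", "巴", "巴", "集", "团", "发", "布", "公", "告"]],
#     "predicted_clusters": [[(0, 5), (4, 5)]],
# }
# print(cluster_rule(example))  # -> [[(0, 5), (4, 5)]] after rule-based cleanup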
if __name__ == "__main__":
"""
    Command-line example
python predict.py bert_base conll-2012/tagging_pure/tagging_dev_pos.chinese.128.jsonlines result_of_20.txt
"""
config = util.initialize_from_env()
log_dir = config["log_dir"]
# Input file in .jsonlines format.
    input_filename = sys.argv[2]  # path to the input data
# Predictions will be written to this file in .jsonlines format.
    output_filename = sys.argv[3]  # path for the output
model = util.get_model(config)
saver = tf.train.Saver()
with tf.Session() as session:
model.restore(session)
with open(output_filename, "w") as output_file:
with open(input_filename) as input_file:
for example_num, line in enumerate(input_file.readlines()):
example = json.loads(line)
tensorized_example = model.tensorize_example(example, is_training=False)
feed_dict = {i: t for i, t in zip(model.input_tensors, tensorized_example)}
_, _, _, top_span_starts, top_span_ends, top_antecedents, top_antecedent_scores = session.run(model.predictions, feed_dict=feed_dict)
predicted_antecedents = model.get_predicted_antecedents(top_antecedents, top_antecedent_scores)
example["predicted_clusters"], mention_to_predict = model.get_predicted_clusters(top_span_starts, top_span_ends, predicted_antecedents)
example["top_spans"] = list(zip((int(i) for i in top_span_starts), (int(i) for i in top_span_ends)))
example['head_scores'] = []
example["mention_to_predict"] = str(mention_to_predict)
example["predicted_clusters"] = cluster_rule(example)
output_file.write(str(json.dumps(example, ensure_ascii=False)))
output_file.write("\n")
if example_num % 100 == 0:
print("Decoded {} examples.".format(example_num + 1))
| 2.515625 | 3 |
pyobs/utils/threads/__init__.py | pyobs/pyobs-core | 4 | 12790767 | <gh_stars>1-10
from .future import Future
from .lockwithabort import LockWithAbort, AcquireLockFailed
from .threadwithreturnvalue import ThreadWithReturnValue
| 1.179688 | 1 |
tpDcc/libs/python/folder.py | tpDcc/tpDcc-libs-python | 1 | 12790768 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility methods related to folders
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import time
import errno
import shutil
import fnmatch
import logging
import tempfile
import traceback
import subprocess
from distutils.dir_util import copy_tree
logger = logging.getLogger('tpDcc-libs-python')
def create_folder(name, directory=None, make_unique=False):
"""
Creates a new folder on the given path and with the given name
:param name: str, name of the new directory
:param directory: str, path to the new directory
:param make_unique: bool, Whether to pad the name with a number to make it unique if the folder is not unique
:return: variant, str || bool, folder name with path or False if the folder creation failed
"""
from tpDcc.libs.python import path, osplatform
full_path = False
if directory is None:
full_path = name
if not name:
full_path = directory
if name and directory:
full_path = path.join_path(directory, name)
if make_unique:
full_path = path.unique_path_name(directory=full_path)
if not full_path:
return False
if path.is_dir(full_path):
return full_path
try:
os.makedirs(full_path)
except Exception:
return False
osplatform.get_permission(full_path)
return full_path
def rename_folder(directory, name, make_unique=False):
"""
    Renames the given directory with a new name
:param directory: str, full path to the directory we want to rename
:param name: str, new name of the folder we want to rename
:param make_unique: bool, Whether to add a number to the folder name to make it unique
:return: str, path of the renamed folder
"""
from tpDcc.libs.python import path, osplatform
base_name = path.get_basename(directory=directory)
if base_name == name:
return
parent_path = path.get_dirname(directory=directory)
rename_path = path.join_path(parent_path, name)
if make_unique:
rename_path = path.unique_path_name(directory=rename_path)
if path.exists(rename_path):
return False
try:
osplatform.get_permission(directory)
message = 'rename: {0} >> {1}'.format(directory, rename_path)
logger.info(message)
os.rename(directory, rename_path)
except Exception:
time.sleep(0.1)
try:
os.rename(directory, rename_path)
except Exception:
logger.error('{}'.format(traceback.format_exc()))
return False
return rename_path
def copy_folder(directory, directory_destination, ignore_patterns=[]):
"""
Copy the given directory into a new directory
:param directory: str, directory to copy with full path
:param directory_destination: str, destination directory
:param ignore_patterns: list<str>, extensions we want to ignore when copying folder elements
If ['txt', 'py'] is given all py and text extension files will be ignored during the copy operation
:return: str, destination directory
"""
from tpDcc.libs.python import path, osplatform
if not path.is_dir(directory=directory):
return
if not ignore_patterns:
cmd = None
if osplatform.is_linux():
cmd = ['rsync', directory, directory_destination, '-azr']
elif osplatform.is_windows():
cmd = [
'robocopy', directory.replace('/', '\\'), directory_destination.replace('/', '\\'), '/S', '/Z', '/MIR']
if cmd:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if out:
logger.error(err)
else:
shutil.copytree(directory, directory_destination)
else:
shutil.copytree(directory, directory_destination, ignore=shutil.ignore_patterns(ignore_patterns))
return directory_destination
def move_folder(source_directory, target_directory, only_contents=False):
"""
Moves the folder pointed by source_directory under the directory target_directory
:param source_directory: str, folder with full path
:param target_directory: str, path where path1 should be move into
:param only_contents: bool, Whether to move the folder or only its contents
    :return: bool, Whether the move operation was successful
"""
try:
if only_contents or os.path.isdir(target_directory):
file_list = os.listdir(source_directory)
for i in file_list:
src = os.path.join(source_directory, i)
dest = os.path.join(target_directory, i)
if os.path.exists(dest):
if os.path.isdir(dest):
move_folder(src, dest)
continue
else:
os.remove(dest)
shutil.move(src, target_directory)
else:
shutil.move(source_directory, target_directory)
except Exception as exc:
logger.warning('Failed to move {} to {}: {}'.format(source_directory, target_directory, exc))
return False
return True
def copy_directory_contents(path1, path2, *args, **kwargs):
"""
    Copies all the contents of the given path1 to the folder path2. If the path2 directory does not
    exist, it will be created
:param path1: str
:param path2: str
:param args:
:param kwargs:
:return:
"""
try:
copy_tree(path1, path2, *args, **kwargs)
except Exception:
logger.warning('Failed to move contents of {0} to {1}'.format(path1, path2))
return False
return True
def delete_folder(folder_name, directory=None):
"""
Deletes the folder by name in the given directory
:param folder_name: str, name of the folder to delete
:param directory: str, the directory path where the folder is stored
:return: str, folder that was deleted with path
"""
from tpDcc.libs.python import name, path, osplatform
def delete_read_only_error(action, name, exc):
"""
Helper to delete read only files
"""
osplatform.get_permission(name)
action(name)
folder_name = name.clean_file_string(folder_name)
full_path = folder_name
if directory:
full_path = path.join_path(directory, folder_name)
if not path.is_dir(full_path):
return None
try:
shutil.rmtree(full_path, onerror=delete_read_only_error)
except Exception as exc:
logger.warning('Could not remove children of path "{}" | {}'.format(full_path, exc))
return full_path
def clean_folder(directory):
"""
Removes everything in the given directory
:param directory: str
"""
from tpDcc.libs.python import path, fileio, folder
base_name = path.get_basename(directory=directory)
dir_name = path.get_dirname(directory=directory)
if path.is_dir(directory):
try:
files = folder.get_files(directory)
except Exception:
files = list()
for f in files:
fileio.delete_file(f, directory)
delete_folder(base_name, dir_name)
if not path.is_dir(directory):
create_folder(base_name, dir_name)
def get_folder_size(directory, round_value=2, skip_names=None):
"""
Returns the size of the given folder
:param directory: str
:param round_value: int, value to round size to
:return: str
"""
from tpDcc.libs.python import python, path, fileio
skip_names = python.force_list(skip_names)
size = 0
for root, dirs, files in os.walk(directory):
root_name = path.get_basename(root)
if root_name in skip_names:
continue
for name in files:
if name in skip_names:
continue
size += fileio.get_file_size(path.join_path(root, name), round_value)
return size
def get_size(file_path, round_value=2):
"""
Return the size of the given directory or file path
:param file_path: str
:param round_value: int, value to round size to
:return: int
"""
from tpDcc.libs.python import fileio, path
size = 0
if path.is_dir(file_path):
size = get_folder_size(file_path, round_value)
if path.is_file(file_path):
size = fileio.get_file_size(file_path, round_value)
return size
def get_sub_folders(root_folder, sort=True):
"""
    Returns a list with the names of all the sub folders in a directory
:param root_folder: str, folder we want to search sub folders for
:param sort: bool, True if we want sort alphabetically the returned folders or False otherwise
:return: list<str>, sub folders names
"""
if not os.path.exists(root_folder):
raise RuntimeError('Folder {0} does not exists!'.format(root_folder))
file_names = os.listdir(root_folder)
result = list()
for f in file_names:
if os.path.isdir(os.path.join(os.path.abspath(root_folder), f)):
result.append(f)
if sort:
result.sort()
return result
def get_folders(root_folder, recursive=False, full_path=False):
"""
Get folders found in the root folder
    :param root_folder: str, folder we want to search folders in
:param recursive: bool, Whether to search in all root folder child folders or not
:return: list<str>
"""
from tpDcc.libs.python import path
found_folders = list()
if not recursive:
try:
found_folders = next(os.walk(root_folder))[1]
except Exception:
pass
else:
try:
for root, dirs, files in os.walk(root_folder):
for d in dirs:
if full_path:
folder_name = path.join_path(root, d)
found_folders.append(folder_name)
else:
folder_name = path.join_path(root, d)
folder_name = os.path.relpath(folder_name, root_folder)
folder_name = path.clean_path(folder_name)
found_folders.append(folder_name)
except Exception:
return found_folders
return found_folders
def get_folders_without_dot_prefix(directory, recursive=False, base_directory=None):
from tpDcc.libs.python import path, version
if not path.exists(directory):
return
found_folders = list()
base_directory = base_directory or directory
folders = get_folders(directory)
for folder in folders:
if folder == 'version':
            version_file = version.VersionFile(directory)
            if version_file.updated_old:
continue
if folder.startswith('.'):
continue
folder_path = path.join_path(directory, folder)
folder_name = path.clean_path(os.path.relpath(folder_path, base_directory))
found_folders.append(folder_name)
if recursive:
sub_folders = get_folders_without_dot_prefix(
folder_path, recursive=recursive, base_directory=base_directory)
found_folders += sub_folders
return found_folders
def get_files(root_folder, full_path=False, recursive=False, pattern="*"):
"""
Returns files found in the given folder
:param root_folder: str, folder we want to search files on
:param full_path: bool, if true, full path to the files will be returned otherwise file names will be returned
:return: list<str>
"""
from tpDcc.libs.python import path
if not path.is_dir(root_folder):
return []
# TODO: For now pattern only works in recursive searches. Fix it to work on both
found = list()
if recursive:
for dir_path, dir_names, file_names in os.walk(root_folder):
for file_name in fnmatch.filter(file_names, pattern):
if full_path:
found.append(path.join_path(dir_path, file_name))
else:
found.append(file_name)
else:
files = os.listdir(root_folder)
for f in files:
file_path = path.join_path(root_folder, f)
if path.is_file(file_path=file_path):
if full_path:
found.append(file_path)
else:
found.append(f)
return found
def get_files_and_folders(directory):
"""
Get files and folders found in the given directory
:param directory: str, folder we want to get files and folders from
:return: list<str>
"""
try:
files = os.listdir(directory)
except Exception:
files = list()
return files
def get_files_with_extension(extension, root_directory, full_path=False, recursive=False, filter_text=''):
"""
    Returns the files in the given directory that have the given extension
:param extension: str, extension to find (.py, .data, etc)
:param root_directory: str, directory path
:param full_path: bool, Whether to return the file path or just the file names
:param recursive: bool
:param filter_text: str
:return: list(str)
"""
found = list()
if not extension.startswith('.'):
extension = '.{}'.format(extension)
if recursive:
for dir_path, dir_names, file_names in os.walk(root_directory):
for file_name in file_names:
filename, found_extension = os.path.splitext(file_name)
if found_extension == '{}'.format(extension):
if not full_path:
found.append(file_name)
else:
                        found.append(os.path.join(dir_path, file_name))  # join with the directory the file was found in
else:
try:
objs = os.listdir(root_directory)
except Exception:
return found
for filename_and_extension in objs:
filename, found_extension = os.path.splitext(filename_and_extension)
if filter_text and filename_and_extension.find(filter_text) == -1:
continue
if found_extension == extension:
if not full_path:
found.append(filename_and_extension)
else:
found.append(os.path.join(root_directory, filename_and_extension))
return found
def get_files_date_sorted(root_directory, extension=None, filter_text=''):
"""
    Returns the files found in the given directory, sorted by modification date
:param root_directory: str, directory path
:param extension: str, optional extension to find
:param filter_text: str, optional text filtering
:return: list(str), list of files date sorted in the directory
"""
from tpDcc.libs.python import fileio
def _get_mtime(fld):
return os.stat(os.path.join(root_directory, fld)).st_mtime
if not extension:
files = fileio.get_files(root_directory, filter_text=filter_text)
else:
files = get_files_with_extension(extension=extension, root_directory=root_directory, filter_text=filter_text)
return list(sorted(files, key=_get_mtime))
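# Example (sketch, hypothetical path): get_files_date_sorted('/var/log', extension='log')
# returns the matching file names sorted by modification time, oldest first.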
def open_folder(path=None):
"""
    Opens a folder in the file explorer in a platform-independent way
    If no path is passed, the current directory will be opened
:param path: str, folder path to open
"""
if path is None:
path = os.path.curdir
if sys.platform == 'darwin':
subprocess.check_call(['open', '--', path])
    elif sys.platform.startswith('linux'):
subprocess.Popen(['xdg-open', path])
elif sys.platform in ['windows', 'win32', 'win64']:
if path.endswith('/'):
path = path[:-1]
new_path = path.replace('/', '\\')
try:
subprocess.check_call(['explorer', new_path], shell=False)
except Exception:
pass
def get_user_folder(absolute=True):
"""
Get path to the user folder
:return: str, path to the user folder
"""
from tpDcc.libs.python import path
if absolute:
return path.clean_path(os.path.abspath(os.path.expanduser('~')))
else:
return path.clean_path(os.path.expanduser('~'))
def get_temp_folder():
"""
Get the path to the temp folder
:return: str, path to the temp folder
"""
from tpDcc.libs.python import path
return path.clean_path(tempfile.gettempdir())
def get_current_working_directory():
"""
Returns current working directory
:return: str, path to the current working directory
"""
return os.getcwd()
def get_folders_from_path(path):
"""
Gets a list of sub folders in the given path
:param path: str
:return: list<str>
"""
folders = list()
while True:
path, folder = os.path.split(path)
if folder != '':
folders.append(folder)
else:
if path != '':
folders.append(path)
break
folders.reverse()
return folders
def get_folders_date_sorted(root_folder):
"""
    Returns the folders found in the given root directory, sorted by modification date
:param root_folder: str, directory path
:return: list(str): list of folder date sorted in the directory
"""
def _get_mtime(fld):
return os.stat(os.path.join(root_folder, fld)).st_mtime
return list(sorted(os.listdir(root_folder), key=_get_mtime))
def ensure_folder_exists(folder_path, permissions=0o755, place_holder=False):
"""
    Checks that the given folder exists. If not, the folder is created.
:param folder_path: str, folder path to check or created
:param permissions:int, folder permission mode
:param place_holder: bool, Whether to create place holder text file or not
:raise OSError: raise OSError if the creation of the folder fails
"""
if not os.path.exists(folder_path):
try:
logger.debug('Creating folder {} [{}]'.format(folder_path, permissions))
os.makedirs(folder_path, permissions)
if place_holder:
place_path = os.path.join(folder_path, 'placeholder')
if not os.path.exists(place_path):
with open(place_path, 'wt') as fh:
fh.write('Automatically generated place holder file')
except OSError as err:
if err.errno != errno.EEXIST:
raise
def get_latest_file_at_folder(folder_path, filter_text=''):
"""
Returns the latest path added to a folder
:param folder_path:
:param filter_text:
:return: str
"""
from tpDcc.libs.python import path
files = get_files_date_sorted(folder_path, filter_text=filter_text)
if not files:
return None
return path.join_path(folder_path, files[-1])
def walk_level(root_directory, level=None):
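    """
    Generator that walks the given directory like os.walk but limits the recursion depth
    :param root_directory: str, directory to walk
    :param level: int or None, number of directory levels below root_directory to descend (None walks everything)
    """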
root_directory = root_directory.rstrip(os.path.sep)
assert os.path.isdir(root_directory)
if level is None:
for root, dirs, files in os.walk(root_directory):
yield root, dirs, files
else:
num_sep = root_directory.count(os.path.sep)
for root, dirs, files in os.walk(root_directory):
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if num_sep + level <= num_sep_this:
del dirs[:]
| 3.03125 | 3 |
main.py | kevinrpb/additives-scrapper | 0 | 12790769 | <filename>main.py
#!/usr/bin/env python3
import json
from scrappers import EAditivosScrapper, EuropaScrapper, LaVeganisteriaScrapper
from util import parse_args, select_dietary, setup_logger, update_dietary
def main():
args = parse_args()
logger = setup_logger(level=args.log_level)
logger.info('Starting')
# Get the scrappers ready
europa_scrapper = EuropaScrapper()
eaditivos_scrapper = EAditivosScrapper()
laveganisteria_scrapper = LaVeganisteriaScrapper()
# Get the information
additives_info = europa_scrapper.get_additives(args.start, args.end)
# returns an array of
# {
# 'id': ...,
# 'number': ...,
# 'name': ...,
# 'synonyms': ...,
# 'groups': ...,
# 'dietary': {
# 'vegetarian': 'unknown',
# 'vegan': 'unknown'
# },
# 'authorisations': ...
# }
# See which numbers we get
numbers = map(lambda element: element['number'], additives_info)
# Get some dietary information from several sources
dietary_info = eaditivos_scrapper.get_additives()
dietary_info = update_dietary(dietary_info, laveganisteria_scrapper.get_additives())
# returns a dict of
# {
# number: {
# 'vegetarian': ...
# 'vegan': ...
# }
# }
# Consolidate the dietary info and update the original base with it
dietary_info = select_dietary(dietary_info)
for index, element in enumerate(additives_info):
e_id = element['number']
if e_id in dietary_info.keys():
additives_info[index]['dietary'].update(dietary_info[e_id])
# Now save the json file
logger.info('Done')
logger.info('Saving additives to file...')
with open('additives.json', 'w') as file:
json.dump(additives_info, file, indent=2)
if __name__ == '__main__':
main()
| 3.015625 | 3 |
neural_interaction_detection.py | mtsang/neural-interaction-detection | 35 | 12790770 | import bisect
import operator
import numpy as np
import torch
from torch.utils import data
from multilayer_perceptron import *
from utils import *
def preprocess_weights(weights):
w_later = np.abs(weights[-1])
w_input = np.abs(weights[0])
for i in range(len(weights) - 2, 0, -1):
w_later = np.matmul(w_later, np.abs(weights[i]))
return w_input, w_later
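# Note: after preprocessing, w_input holds the absolute first-layer weights
# (hidden_units x input_features) and w_later aggregates the absolute weight mass
# from each first-hidden unit to the output by multiplying |W_L| ... |W_2|.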
def make_one_indexed(interaction_ranking):
return [(tuple(np.array(i) + 1), s) for i, s in interaction_ranking]
def interpret_interactions(w_input, w_later, get_main_effects=False):
interaction_strengths = {}
for i in range(w_later.shape[1]):
sorted_hweights = sorted(
enumerate(w_input[i]), key=lambda x: x[1], reverse=True
)
interaction_candidate = []
candidate_weights = []
for j in range(w_input.shape[1]):
bisect.insort(interaction_candidate, sorted_hweights[j][0])
candidate_weights.append(sorted_hweights[j][1])
if not get_main_effects and len(interaction_candidate) == 1:
continue
interaction_tup = tuple(interaction_candidate)
if interaction_tup not in interaction_strengths:
interaction_strengths[interaction_tup] = 0
interaction_strength = (min(candidate_weights)) * (np.sum(w_later[:, i]))
interaction_strengths[interaction_tup] += interaction_strength
interaction_ranking = sorted(
interaction_strengths.items(), key=operator.itemgetter(1), reverse=True
)
return interaction_ranking
def interpret_pairwise_interactions(w_input, w_later):
p = w_input.shape[1]
interaction_ranking = []
for i in range(p):
for j in range(p):
if i < j:
strength = (np.minimum(w_input[:, i], w_input[:, j]) * w_later).sum()
interaction_ranking.append(((i, j), strength))
interaction_ranking.sort(key=lambda x: x[1], reverse=True)
return interaction_ranking
def get_interactions(weights, pairwise=False, one_indexed=False):
w_input, w_later = preprocess_weights(weights)
if pairwise:
interaction_ranking = interpret_pairwise_interactions(w_input, w_later)
else:
interaction_ranking = interpret_interactions(w_input, w_later)
interaction_ranking = prune_redundant_interactions(interaction_ranking)
if one_indexed:
return make_one_indexed(interaction_ranking)
else:
return interaction_ranking
def prune_redundant_interactions(interaction_ranking, max_interactions=100):
interaction_ranking_pruned = []
current_superset_inters = []
for inter, strength in interaction_ranking:
set_inter = set(inter)
if len(interaction_ranking_pruned) >= max_interactions:
break
subset_inter_skip = False
update_superset_inters = []
for superset_inter in current_superset_inters:
if set_inter < superset_inter:
subset_inter_skip = True
break
elif not (set_inter > superset_inter):
update_superset_inters.append(superset_inter)
if subset_inter_skip:
continue
current_superset_inters = update_superset_inters
current_superset_inters.append(set_inter)
interaction_ranking_pruned.append((inter, strength))
return interaction_ranking_pruned
def detect_interactions(
Xd,
Yd,
arch=[256, 128, 64],
batch_size=100,
device=torch.device("cpu"),
seed=None,
**kwargs
):
if seed is not None:
set_seed(seed)
data_loaders = convert_to_torch_loaders(Xd, Yd, batch_size)
    # Input layer size: number of feature columns in Xd (assumed to be either a single
    # array or a dict of data splits keyed by "train").
    num_features = Xd["train"].shape[1] if isinstance(Xd, dict) else Xd.shape[1]
    model = create_mlp([num_features] + arch + [1]).to(device)
model, mlp_loss = train(model, data_loaders, device=device, **kwargs)
inters = get_interactions(get_weights(model))
return inters, mlp_loss
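# Usage sketch (the synthetic data and argument values below are illustrative, and it is
# assumed that convert_to_torch_loaders/train accept this data layout):
# import numpy as np
# X = np.random.rand(1000, 5).astype(np.float32)
# y = (X[:, 0] * X[:, 1]).reshape(-1, 1)          # target driven by an x0-x1 interaction
# inters, loss = detect_interactions(X, y, arch=[64, 32], seed=0)
# print(inters[:3])                               # highest-ranked interactions, e.g. ((0, 1), strength)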
| 2.296875 | 2 |
haas/tests/test_text_test_result.py | itziakos/haas | 4 | 12790771 | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2019 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the 3-clause BSD license. See the LICENSE.txt file for details.
from __future__ import absolute_import, unicode_literals
from datetime import datetime, timedelta
from ..plugins.i_result_handler_plugin import IResultHandlerPlugin
from ..result import (
ResultCollector, TestResult, TestCompletionStatus, TestDuration
)
from ..testing import unittest
from . import _test_cases, _test_case_data
from .fixtures import ExcInfoFixture, MockDateTime
from .compat import mock
class TestTextTestResult(ExcInfoFixture, unittest.TestCase):
def test_result_collector_calls_handlers_start_stop_methods(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
case = _test_cases.TestCase('test_method')
# When
handler.reset_mock()
collector.startTestRun()
# Then
handler.start_test_run.assert_called_once_with()
self.assertFalse(handler.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
# When
handler.reset_mock()
collector.stopTestRun()
# Then
handler.stop_test_run.assert_called_once_with()
self.assertFalse(handler.called)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
# When
handler.reset_mock()
collector.startTest(case)
# Then
handler.start_test.assert_called_once_with(case)
self.assertFalse(handler.called)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.stop_test.called)
# When
handler.reset_mock()
collector.stopTest(case)
# Then
handler.stop_test.assert_called_once_with(case)
self.assertFalse(handler.called)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
def test_unicode_traceback(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
msg = '\N{GREEK SMALL LETTER PHI}'.encode('utf-8')
with self.failure_exc_info(msg) as exc_info:
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.error, expected_duration,
exception=exc_info)
# When
with mock.patch(
'haas.result.datetime', new=MockDateTime(end_time)):
collector.addError(case, exc_info)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertFalse(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_error(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# When
with self.exc_info(RuntimeError) as exc_info:
# Given
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.error, expected_duration,
exception=exc_info)
# When
with mock.patch(
'haas.result.datetime', new=MockDateTime(end_time)):
collector.addError(case, exc_info)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertFalse(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_failure(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
with self.failure_exc_info() as exc_info:
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.failure, expected_duration,
exception=exc_info)
# When
with mock.patch(
'haas.result.datetime', new=MockDateTime(end_time)):
collector.addFailure(case, exc_info)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertFalse(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_success(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.success, expected_duration)
# When
with mock.patch('haas.result.datetime', new=MockDateTime(end_time)):
collector.addSuccess(case)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertTrue(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_skip(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.skipped, expected_duration,
message='reason')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(end_time)):
collector.addSkip(case, 'reason')
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertTrue(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_expected_fail(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
with self.exc_info(RuntimeError) as exc_info:
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.expected_failure, expected_duration,
exception=exc_info)
# When
with mock.patch(
'haas.result.datetime', new=MockDateTime(end_time)):
collector.addExpectedFailure(case, exc_info)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertTrue(collector.wasSuccessful())
def test_result_collector_calls_handlers_on_unexpected_success(self):
# Given
handler = mock.Mock(spec=IResultHandlerPlugin)
collector = ResultCollector()
collector.add_result_handler(handler)
start_time = datetime(2015, 12, 23, 8, 14, 12)
duration = timedelta(seconds=10)
end_time = start_time + duration
expected_duration = TestDuration(start_time, end_time)
case = _test_cases.TestCase('test_method')
# When
with mock.patch('haas.result.datetime', new=MockDateTime(start_time)):
collector.startTest(case)
# Then
self.assertTrue(handler.start_test.called)
handler.start_test.reset_mock()
# Given
expected_result = TestResult.from_test_case(
case, TestCompletionStatus.unexpected_success, expected_duration)
# When
with mock.patch('haas.result.datetime', new=MockDateTime(end_time)):
collector.addUnexpectedSuccess(case)
# Then
handler.assert_called_once_with(expected_result)
self.assertFalse(handler.start_test_run.called)
self.assertFalse(handler.stop_test_run.called)
self.assertFalse(handler.start_test.called)
self.assertFalse(handler.stop_test.called)
self.assertFalse(collector.wasSuccessful())
def test_result_collector_should_stop(self):
# Given
collector = ResultCollector()
# Then
self.assertFalse(collector.shouldStop)
# When
collector.stop()
# Then
self.assertTrue(collector.shouldStop)
def test_multiple_errors_from_one_test(self):
# Given
collector = ResultCollector()
case = _test_case_data.TestWithTwoErrors('test_with_two_errors')
start_time = datetime(2016, 4, 12, 8, 17, 32)
test_end_time = datetime(2016, 4, 12, 8, 17, 38)
tear_down_end_time = datetime(2016, 4, 12, 8, 17, 39)
# When
with mock.patch(
'haas.result.datetime',
new=MockDateTime(
[start_time, test_end_time, tear_down_end_time])):
case.run(collector)
# Then
self.assertEqual(len(collector.errors), 2)
| 2.203125 | 2 |
scripts/configs/minimal.py | Skydivizer/compsci | 0 | 12790772 | <gh_stars>0
import numpy as np
from pcs_aero.experiment import ExperimentGroup
# Convenience
_all_shapes = ['rect', 'halfcircle', 'triangle']
# Script to use.
program = "scripts/coef.py"
experiments = [
ExperimentGroup("scripts/results/theta.txt", {
'shape': _all_shapes,
'theta': range(0, 181, 30),
'time': [20],
'Re': [105],
'r': [64],
}),
ExperimentGroup('scripts/results/reynolds.txt', {
'shape': _all_shapes,
'Re': range(55, 106, 10),
'time': [20],
'r': [64],
}),
ExperimentGroup('scripts/results/velocity.txt', {
'shape': _all_shapes,
'Uin': np.arange(4, 16, 2) / 100,
'time': [30],
'r': [64],
}),
ExperimentGroup('scripts/results/size.txt', {
'shape': _all_shapes,
'size': np.arange(5, 24, 3) / 64,
'time': [20],
'r': [64],
}),
ExperimentGroup('scripts/results/circle_reynolds.txt', {
'shape': ['circle'],
'Re': range(10, 1001, 100),
'time': [40],
'r': [64],
}),
] | 2.140625 | 2 |
src/university/models.py | Nikhilgupta18/practice-react_django | 0 | 12790773 | from django.db import models
from account.models import Country
from django.contrib import admin
grad_streams_list = [
'Engineering',
'Law',
'Medicine',
'Business',
]
grad_streams = (
('Engineering', 'Engineering'),
('Law', 'Law'),
('Medicine', 'Medicine'),
('Business', 'Business'),
)
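# (Sketch) grad_streams is shaped for a Django `choices` argument; a hypothetical field
# using it could look like:
# stream = models.CharField(max_length=20, choices=grad_streams, default='Engineering')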
class GRE(models.Model):
verbal = models.IntegerField(default=None, null=True, blank=True)
quant = models.IntegerField(default=None, null=True, blank=True)
awa = models.FloatField(default=None, null=True, blank=True)
def __str__(self):
return str(self.verbal)
class MCAT(models.Model):
old_total = models.IntegerField(default=None, null=True, blank=True)
new_total = models.IntegerField(default=None, null=True, blank=True)
chemical_physical = models.IntegerField(default=None, null=True, blank=True)
critical_analysis = models.IntegerField(default=None, null=True, blank=True)
biologic_biochemical = models.IntegerField(default=None, null=True, blank=True)
psycho_social_biological = models.IntegerField(default=None, null=True, blank=True)
def __str__(self):
return str(self.new_total)
class University(models.Model):
name = models.TextField(default=None)
info_link = models.TextField(default=None, null=True)
rank = models.IntegerField(default=None, null=True, blank=True)
country = models.ForeignKey(Country, on_delete=models.CASCADE)
total_students = models.IntegerField(default=None, null=True, blank=True)
total_int_students = models.IntegerField(default=None, null=True, blank=True)
address = models.TextField(default=None, null=True, blank=True)
website = models.TextField(default=None, null=True, blank=True, max_length=500)
schools = models.TextField(default=None, null=True, blank=True)
uni_type = models.TextField(default=None, null=True, blank=True)
grad_school_link = models.TextField(default=None, null=True, blank=True, max_length=500)
undergrad_link = models.TextField(default=None, null=True, blank=True, max_length=500)
business_link = models.TextField(default=None, null=True, blank=True, max_length=500)
med_link = models.TextField(default=None, null=True, blank=True, max_length=500)
law_link = models.TextField(default=None, null=True, blank=True, max_length=500)
engg_link = models.TextField(default=None, null=True, blank=True, max_length=500)
slug = models.SlugField(default=None, null=True, blank=True, max_length=500)
logo = models.TextField(default=None, null=True, blank=True, max_length=500)
def __str__(self):
return self.name
class UniversityAdmin(admin.ModelAdmin):
search_fields = ('name',)
ordering = ('rank',)
class BusinessGrad(models.Model):
university = models.OneToOneField(University, on_delete=models.CASCADE)
enrollment = models.IntegerField(default=None, null=True, blank=True)
international = models.FloatField(default=None, null=True, blank=True)
male = models.FloatField(default=None, null=True, blank=True)
female = models.FloatField(default=None, null=True, blank=True)
acceptance_rate_masters = models.FloatField(default=None, null=True, blank=True) #
acceptance_rate_phd = models.FloatField(default=None, null=True, blank=True) #
us_application_fee = models.IntegerField(default=None, null=True, blank=True) #
int_application_fee = models.IntegerField(default=None, null=True, blank=True) #
tuition = models.FloatField(default=None, null=True, blank=True)
us_deadline = models.DateTimeField(default=None, null=True, blank=True)
int_deadline = models.DateTimeField(default=None, null=True, blank=True)
rolling = models.BooleanField(default=False)
gpa = models.FloatField(default=None, null=True, blank=True)
min_toefl_score = models.IntegerField(default=None, null=True, blank=True)
mean_toefl_score = models.IntegerField(default=None, null=True, blank=True)
min_ielts_score = models.FloatField(default=None, null=True, blank=True)
fin_aid_director_name = models.TextField(default=None, null=True, blank=True)
fin_aid_director_phone = models.TextField(default=None, null=True, blank=True)
fellowships = models.IntegerField(default=None, null=True, blank=True)
teaching_assistantships = models.IntegerField(default=None, null=True, blank=True)
research_assistantships = models.IntegerField(default=None, null=True, blank=True)
# look for room and board
living_expenses = models.IntegerField(default=None, null=True, blank=True)
# unique to business
employed = models.FloatField(default=None, null=True, blank=True)
employed_3_months = models.FloatField(default=None, null=True, blank=True)
avg_work_ex_months = models.IntegerField(default=None, null=True, blank=True)
gmat = models.IntegerField(default=None, null=True, blank=True)
gre = models.OneToOneField(GRE, on_delete=models.CASCADE) #
avg_salary = models.IntegerField(default=None, null=True, blank=True)
def __str__(self):
return self.university.name
class BusinessGradAdmin(admin.ModelAdmin):
search_fields = ('university__name',)
ordering = ('university__rank',)
class EngineeringGrad(models.Model):
university = models.OneToOneField(University, on_delete=models.CASCADE) #
enrollment = models.IntegerField(default=None, null=True, blank=True) #
us_application_fee = models.IntegerField(default=None, null=True, blank=True) #
int_application_fee = models.IntegerField(default=None, null=True, blank=True) #
international = models.FloatField(default=None, null=True, blank=True) #
male = models.FloatField(default=None, null=True, blank=True) #
female = models.FloatField(default=None, null=True, blank=True) #
acceptance_rate_masters = models.FloatField(default=None, null=True, blank=True) #
acceptance_rate_phd = models.FloatField(default=None, null=True, blank=True) #
tuition = models.FloatField(default=None, null=True, blank=True) #
us_deadline = models.DateTimeField(default=None, null=True, blank=True) #
int_deadline = models.DateTimeField(default=None, null=True, blank=True) #
rolling = models.BooleanField(default=False) #
gpa = models.FloatField(default=None, null=True, blank=True) #
min_toefl_score = models.IntegerField(default=None, null=True, blank=True) #
mean_toefl_score = models.IntegerField(default=None, null=True, blank=True) #
min_ielts_score = models.FloatField(default=None, null=True, blank=True) #
fin_aid_director_name = models.TextField(default=None, null=True, blank=True) #
fin_aid_director_phone = models.TextField(default=None, null=True, blank=True) #
fellowships = models.IntegerField(default=None, null=True, blank=True)
teaching_assistantships = models.IntegerField(default=None, null=True, blank=True) #
research_assistantships = models.IntegerField(default=None, null=True, blank=True) #
# look for room and board
living_expenses = models.IntegerField(default=None, null=True, blank=True) #
# unique to engineering
gre = models.OneToOneField(GRE, on_delete=models.CASCADE, null=True, blank=True) #
def __str__(self):
return self.university.name
class EngineeringGradAdmin(admin.ModelAdmin):
search_fields = ('university__name',)
ordering = ('university__rank',)
class MedicineGrad(models.Model):
university = models.OneToOneField(University, on_delete=models.CASCADE)
enrollment = models.IntegerField(default=None, null=True, blank=True)
international = models.FloatField(default=None, null=True, blank=True)
us_application_fee = models.IntegerField(default=None, null=True, blank=True) #
int_application_fee = models.IntegerField(default=None, null=True, blank=True) #
acceptance_rate_masters = models.FloatField(default=None, null=True, blank=True) #
acceptance_rate_phd = models.FloatField(default=None, null=True, blank=True) #
male = models.FloatField(default=None, null=True, blank=True)
female = models.FloatField(default=None, null=True, blank=True)
tuition = models.FloatField(default=None, null=True, blank=True)
us_deadline = models.DateTimeField(default=None, null=True, blank=True)
int_deadline = models.DateTimeField(default=None, null=True, blank=True)
rolling = models.BooleanField(default=False)
gpa = models.FloatField(default=None, null=True, blank=True) #
fin_aid_director_name = models.TextField(default=None, null=True, blank=True)
fin_aid_director_phone = models.TextField(default=None, null=True, blank=True)
students_receiving_aid = models.FloatField(default=None, null=True, blank=True)
# look for room and board
living_expenses = models.IntegerField(default=None, null=True, blank=True)
# unique to medicine
mcat = models.OneToOneField(MCAT, on_delete=models.CASCADE)
def __str__(self):
return self.university.name
class MedicineGradAdmin(admin.ModelAdmin):
search_fields = ('university__name',)
ordering = ('university__rank',)
class LawGrad(models.Model):
university = models.OneToOneField(University, on_delete=models.CASCADE)
enrollment = models.IntegerField(default=None, null=True, blank=True)
international = models.FloatField(default=None, null=True, blank=True)
us_application_fee = models.IntegerField(default=None, null=True, blank=True) #
int_application_fee = models.IntegerField(default=None, null=True, blank=True) #
male = models.FloatField(default=None, null=True, blank=True)
female = models.FloatField(default=None, null=True, blank=True)
acceptance_rate = models.FloatField(default=None, null=True, blank=True)
tuition = models.FloatField(default=None, null=True, blank=True)
us_deadline = models.DateTimeField(default=None, null=True, blank=True)
int_deadline = models.DateTimeField(default=None, null=True, blank=True)
rolling = models.BooleanField(default=False)
int_rolling = models.BooleanField(default=False)
employed = models.FloatField(default=None, null=True, blank=True)
fin_aid_director_name = models.TextField(default=None, null=True, blank=True)
fin_aid_director_phone = models.TextField(default=None, null=True, blank=True)
students_receiving_aid = models.FloatField(default=None, null=True, blank=True)
gpa = models.FloatField(default=None, null=True, blank=True) #
# look for room and board
living_expenses = models.IntegerField(default=None, null=True, blank=True)
# unique to law
# look for median lsat
employed = models.FloatField(default=None, null=True, blank=True)
bar_passage_rate = models.FloatField(default=None, null=True, blank=True)
median_grant = models.IntegerField(default=None, null=True, blank=True)
lsat_score = models.IntegerField(default=None, null=True, blank=True)
median_public_salary = models.IntegerField(default=None, null=True, blank=True)
median_private_salary = models.IntegerField(default=None, null=True, blank=True)
def __str__(self):
return self.university.name
class LawGradAdmin(admin.ModelAdmin):
search_fields = ('university__name',)
ordering = ('university__rank',)
| 2.125 | 2 |
disk.py | oznotes/Pit | 1 | 12790774 | <gh_stars>1-10
from __future__ import print_function
import sys
import time
import wmi
__author__ = "Oz"
__copyright__ = "Disk Reader WMI"
def read_in_chunks(fileobj, chunksize=65536):
"""
Lazy function to read a file piece by piece.
Default chunk size: 64kB.
"""
while True:
data = fileobj.read(chunksize)
if not data:
break
yield data
def detect_disk():
count = 0
disks = []
while disks == []:
disks = wmi.WMI().Win32_DiskDrive(MediaType="Removable Media")
if disks == []:
time.sleep(2)
print ("Please connect ")
if count == 10:
return False, "a", "b", "c"
count += 1
else:
for disk in disks:
disk_size = int(disk.size)
gig = 1024 * 1024
uid = disk.serialnumber
sector_size = disk.BytesPerSector
return True, disk.name, disk_size, sector_size
def reading():
pass
def writing(d, image, addr):
f = open(image, "rb")
booster = open(d, "r+b")
addr2 = addr.strip("L")
addr2 = int(addr2, 16)
print(" Flashing...", end="")
booster.seek(addr2)
for piece in read_in_chunks(f):
booster.write(piece)
print ("Completed")
return True
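# Usage sketch (Windows, needs administrator rights; the image name and offset below
# are hypothetical):
# found, device_name, disk_size, sector_size = detect_disk()
# if found:
#     writing(device_name, "firmware.img", "0x0")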
| 3.078125 | 3 |
ex4.py | hanskrupakar/Tensorflow-Beginner | 0 | 12790775 | <reponame>hanskrupakar/Tensorflow-Beginner
import tensorflow as tf
import numpy as np
from tensorflow.models.rnn import rnn, rnn_cell
if __name__ == '__main__':
X = np.random.randint(0,2,(50000,25,10))
Y = np.reshape(np.sum(X,axis=2),(50000,25,1))
X_test = np.random.randint(0,2,(1000,25,10))
Y_test = np.reshape(np.sum(X_test,axis=2),(1000,25,1))
W = tf.Variable(tf.random_normal([10,1], stddev=0.01))
B = tf.Variable(tf.zeros([25,1]))
x = tf.placeholder(tf.float32, [None,25,10])
y = tf.placeholder(tf.float32, [None,25,1])
lstm = rnn_cell.BasicLSTMCell(10,forget_bias=1.0)
XT = tf.transpose(x, [1, 0, 2])
XR = tf.reshape(XT, [-1, 10])
X_split = tf.split(0, 25, XR)
init_state = tf.placeholder("float", [None, 2*10])
outputs, _states = rnn.rnn(lstm,X_split, init_state)
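    # rnn.rnn (legacy TF API) unrolls the LSTM over the 25 per-step tensors in X_split,
    # returning one output per time step plus the final state; only the last output is
    # projected by W and B below.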
res = tf.matmul(outputs[-1], W) + B
print("XT: ", XT)
print("XR: ",XR)
print("X_SPLIT: ",X_split)
print("RES: ", res.get_shape())
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(res, y))
train_op = tf.train.AdamOptimizer().minimize(cost)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for i in range(100):
for start in range(0,50000,500):
sess.run(train_op, feed_dict = {x: X[start:start+500], y: Y[start:start+500], init_state: np.zeros([50000, 20])})
print(sess.run(outputs))
| 3.515625 | 4 |
operators/identity_operator.py | aryanz-co-in/python-comments-variables-type-casting-operators | 0 | 12790776 | # Identity operator
# "is" and "is not"
living_room_temperature = 23
kitchen_room_temperature = 23
# is
if living_room_temperature is kitchen_room_temperature:
print("Temperature is same")
else:
print("Temperature is NOT same")
living_room_temperature = 18
# is
if living_room_temperature is kitchen_room_temperature:
print("Temperature is same")
else:
print("Temperature is NOT same")
# is not
if living_room_temperature is not kitchen_room_temperature:
print("Temperature is NOT same")
else:
print("Temperature is same")
# The ids of the two values must be the same.
# It does not compare the values themselves; it checks whether they have the same id.
name1 = "John"
name2 = name1.lower()
# name2 = "John"
if name1 is name2:
print(f"{id(name1)} is {id(name2)}")
else:
print(f"{id(name1)} is NOT {id(name2)}")
| 4.46875 | 4 |
tests/test_atividade.py | leticiaarj/TrabalhoTPPE | 0 | 12790777 | <reponame>leticiaarj/TrabalhoTPPE<filename>tests/test_atividade.py<gh_stars>0
import unittest
from usecases.Atividades import Atividade
from usecases.Nodos import Nodo, NodoDecisao, NodoFusao, NodoFinal
class testeAtividade(unittest.TestCase):
def testCriacaoDiagrama(self):
nodoInicial = Nodo('Nodo Inicial', ['proximo'])
nodoDecisao = NodoDecisao('Nodo Decisao', ['Nodo Inicial'] ,['Nodo Fusao'])
nodoFusao = NodoFusao('Nodo Fusao', ['Nodo Decisao'] ,['Nodo Final'])
nodoFinal = NodoFinal('Nodo Final', ['Nodo Fusao'])
nodoInicial.addNodo(nodoDecisao)
nodoInicial.addNodo(nodoFusao)
nodoInicial.addNodo(nodoFinal)
        self.assertEqual(len(nodoInicial.arrayNodo), 4, 'Nodes were not created')
atividade = Atividade('Diagrama de Atividades', 'Atividade de Monitoramento', nodoInicial.arrayNodo)
"""self.assertEqual(Atividade['nomeDiagrama'], 'Diagrama de Atividades', 'Nome do diagrama não criado')
self.assertEqual(Atividades['nomeAtividade'], 'Atividade de Monitoramento', 'Nome da atividade não criado')
self.assertEqual(len(nodoInicial['arrayAtividades']), 4, 'Atividades não foram criadas')
self.assertEqual(len(nodoInicial['arrayTransicoes']), 4, 'Transicoes não foram criadas')
if __name__ == "__main__":
unittest.main() """
if __name__ == "__main__" and __package__ is None:
from os import sys, path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
Pensamento Computacional/Exercicios/ex055.py | jotaven/UnipeCC | 1 | 12790778 | '''Faça um programa que receba dois números inteiros e gere os números inteiros que estão no intervalo compreendido por eles.'''
number1 = int(input("Digite o primeiro numero: ")) + 1
number2 = int(input("Digite o segundo numero: "))
print(list(range(number1, number2))) | 4.0625 | 4 |
TOPSIS-Aseem-101803469/__init__.py | 26aseem/TOPSIS | 0 | 12790779 | from TOPSIS-Aseem-101803469.topsis import topsisPerformer | 0.925781 | 1 |
Gui/Images/title_icons.py | marioharper182/OptionsPricing | 0 | 12790780 | #----------------------------------------------------------------------
# This file was generated by img2py.py
#
from wx.lib.embeddedimage import PyEmbeddedImage
AmericanOption = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABHNCSVQICAgIfAhkiAAAAAlw"
"SFlzAAAHIQAAByEBauL93wAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA"
"AAcESURBVHic7ZpbaB1VFIa/ld7S1t7UYmq8JDWpWOut0lqLiCCCPgjewBel9EEpiAiC2Bcf"
"fFFBvIEoivgkIihFQQShUhBbsGhbtWrTS1Jr29TWS7SX2Jhk+bD2zpmzz5w5M3MmmVbzw2LP"
"mbNn7bXX7HXbe1BVVBVgBvAI8AOgwCDwNrDM9/kvkrjJIyIPAW9Si16gW1VHY/4769ESuX64"
"Tp/FwO0TIEspmBq5PpnQ7+/xFiQtRGQBcKGjRY5aMBkHI+0g8CuwTVXryh9VwEvAamBa0OcL"
"RxMGEZkFXAEsA6507RKgHWjNyG5IRL4GNntS1WNjY3kf4AZeDzwbMDgP6ATWAFuALap6IKMQ"
"sYhMdGmErnTjtSQ82iy2q+pyqF4BAAMxnU8BC4FHHSEih3DKAL7BzCe6/Hx7DpXlGqWLsYlf"
"CkgDYUeAXcAeYB+wN9L2A6OOFFBVHRWReUAX0O3IX18PTMf8miEaEoB1nlGEWjEnGN4fDxoF"
"eoB3gMcwk5xVVMgDdrhxBvy9cAWUjftV9f2JHPBMU8A/SX+KyEzMP1yGLeNFwBRHLZHrv4Dd"
"nlT1aD2eZ5oCxiAiXcBKR8sxO26jsc+I4/UnpozO8L8iFPAz5tTqQYGjwAUpeK0RkXXACuDc"
"AmTzmOd4RmUCmlPAaaxW2AC8gHn1MIf4DfgM+BRYC9zUgOddde4PAfuxtLwXiwK9WBQYcTQa"
"uZ6L5Q1R6gZmh4ybUcAMYKGqbhSRjcDVMX0UU85+4NWUfEeA74GtEdqpqiMZ5fsy+kNEBAvZ"
"<KEY>"
"<KEY>"
"<KEY>/LJrdi6XD49qdiE1qGTS4c/11gZsqxfiHZCcY6vRBFK2ARNrkTwB11+nQB"
"mzBBQ4ST/xtLX7dhub+PAr1p/ISItFMdCdYCC6J9ilDAINWCrwfmYEWUxxBWhIAVQFBt+x69"
"wOdUvP+3qpqYHSZBVQ8BhzCFIyK3UqACTgNfAR9gewkec1zr7XYf8AlwJxYNkvC4qn6UVgAR"
"aSXeCZ5Q1VNJz3o0mwfsUNWXRUQxxxZ62R7gXsxW02yr1aS5bgfoOuKjwPw6fFREDmMl9B7M"
"fPZQcCIElge8gdl+TYjBwtfdbuC2FPxmiMgqqqNAdw65BNs9agduSerYrAJuc23SjvHTpN/d"
"eS/j+McwRzmEVZK+nYb5mnl1niukFogiaYLNbm0dp5LI9ERod6NIICLzsQqww9GTWFFWWCpc"
"FEaweqFqgkCPqh7Oy1RVB4DtjhCRNQRVaR4F/OzadrK/3V8JJuhon6oOZWEkInOwt9qKhdhp"
"kfYf4Cfgp6QtccingNdU9TkRmY7Z2WJsmbVjthXa41herqq/ZxnI1QGdmDNd4lpPi1KwUBE5"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>oUkgi5hCPuXLEu3MFoN/FOcW4RcoVDElNs5VHAWhHppuL8eoE+Vf0FxmJ3nBMM"
"3+hFcQIloJ9aE0qKAp1UTNSb680Emy55FOAnUwURGca8fDO5/0kqviLqM3ar6vGMvP7AdpKi"
"Mu4gcNhF1gJpeY1iDimuuDlYoDypkEcBT2Gbl9EokCoVxia6V1VPZxlQRFqwZCZ0ilmiQB8F"
"bYgMq6pnWihE5FzinWIXdTK5OphK5YuThh0nFK6I6iI+zJ2fg+UAlQ85Wsl4epxHAc+IyKME"
"UYDGqbCfcAfxO8JxGMZMZ6dr+yN0BDgS5hgiMgPz9DMxhd6AfWmympgTrLQK6Mc86tXuGf+t"
"T6PT3rQYwRS5EzsY9W1P1rzf+ZfT2MroB74D3gIQkfOwqvBa3z/8Smwd8HrAc6bXsojMxjYq"
"vUZXkf4cfwQrcPqBg9gnuX6yu7Jmi0Uhkwm4PbhNVA4aBPs4Yja25FqD9iSVJXvsTPzctikn"
"6AqZQr4ZLAvj+THiWYFJBZQtQNmYVEDZApSNSQWULUDZmFRA2QKUjUkFlC1A2ZhUQNkClI1J"
"BZQtQNmYVEDZApSN/70C/gVDTFLx+fxz1wAAAABJRU5ErkJggg==")
#----------------------------------------------------------------------
AsianOption = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAAA3NCSVQICAjb4U/gAAAACXBI"
"WXMAABIFAAASBQEoPImrAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAA"
"ArJQTFRF////AAAAAAAAAAAAAAAAMzMzKysrJCQkICAgHBwcGhoaFxcXFRUVFBQUJCQSIiIi"
"ICAgHh4eHBwcGxsbGB<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>aHBwaHhwaHRwaHRwaHR<KEY>"
"HRwbHRwaHRsaHBwaHRwaHRwaHRwaHRwaHRwaHRwaHRsaHBwaHRwaHRwaHRwaHRwaHRwaHRwa"
"HRwaHRwaHhwaHRwaHRwaHRwaHRwaHRwaHRwaHRwaHRwaHRwaHRwaHRwaHRwaHRwaDWpeAgAA"
"AOV0Uk5TAAECAwQFBgcICQoLDA0ODxAREhMVFxgaGxwdHh8gISIjJCUmJygpKissLS8wMTM0"
"NTY3ODk6Ozw9PkBBQkNERUZHSUpLTE1PUFFSU1RVVldYWVpbXF1fYGFiY2RlZmdoaWprbW9w"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"D8+ndNutL0Q3pcWEPN2ErvPOka4GikrOPwx80kwN4Hoou/gSfkm6QS37jJ275RQ1Fuh6udze"
"1FnrjxFgt6Rps8b1jw6VM5e7W1LGh0uLD1biqFTSCoyKPasWzZmWPX7UkJSXxk6cMmPekuVr"
"<KEY>PJKSaZhdkzvL4j5IQ1QN1xWbaZBtsjXuN3ziN9ywSbpa5MubuC7VpT988fH7"
"E0enPN5MtUVVU6eS6a+PTh62H2OQ6rQfS2UllvIpowb3S9mH32b5dffBKtUt5onuHVs3Udgi"
"jARZEvEbrsu+g9mqX/RFWCG/u/DrIL8JGG+7JYXH9n9lXKTqEHkAdkXJ7wRGdVMZrT7lsoPF"
"h6swMuUsfD3GpvZSWPME/IrmF67dXkGAqQpwX95n+<KEY>uLYhCjCL6+I7UbZj/Vf5"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"q3oUKhrJVgB88C3GmbbhOXAuVMEKAd+Ggm0YVesOAbGybaBGxUngpBz8RaDBsm2ltj/lYCOB"
"hsm2DLhQhd8F4JgcZMDZ5NjOn2OsHlgAxMo2A+iZimVtxEdwVA56wNeSojFa6U4475ItE2ga"
"jSVDT8IhOXgAlktyY0jycU41xgEtPFgy1Qt+l4NYWCfJjSHpIpdUIw1ofQ+WbHWD3+TgXtgq"
"yY0h6V9wyTYS8NyNZaruh31y0Ap2SXJjhEhnIEK2F4GYNlhmKAbK5CAaTkt6BKOtGgMxso0E"
"uruxzFUPqAxTsCFAV0XMxFjwcBYwQLb3gBEPYikJyQQ6Klguxu7T/C9LtkLAdxy/coxEBSsl"
"UJ5s2wiQqmD5XGX/UWC8bD8SoKeCJQJvteu0EuNURAwQL9t04PC7ZzGq4nKAWAWLg79DpD4Y"
"P0sXwSNbNpClhRjligc6KVgX+ENSe4yV0uR/vlSNEUBvJVQDG+Xawq9hChayvXq2jEkb1izq"
"pNrCBo3pLal93769wiVFuuSouW67Zf0HmwyT/NCKhBoAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
EuropeanOption = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABHNCSVQICAgIfAhkiAAAAAlw"
"SFlzAAAwfQAAMH0BS0BPwgAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA"
"AALNSURBVHic7Zs9axRRFIafExfcBGFVNCiBoIKsCGpEiHaKlZUfoJjSwkp/gZ1dLFMGK0GE"
"BAQFFQKCHxBBKz8KXUWyq5WiYgISVyQei5mNN9fBmN2ZOca5DwwsZ+fcee+7c+7cuTMrqkqR"
"6bIWYE2p9UFENgMnDbXkybiq1iE2QER2A/uBYUtVOdIUkbuq+pR4DGgAWrCtpqrzY8DHjvxc"
"nnwCEFVFRFYDn40F5c0aVZ0u/FUgGGAtwJpggLUAawpvQCm+BFashRhQEREEmAReu18Ag8Bt"
"L+EwcAuYc2IDwAxQd2Jr4/gdL/8YcM2L7QE+AG+9/B3A/Tbz1wNVon65HAeuerFBgFOqSmsD"
"tgETbiyOPwHKXuw8MOTFBoDrCfm1hNgF4GhC/lgH+fuASwn7NhJiI4UfAwS4THTKtVgJ9AFT"
"<KEY>"
<KEY>//<KEY>"
"YAuwC9gLnAFW5aBFANxpYdbX3Qlgqz8dTZie9gHjOegpq2puBpxerOMJRhwBvv8PBgwvtfOO"
"prPL3YCbQFe7BsS6RrM0IMupsAKbVNW/KVkSItIDvCf9gTHzqfBkp50HUNVZfl8ISY0sDbjy"
"j7a1kIzGgEdApZPa98aBFcDDlDWW1Xk4mjZ1VZ1ZfLe/Q1XniBcv0sZ6KmxOMMBagDXBAGsB"
"1gQDrAVYEwywFmBNMMBagDXBAGsB1gQDrAVYEwywFmBNMMBagDXBAGsB1hTegFL8j7Fqyu32"
"i8gQcE9V33XSkIj0AgeB/lSU/eKEiLwAGCG7J7CHUngociBDfaOFL4HCG1ACbgAd1ekfeJlC"
"G1PAuRTaSeLx/PsBRaXwJfAT3BK6v9OwyUMAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
ImpliedVol = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAABHNCSVQICAgIfAhkiAAAAAlw"
"SFlzAAAdmwAAHZsBHYZu2AAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoA"
"AAWcSURBVHic7ZpdiFVVFMd/a2Y0dWZ0orRGRtRJy1FzRkELKjA1oSkjCKqHoogICguKHgr1"
"IaSHPh8Ci3roGwPLosKi0oIoAoV0FGf8oqYya2pmSBud0VFXD3tfOXM9n/fsc08D9w+Lc8+9"
"+6z9P/+79t7rrH1EVSkFInIp8B2wTlXfK8nJ/wBVKa5tAC4DNorIFhGZKSLjCuaIX+ZII4AX"
"7cBPwGDBROQ+R74zhSsBinEE2JyRb6dII8DtIb89rKpHU/guG6SUSVBEWoBdwFifnz9X1fa0"
"xMqFxBEgIgK8hv/NA+xPxajMqCnxml+AXnteBdwAjHdFqpxILICqDgN3eb8TkVnAq8AyQNxQ"
"Kw9Ch4CIzBKRt0SkLqydqh5S1eXAPcAWlwQzh6r6GvAQcBxQYHZQu7gGNKT1kYXViMgkoBEz"
"hicAtcDjmHFdgIvM7oCIdALfAFOBNaraG3FNWfAV5l8Os8UOIqDT468fqMv731dVqoA1wOkI"
"kVJFgIhcAszwfPW+qg6k8ekKVaq6HXg6ot0DKfu5GRgD9NjzSSn9uYMNzxpgO+HD4LaUQ6DG"
"Hm8EVuUd+gU7lwqLyN3A2yFa9QLzVbUnpM2ogzcPOBHR9mLgqQy5nIOITBWRDSKyVUResnNI"
"JvBmgsUCbAXexDzfn7DHfVkRKUBEGoGdwBT71XLgDhFZoap7nHfoGaNLMWP9OLAa+6QYc3xf"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"pDWJ71EhAGQnwqgRABKL0BbHp7M5QESqgXofG4d5vvi3yAa0xM5jzgn9mDlhV6ivJBzstvc8"
"oBVYYI9zMBWepBsjhQevPmAv0IHJ9DqAA6p6JoKLExEiBbChdD9mvb8cqA69wDwYHQWO2eMg"
"JhImYoSaSPSGzBBGlE+A123o+3GLK8IKVd3p+2vA2luPqQPuIHjtPQ18DbyA2RBZBEyIubY3"
"ANcCDwIvAz9G9PMpsAqoLjFP6AMWxkqEgKuAv0Kc9WAqQ9McJzxtwCuYiAnqewfQ6FKEYkdj"
"MPl1kJNBoC2LjM/D4d6IG/kiRcZ4ngjFTi7EjOEgB91AfcYCzANOhXDYHXJtYhH8nDyB/2Nu"
"wT4D5mVw44JZ47tC+h4A2iP8xBGhH1jkK4B1shL4EjMBBTn5AXgEuI4SNz4xe5IrgXWY5Cao"
"r+PARmBuTL9xI6EpdBm0Fdo7Me8DLApsaPAbppLTT/QyOBmYj8nagnAG2Aa8C3ykCbfSYi6R"
"<KEY>lILtGC2u/3S4LBUuGA/"
"<KEY>3ArZgw3wd0q+pZh0THWpJzrD2rqlG71nF9N2GSttmer08CS5IIsBSj"
"ZAFDmLDdB/yKfcDxWOH8JObFizpr9Z7PE4FmzA03MzLNHq+qQ/FvM5J/PfAksBj4HXhOVfem"
"ESBrOBUgCKOqHpAFKgLkTSBvVATIm0DeqAiQN4G8UREgbwJ5oyJA3gTyRkWAvAnkjYoACdo6"
"KU7ExFlrmSOJAN8Dm7IiUoRHVfVUWXpKWMauAW4C3sFUfcPKzkntMPAisCTLjZdiK7koamt4"
"hfpdiz1OY2TJqw7zNukJRpbLjmGKoV2YkloXcEjTVGhLxH+OUEeDxifcOAAAAABJRU5ErkJg"
"gg==")
#----------------------------------------------------------------------
LookBackOption = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAAA3NCSVQICAjb4U/gAAAACXBI"
"WXMAAAGcAAABnAH6gGQJAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAA"
"AaFQTFRF////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxZP4DgAAAIp0"
"Uk5TAAECAwQFBwgJDQ4PEBIUFhcZGhscHh8iIyYnKCorLTM0Njc6PEZJTE1OT1BRU1VWXF5f"
"ZGdpamxtbnBxdHV3eH2AgYKGiImKjI+QkpSWnJ2en6GjpaanqaqrrK2xuLm6vr/Aw8XGx8jJ"
"y8zQ0dTX2Nna3N3e4OLj6Onq6+zu7/L09fb3+fr7/P3+brfoEQAAAnpJREFUWMPtV2lD00AQ"
"HVraKlYqHlW8UBAVUPFWVFRUsIp4n2BVPFFQEW21gAeUo+RX285u0t1kj0m/6vuUvJn3kmxm"
"d2cB/oOCRDpVV6N0defQ6KdfjuMs5t+OXGmLhFOvO/9y0ZFQuNW5iixvujbvKDDTEyPJU4NF"
"R4OpboK+/YdjwNgOizzWvyKkL+THnmYnCiL1e7958N5Vxc9OpvkfrN91edzjSxcM+saPbtr3"
"Yw1yKN3vDez9uE6/1n3Oz95EMLrh5jIP39Xo17znCcNJdcLOSZ7Qq44/YdGVi9riTWb5OOxT"
"Rbv5MHd4TH0fw2GPiWZ40nZF/cywWJcwk/gbZ4W0QUaNBg2GWeQGmA2izxm3169vZfx43GIA"
"STaSE/7p+RDpP1vBZgAtrDCPyPqNS8geBbsB/1vfZHIAyQ9AMdjGCqpF5CKzyJ0jGcBtZC9J"
"NcZKaBPNYA8bb5E6gdRroBlEppFOC9QDZHqIBnAH6eMCk8cKb6IaHEB6AK7mXCCx7N3mHkVF"
"<KEY>"
"<KEY>IIA+go+Q3Ogt3A+4QyTvmyroPdwBtEREZKGhHmeTwnI/AbGeoeC/o3pC3YLST3"
"Tavl8KWRtP/6S9krh+ktJH1wMvFymG+l9Q+K6YzlUOqi6VULCpbDaaJetaRVyiFD1KsXVYBD"
"1HZOvazTodtYqNBubVToN1fa8w3bO+n7jQ2GHbYWxwJ7k2UEqc3Tgd5oKrA+XKurmDkhm22z"
"Aand1xtQDxxqg1BHHtmgpkNXFbGaj33/Bv4C7Cs45u1y0kgAAAAASUVORK5CYII=")
#----------------------------------------------------------------------
VanillaOption = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAMAAACdt4HsAAAAA3NCSVQICAjb4U/gAAAACXBI"
"WXMAAAJbAAACWwFBeK9IAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAA"
"AddQTFRF////AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJ/rhuQAAAJx0"
"Uk5TAAECAwUGBwgJCgsNDhAREhMXGBkbHh8gISIjJCYoKisuMDEzNTY3ODs+P0FCQ0RISU1O"
"UVNUVVZXWV1eX2BhYmNkZWZoam9ydnh6fH6BgoOEhYaHiImLlZeYmZudoqOlpqipqq2ur7Cx"
"tbe4ubq7vL/Cw8XHyMzP0NHS09fY2dvc4OHj5OXm5+jp6+zt7u/w8vP09vf4+fr7/P3+JuId"
"/QAAAqhJREFUGBnlwftbk2UAx+Hvkm1SjlQ0wMwsRS2ygwyNskCRxHPTICrJNA3MbNraMipC"
"MFFpE1cy4P38sT7oLq6xPe/eQz9235KrdZ1fZCaLxcnMl/tfVHDbLiywYuny6wpm/TcOq13Z"
"oAA67lD2sEDZvXfk2wdLGE763P4tEbV1nb3uYDiH5VNyEePPN7Viz+8YzofyZUcJWBxcqwqx"
"T0vA4h750PAr8M9uVdlRBP6Iy9txjCOq0YNxRp7is8BlLdvZoUqXgEKjvPQCU02SNl5w5l9Q"
"hcRt4LC8ZIFeSYnfgC5V6gUy8hArAW2SrmOcU6U2YD6m+nYBU5LexrjYrFWmgN2q7zQwIikN"
"fKUqI0BK9UTOYvRIDfMwt1ZVPsZIReQqOsayl6XXgG9V7SWWjUblZgij0CXpEHBKNd4vYAzJ"
"xV4HSG+S9MoUMKBam9KAs1d240A2JumNPMZHsohlgXFZtQOFZkmNf2PcaJJNcwFol80wMCij"
"<KEY>"
"<KEY>"
"unfbYqbEM91ydXAaT9MHVUd8IE9d+YG46kskj52/OfPYYubm+WPJhP4fvvthbNRiLP29/PkZ"
"Fz/Jn/dw8a78iYxj9Yv86sbqgPxaM4nFxHPyrQ+LHvkX/Ysa0w0K4Cg1PlEQz89S5UGjAjlJ"
"lRMKpukRq8wlFNAQq3ymoJofU+HfDQpsmAqfK7jWBVYstCiEr1kxojBedShztimUUcquKJx2"
"ynYqpDRP/aiw3uKpDoWWxcgqvH0Y+xReJAOZiP6DllyuRXU9AXB3E/FsJnLpAAAAAElFTkSu"
"QmCC")
| 1.390625 | 1 |
reposit/auth/exceptions.py | tombasche/reposit-home-assistant | 0 | 12790781 | <filename>reposit/auth/exceptions.py
"""
Exceptions relating to auth
"""
class NoAuthenticationError(Exception):
"""
    Raised when a method that requires an access token is called before
    one has been set.
"""
# pylint: disable=unnecessary-pass
pass
| 1.929688 | 2 |
proto/core/molecule.py | protoserver/proto-cli | 0 | 12790782 |
from abc import abstractmethod
from cement import Interface, Handler
class MoleculeInterface(Interface):
class Meta:
interface = 'stack'
@abstractmethod
def _build_config(self):
"""Do something to build the config."""
pass
def build_config(self):
"""Do something to build the config."""
self._build_config()
def start(self):
"""Do something to start the stack."""
pass
def stop(self):
"""Do something to stop the stack."""
pass
def restart(self):
"""Do something to restart the stack."""
self.stop()
self.start()
class Molecule(MoleculeInterface, Handler):
"""FIXME: Put all common operations here."""
pass
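# Illustrative sketch (not part of the original module): a minimal concrete
# molecule that fills in the abstract `_build_config` hook. The 'echo' label and
# the printed messages are assumptions made purely for the example.
class EchoMolecule(Molecule):
    """Hypothetical molecule that just reports what it is doing."""
    class Meta:
        label = 'echo'
    def _build_config(self):
        print('building config for the echo stack')
    def start(self):
        print('starting the echo stack')
    def stop(self):
        print('stopping the echo stack')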
| 3.109375 | 3 |
netcdf_utils/NetCDF_O2_corr.py | shaunwbell/EcoFOCI_MooringAnalysis | 2 | 12790783 | <reponame>shaunwbell/EcoFOCI_MooringAnalysis
#!/usr/bin/env python
"""
NetCDF_O2_corr.py
Correct oxygen concentration and saturation in a timeseries for salinity.
History:
========
2019-01-03 Put in flag for correcting Aanderaa optode values vs SBE-43 values (mmole/l vs umole/kg)
Compatibility:
==============
python >=3.6 - not tested, unlikely to work without updates
python 2.7 - Tested and developed for
"""
import datetime
import argparse
import sys
import os
# Science Stack
from netCDF4 import Dataset
import numpy as np
import seawater as sw
# Relative User Stack
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(1, parent_dir)
import calc.aanderaa_corrO2_sal as O2_sal_corr
from io_utils.EcoFOCI_netCDF_read import EcoFOCI_netCDF
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__created__ = datetime.datetime(2016, 11, 1)
__modified__ = datetime.datetime(2016, 11, 1)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = "O2", "salinity correction"
"""------------------------------- MAIN--------------------------------------------"""
parser = argparse.ArgumentParser(
description="Correct Oxygen in Timeseries using salinity but not depth"
)
parser.add_argument(
"sourcefile", metavar="sourcefile", type=str, help="complete path to epic file"
)
parser.add_argument(
"sal_source",
metavar="sal_source",
type=str,
help="quick description of source of salinity for correction",
)
parser.add_argument(
"-aanderaa",
"--aanderaa",
action="store_true",
help="aanderaa optode with Molar output",
)
parser.add_argument(
"-sbe43", "--sbe43", action="store_true", help="sbe43 optode with Mmkg output"
)
args = parser.parse_args()
ncfile = args.sourcefile
df = EcoFOCI_netCDF(ncfile)
global_atts = df.get_global_atts()
nchandle = df._getnchandle_()
vars_dic = df.get_vars()
ncdata = df.ncreadfile_dic()
O2_corr = O2_sal_corr.O2_sal_comp(
oxygen_conc=ncdata["O_65"][:, 0, 0, 0],
salinity=ncdata["S_41"][:, 0, 0, 0],
temperature=ncdata["T_20"][:, 0, 0, 0],
)
O2psat_corr = O2_sal_corr.O2PercentSat(
oxygen_conc=O2_corr,
temperature=ncdata["T_20"][:, 0, 0, 0],
salinity=ncdata["S_41"][:, 0, 0, 0],
pressure=ncdata["depth"][:],
)
if args.aanderaa:
O2_corr_umkg = O2_sal_corr.O2_molar2umkg(
oxygen_conc=O2_corr,
temperature=ncdata["T_20"][:, 0, 0, 0],
salinity=ncdata["S_41"][:, 0, 0, 0],
pressure=ncdata["depth"][:],
)
if args.sbe43:
sys.exit("Correction not currently valid for SBE-43.")
O2_corr_umkg[np.where(np.isnan(O2_corr_umkg))] = 1e35
O2psat_corr[np.where(np.isnan(O2psat_corr))] = 1e35
nchandle.variables["O_65"][:, 0, 0, 0] = O2_corr_umkg
nchandle.variables["OST_62"][:, 0, 0, 0] = O2psat_corr
update = "Oxygen Concentration and Saturation corrected for salinity using {0}".format(
args.sal_source
)
if not "History" in global_atts.keys():
print("adding history attribute")
histtime = datetime.datetime.utcnow()
nchandle.setncattr(
"History",
"{histtime:%B %d, %Y %H:%M} UTC - {history} ".format(
histtime=histtime, history=update
),
)
else:
print("updating history attribute")
histtime = datetime.datetime.utcnow()
nchandle.setncattr(
"History",
global_atts["History"]
+ "\n"
+ "{histtime:%B %d, %Y %H:%M} UTC - {history}".format(
histtime=histtime, history=update
),
)
df.close()
| 2.046875 | 2 |
gen-py/Services_old/__init__.py | afshelburn/irpy | 0 | 12790784 | <filename>gen-py/Services_old/__init__.py
__all__ = ['ttypes', 'constants', 'GameService', 'GamePlayer']
| 1.054688 | 1 |
cscs-checks/libraries/magma/magma_checks.py | CLIP-HPC/reframe | 167 | 12790785 | <gh_stars>100-1000
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import reframe as rfm
import reframe.utility.sanity as sn
@rfm.simple_test
class MagmaCheck(rfm.RegressionTest):
subtest = parameter(['cblas_z', 'zgemm', 'zsymmetrize', 'ztranspose',
'zunmbr'])
valid_systems = ['daint:gpu', 'dom:gpu']
valid_prog_environs = ['builtin']
num_gpus_per_node = 1
prebuild_cmds = ['patch < patch.txt']
modules = ['magma']
maintainers = ['AJ', 'SK']
tags = {'scs', 'production', 'maintenance'}
@run_before('compile')
def set_build_system_opts(self):
self.build_system = 'Make'
self.build_system.makefile = f'Makefile_{self.subtest}'
self.build_system.cxxflags = ['-std=c++11']
self.build_system.ldflags = ['-lcusparse', '-lcublas', '-lmagma',
'-lmagma_sparse']
self.executable = f'./testing_{self.subtest}'
        # FIXME: Compile cblas_z with -O0, since higher optimization levels
        # trigger a segmentation fault
if self.subtest == 'cblas_z':
self.build_system.cxxflags += ['-O0']
@run_before('run')
def set_exec_opts(self):
if self.subtest == 'zgemm':
self.executable_opts = ['--range 1088:3136:1024']
@sanity_function
def assert_success(self):
return sn.assert_found(r'Result = PASS', self.stdout)
@run_before('performance')
def set_performance_patterns(self):
if self.subtest == 'cblas_z':
self.perf_patterns = {
'duration': sn.extractsingle(r'Duration: (\S+)',
self.stdout, 1, float)
}
self.reference = {
'daint:gpu': {
'duration': (0.10, None, 1.05, 's'),
},
'dom:gpu': {
'duration': (0.10, None, 1.05, 's'),
},
}
elif self.subtest == 'zgemm':
self.perf_patterns = {
'magma': sn.extractsingle(
r'MAGMA GFlops: (?P<magma_gflops>\S+)',
self.stdout, 'magma_gflops', float, 2
),
'cublas': sn.extractsingle(
r'cuBLAS GFlops: (?P<cublas_gflops>\S+)', self.stdout,
'cublas_gflops', float, 2)
}
self.reference = {
'daint:gpu': {
'magma': (3692.65, -0.05, None, 'Gflop/s'),
'cublas': (4269.31, -0.09, None, 'Gflop/s'),
},
'dom:gpu': {
'magma': (3692.65, -0.05, None, 'Gflop/s'),
'cublas': (4269.31, -0.09, None, 'Gflop/s'),
}
}
elif self.subtest == 'zsymmetrize':
self.perf_patterns = {
'gpu_perf': sn.extractsingle(r'GPU performance: (\S+)',
self.stdout, 1, float),
}
self.reference = {
'daint:gpu': {
'gpu_perf': (158.3, -0.05, None, 'GB/s'),
},
'dom:gpu': {
'gpu_perf': (158.3, -0.05, None, 'GB/s'),
}
}
elif self.subtest == 'ztranspose':
self.perf_patterns = {
'gpu_perf':
sn.extractsingle(
r'GPU performance: (?P<gpu_performance>\S+)',
self.stdout, 'gpu_performance', float
)
}
self.reference = {
'daint:gpu': {
'gpu_perf': (498.2, -0.05, None, 'GB/s'),
},
'dom:gpu': {
'gpu_perf': (498.2, -0.05, None, 'GB/s'),
}
}
elif self.subtest == 'zunmbr':
self.perf_patterns = {
'gpu_perf':
sn.extractsingle(
r'GPU performance: (?P<gpu_performance>\S+)',
self.stdout, 'gpu_performance', float
)
}
self.reference = {
'daint:gpu': {
'gpu_perf': (254.7, -0.05, None, 'Gflop/s'),
},
'dom:gpu': {
'gpu_perf': (254.7, -0.05, None, 'Gflop/s'),
}
}
| 1.875 | 2 |
main.py | kamaravichow/hrv-stress-detection-model | 2 | 12790786 | import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation
from keras.layers.core import Dense
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.utils import to_categorical
trainFile = pd.read_csv('./dataset/train.csv').drop(columns="datasetId")
testFile = pd.read_csv('./dataset/test.csv').drop(columns="datasetId")
# train
train_samples = trainFile.drop(columns='condition').to_numpy()
train_labels = trainFile['condition'].to_numpy()
# test
test_samples = testFile.drop(columns='condition').to_numpy()
test_labels = testFile['condition'].to_numpy()
# normalizing features
scaler = MinMaxScaler(feature_range=(0, 1))
train_samples = scaler.fit_transform(train_samples)
test_samples = scaler.transform(test_samples)  # reuse the scaler fitted on the training data to avoid test-set leakage
# one-hot-encoding labels
one_hot_encoder = OneHotEncoder(categories='auto')
train_labels = one_hot_encoder.fit_transform(train_labels.reshape(-1, 1)).toarray()
test_labels = one_hot_encoder.fit_transform(test_labels.reshape(-1, 1)).toarray()
# build the model
model = Sequential([
Dense(34, input_shape=[34, ], activation='relu'),
Dense(20, activation='relu'),
Dense(10, activation='relu'),
Dense(3, activation='softmax')
])
print(model.summary())
model.compile(Adam(lr=.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_samples, train_labels, validation_split=0.1, batch_size=10, epochs=10, shuffle=True, verbose=2)
model.save('model.h5')
predictions = model.predict(test_samples)
print(predictions)
np.savetxt('predictions.csv', predictions, delimiter=",")  # save the predictions, not the raw test samples
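# Optional follow-up (illustrative sketch, not project code): reload the saved
# model and convert the softmax outputs back into class indices. Only standard
# Keras/NumPy calls are used; running this as a separate evaluation step is an
# assumption.
from keras.models import load_model
reloaded_model = load_model('model.h5')
probabilities = reloaded_model.predict(test_samples)
predicted_classes = np.argmax(probabilities, axis=1)
true_classes = np.argmax(test_labels, axis=1)
print('reloaded-model accuracy:', np.mean(predicted_classes == true_classes))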
| 2.8125 | 3 |
topCoder/srms/500s/srm571/div2/fox_and_game.py | ferhatelmas/algo | 25 | 12790787 | class FoxAndGame:
def countStars(self, result):
return sum(r.count("o") for r in result)
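# Quick illustrative check (hypothetical board): each string is one row of the
# game result and every 'o' is a collected star, so the call below prints 5.
if __name__ == "__main__":
    print(FoxAndGame().countStars(["ooo", "oox", "xxx"]))  # 3 + 2 + 0 = 5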
| 2.765625 | 3 |
kinbot/write_mesmer.py | rubenvdvijver/KinBot | 0 | 12790788 | ###################################################
## ##
## This file is part of the KinBot code v2.0 ##
## ##
## The contents are covered by the terms of the ##
## BSD 3-clause license included in the LICENSE ##
## file, found at the root. ##
## ##
## Copyright 2018 National Technology & ##
## Engineering Solutions of Sandia, LLC (NTESS). ##
## Under the terms of Contract DE-NA0003525 with ##
## NTESS, the U.S. Government retains certain ##
## rights to this software. ##
## ##
## Authors: ##
## <NAME> ##
## <NAME> ##
## ##
###################################################
import os,sys
import xml.etree.cElementTree as ET
import xml.dom.minidom as minidom
import random
def write_mesmer_input(species,barriers,products):
root = ET.Element( 'me:mesmer',{'xmlns':'http://www.xml-cml.org/schema',
'xmlns:me':'http://www.chem.leeds.ac.uk/mesmer',
'xmlns:xsi':'http://www.w3.org/2001/XMLSchema-instance'})
title = ET.SubElement(root,'me:title').text = 'species.chemid'
mollist = ET.SubElement(root,'moleculeList')
#write the initial species
atom = ['C','C','C','H','H','H','H','H','H']
natom = len(atom)
rad = [0 for ai in atom]
charge = 0
addMolecule(mollist, species, atom, natom, rad, charge)
#Todo: write the products and tss to the mollist
reaclist = ET.SubElement(root,'reactionList')
#write the reactions
for index, instance in enumerate(species.reac_inst):
addReaction(reaclist, species, index, instance)
st = ET.tostring(root,'utf-8')
st = minidom.parseString(st)
fout = open('test.xml','w')
fout.write(st.toprettyxml(indent = ' '))
fout.close()
#write st.toprettyxml(indent = ' ')
#tree.write('test.xml', encoding='utf-8',xml_declaration=True)
def addReaction(reaclist, species, index, instance):
a = 1
def addMolecule(mollist,mol, atom, natom, rad, charge):
geom = []
for i,at in enumerate(atom):
geom.append([random.uniform(-3.,3.), random.uniform(-3.,3.), random.uniform(-3.,3.)])
bond = [
[0,2,0,1,1,0,0,0,0],
[2,0,1,0,0,1,0,0,0],
[0,1,0,0,0,0,1,1,1],
[1,0,0,0,0,0,0,0,0],
[1,0,0,0,0,0,0,0,0],
[0,1,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0]
]
molecule = ET.SubElement(mollist, 'molecule', {'id':'species.chemid','spinMultiplicity':'{}'.format(sum(rad))})
atomarray = ET.SubElement(molecule, 'atomArray')
for i,at in enumerate(atom):
args = {'id':'a{}'.format(i+1)}
args['elementType'] = at
args['x3'] = '{:.8f}'.format(geom[i][0])
args['y3'] = '{:.8f}'.format(geom[i][1])
args['z3'] = '{:.8f}'.format(geom[i][2])
at = ET.SubElement(atomarray, 'atom', args)
bond_id = 1
bondarray = ET.SubElement(molecule, 'bondArray')
for i in range(len(atom)-1):
for j in range(i+1,len(atom)):
if bond[i][j] > 0:
args = {'id':'b{}'.format(bond_id)}
args['atomRefs2']="a{} a{}".format(i+1,j+1)
args['order']="{}".format(bond[i][j])
b = ET.SubElement(bondarray,'bond',args)
bond_id += 1
propertylist = ET.SubElement(molecule, 'propertyList')
#add the zpe
property = ET.SubElement(propertylist, 'property', {'dictRef':'me:ZPE'})
scalar = ET.SubElement(property, 'scalar', {'units':'cm-1'}).text = str(15.5)
def indent(elem, level=0):
i = "\n" + level*" "
j = "\n" + (level-1)*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for subelem in elem:
indent(subelem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = j
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = j
return elem
species = 'a'
barriers = ['b1','b2']
products = [['p1','p2'],['p3']]
write_mesmer_input(species,barriers,products) | 1.75 | 2 |
tests/core/validation/test_transaction_validation.py | dbfreem/py-evm | 1,641 | 12790789 | <reponame>dbfreem/py-evm
import pytest
from eth.vm.forks.london.transactions import UnsignedDynamicFeeTransaction
from eth.vm.forks.berlin.transactions import UnsignedAccessListTransaction
from eth_utils import ValidationError
@pytest.mark.parametrize(
"unsigned_access_list_transaction,is_valid",
(
# While ethereum tests do not yet have Berlin or London transaction tests,
# this adds a few tests to test some obvious cases, especially positive test cases.
(UnsignedAccessListTransaction(
chain_id=123456789,
nonce=0,
gas_price=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 20, (1, 2)),),
), True),
(UnsignedAccessListTransaction(
chain_id=0,
nonce=0,
gas_price=0,
gas=0,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=(),
), True),
(UnsignedAccessListTransaction(
chain_id=123456789,
nonce=0,
gas_price=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 20, ()),),
), True),
(UnsignedAccessListTransaction(
chain_id=123456789,
nonce=0,
gas_price=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 19, (1,)),), # access_list address fails validation
), False),
(UnsignedAccessListTransaction(
chain_id='1', # chain_id fails validation
nonce=0,
gas_price=0,
gas=0,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=(),
), False),
)
)
def test_validate_unsigned_access_list_transaction(unsigned_access_list_transaction, is_valid):
if is_valid:
unsigned_access_list_transaction.validate()
else:
with pytest.raises(ValidationError):
unsigned_access_list_transaction.validate()
@pytest.mark.parametrize(
"unsigned_dynamic_fee_transaction,is_valid",
(
# While ethereum tests do not yet have Berlin or London transaction tests,
# this adds a few tests to test some obvious cases, especially positive test cases.
(UnsignedDynamicFeeTransaction(
chain_id=123456789,
nonce=0,
max_fee_per_gas=1000000000,
max_priority_fee_per_gas=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 20, (1, 2)),),
), True),
(UnsignedDynamicFeeTransaction(
chain_id=0,
nonce=0,
max_fee_per_gas=0,
max_priority_fee_per_gas=0,
gas=0,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=(),
), True),
(UnsignedDynamicFeeTransaction(
chain_id=123456789,
nonce=0,
max_fee_per_gas=1000000000,
max_priority_fee_per_gas=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 20, ()),),
), True),
(UnsignedDynamicFeeTransaction(
chain_id=123456789,
nonce=0,
max_fee_per_gas=1000000000,
max_priority_fee_per_gas=1000000000,
gas=40000,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=((b'\xf0' * 19, (1,)),), # access_list address fails validation
), False),
(UnsignedDynamicFeeTransaction(
chain_id='1', # chain_id fails validation
nonce=0,
max_fee_per_gas=1000000000,
max_priority_fee_per_gas=1000000000,
gas=0,
to=b'\xf0' * 20,
value=0,
data=b'',
access_list=(),
), False),
)
)
def test_validate_unsigned_dynamic_fee_transaction(unsigned_dynamic_fee_transaction, is_valid):
if is_valid:
unsigned_dynamic_fee_transaction.validate()
else:
with pytest.raises(ValidationError):
unsigned_dynamic_fee_transaction.validate()
| 2.015625 | 2 |
burden/validate_burden.py | PingjunChen/LiverCancerSeg | 41 | 12790790 | <filename>burden/validate_burden.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
import pandas as pd
from skimage import io
from pydaily import format
def cal_train_burden(slides_dir):
slide_list = []
slide_list.extend([ele[6:-4] for ele in os.listdir(slides_dir) if "svs" in ele])
slide_list.extend([ele[6:-4] for ele in os.listdir(slides_dir) if "SVS" in ele])
burden_dict = {}
for ind, cur_slide in enumerate(slide_list):
cur_slide = str(cur_slide)
print("Processing {}/{}".format(ind+1, len(slide_list)))
cur_whole_path = os.path.join(slides_dir, "01_01_"+cur_slide+"_whole.tif")
whole_mask = io.imread(cur_whole_path)
cur_viable_path = os.path.join(slides_dir, "01_01_"+cur_slide+"_viable.tif")
viable_mask = io.imread(cur_viable_path)
cur_burden = np.sum(viable_mask) * 1.0 / np.sum(whole_mask)
burden_dict[cur_slide] = cur_burden
save_json_path = os.path.join(os.path.dirname(slides_dir), "SourceData", "calculated_tumor_burden.json")
format.dict_to_json(burden_dict, save_json_path)
def extract_csv_burden(csv_path, case_num):
df = pd.read_csv(csv_path)
slide_ids = df['wsi_id'].values.tolist()[:case_num]
slide_burden = df['pixel ratio'].values.tolist()[:case_num]
burden_dict = {}
for id, burden in zip(slide_ids, slide_burden):
burden_dict[str(int(id)).zfill(4)] = burden
return burden_dict
if __name__ == "__main__":
# extract prepared ground truth viable tumor burden
source_slides_dir = "../data/SourceData"
phase1_path = os.path.join(source_slides_dir, "Phase_1_tumor_burden.csv")
phase2_path = os.path.join(source_slides_dir, "Phase_2_tumor_burden.csv")
gt_burden_dict = {}
phase1_burden_dict = extract_csv_burden(phase1_path, case_num=20)
gt_burden_dict.update(phase1_burden_dict)
phase2_burden_dict = extract_csv_burden(phase2_path, case_num=30)
gt_burden_dict.update(phase2_burden_dict)
# get calculate viable tumor burden
slides_dir = os.path.join(os.path.dirname(source_slides_dir), "LiverImages")
cal_train_burden(slides_dir)
# load calcualted burden
cal_burden_path = os.path.join(source_slides_dir, "calculated_tumor_burden.json")
cal_burden_dict = format.json_to_dict(cal_burden_path)
# compare gt & cal
for ind, key in enumerate(gt_burden_dict):
if key not in cal_burden_dict:
print("Error: {}".format(key))
gt_burden = gt_burden_dict[key]
cal_burden = cal_burden_dict[key]
if np.absolute(gt_burden-cal_burden) > 0.001:
print("{}/{} {} gt:{:.3f}, cal:{:.3f}".format(ind+1, len(gt_burden_dict), key,
gt_burden, cal_burden))
| 2.484375 | 2 |
tclr_pretraining/dl_tclr.py | DAVEISHAN/TCLR | 4 | 12790791 | r'''This dataloader is an attempt to make a master DL that provides 2 augmented versions
of a sparse clip (covering a minimum of 64 frames) and 2 augmented versions of 4 dense clips
(each covering a temporal span of at least 16 frames)'''
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import config as cfg
import random
import pickle
import parameters as params
import json
import math
import cv2
# from tqdm import tqdm
import time
import torchvision.transforms as trans
# from decord import VideoReader
class ss_dataset_gen1(Dataset):
def __init__(self, shuffle = True, data_percentage = 1.0, split = 1):
#####################
# self.all_paths = open(os.path.join(cfg.path_folder,'train_vids.txt'),'r').read().splitlines()
if split == 1:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist01.txt'),'r').read().splitlines()
elif split ==2:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist02.txt'),'r').read().splitlines()
elif split ==3:
self.all_paths = open(os.path.join(cfg.path_folder, 'ucfTrainTestlist/trainlist03.txt'),'r').read().splitlines()
else:
print(f'Invalid split input: {split}')
#####################
self.shuffle = shuffle
if self.shuffle:
random.shuffle(self.all_paths)
self.data_percentage = data_percentage
self.data_limit = int(len(self.all_paths)*self.data_percentage)
self.data = self.all_paths[0: self.data_limit]
self.PIL = trans.ToPILImage()
self.TENSOR = trans.ToTensor()
self.erase_size = 19
def __len__(self):
return len(self.data)
def __getitem__(self,index):
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path = self.process_data(index)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path
def process_data(self, idx):
vid_path = cfg.path_folder + '/UCF-101/' + self.data[idx].split(' ')[0]
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense = self.build_clip(vid_path)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense, vid_path
def build_clip(self, vid_path):
try:
cap = cv2.VideoCapture(vid_path)
cap.set(1, 0)
frame_count = cap.get(7)
if frame_count <= 56:
# print(f'Video {vid_path} has insufficient frames')
return None, None, None, None, None, None, None, None, None, None, None, None
############################# frame_list maker start here#################################
min_temporal_span_sparse = params.num_frames*params.sr_ratio
if frame_count > min_temporal_span_sparse:
start_frame = np.random.randint(0,frame_count-min_temporal_span_sparse)
#Dynamic skip rate experiment
# skip_max = int((frame_count - start_frame)/params.num_frames)
# # here 4 is the skip rate ratio = 4 chunks
# if skip_max >= 16:
# sr_sparse = np.random.choice([4,8,12,16])
# elif (skip_max<16) and (skip_max>=12):
# sr_sparse = np.random.choice([4,8,12])
# elif (skip_max<12) and (skip_max>=8):
# sr_sparse = np.random.choice([4,8])
# else:
sr_sparse = 4
else:
start_frame = 0
sr_sparse = 4
sr_dense = int(sr_sparse/4)
frames_sparse = [start_frame] + [start_frame + i*sr_sparse for i in range(1,params.num_frames)]
frames_dense = [[frames_sparse[j*4]]+[frames_sparse[j*4] + i*sr_dense for i in range(1,params.num_frames)] for j in range(4)]
################################ frame list maker finishes here ###########################
################################ actual clip builder starts here ##########################
sparse_clip = []
dense_clip0 = []
dense_clip1 = []
dense_clip2 = []
dense_clip3 = []
a_sparse_clip = []
a_dense_clip0 = []
a_dense_clip1 = []
a_dense_clip2 = []
a_dense_clip3 = []
list_sparse = []
list_dense = [[] for i in range(4)]
count = -1
random_array = np.random.rand(10,8)
x_erase = np.random.randint(0,params.reso_h, size = (10,))
y_erase = np.random.randint(0,params.reso_w, size = (10,))
cropping_factor1 = np.random.uniform(0.6, 1, size = (10,)) # on an average cropping factor is 80% i.e. covers 64% area
x0 = [np.random.randint(0, params.ori_reso_w - params.ori_reso_w*cropping_factor1[ii] + 1) for ii in range(10)]
y0 = [np.random.randint(0, params.ori_reso_h - params.ori_reso_h*cropping_factor1[ii] + 1) for ii in range(10)]
contrast_factor1 = np.random.uniform(0.75,1.25, size = (10,))
hue_factor1 = np.random.uniform(-0.1,0.1, size = (10,))
saturation_factor1 = np.random.uniform(0.75,1.25, size = (10,))
brightness_factor1 = np.random.uniform(0.75,1.25,size = (10,))
gamma1 = np.random.uniform(0.75,1.25, size = (10,))
erase_size1 = np.random.randint(int(self.erase_size/2),self.erase_size, size = (10,))
erase_size2 = np.random.randint(int(self.erase_size/2),self.erase_size, size = (10,))
random_color_dropped = np.random.randint(0,3,(10))
while(cap.isOpened()):
count += 1
ret, frame = cap.read()
if ((count not in frames_sparse) and (count not in frames_dense[0]) \
and (count not in frames_dense[1]) and (count not in frames_dense[2]) \
and (count not in frames_dense[3])) and (ret == True):
continue
if ret == True:
if (count in frames_sparse):
sparse_clip.append(self.augmentation(frame, random_array[0], x_erase[0], y_erase[0], cropping_factor1[0],\
x0[0], y0[0], contrast_factor1[0], hue_factor1[0], saturation_factor1[0], brightness_factor1[0],\
gamma1[0],erase_size1[0],erase_size2[0], random_color_dropped[0]))
a_sparse_clip.append(self.augmentation(frame, random_array[1], x_erase[1], y_erase[1], cropping_factor1[1],\
x0[1], y0[1], contrast_factor1[1], hue_factor1[1], saturation_factor1[1], brightness_factor1[1],\
gamma1[1],erase_size1[1],erase_size2[1], random_color_dropped[1]))
list_sparse.append(count)
if (count in frames_dense[0]):
dense_clip0.append(self.augmentation(frame, random_array[2], x_erase[2], y_erase[2], cropping_factor1[2],\
x0[2], y0[2], contrast_factor1[2], hue_factor1[2], saturation_factor1[2], brightness_factor1[2],\
gamma1[2],erase_size1[2],erase_size2[2], random_color_dropped[2]))
a_dense_clip0.append(self.augmentation(frame, random_array[3], x_erase[3], y_erase[3], cropping_factor1[3],\
x0[3], y0[3], contrast_factor1[3], hue_factor1[3], saturation_factor1[3], brightness_factor1[3],\
gamma1[3],erase_size1[3],erase_size2[3], random_color_dropped[3]))
list_dense[0].append(count)
if (count in frames_dense[1]):
dense_clip1.append(self.augmentation(frame, random_array[4], x_erase[4], y_erase[4], cropping_factor1[4],\
x0[4], y0[4], contrast_factor1[4], hue_factor1[4], saturation_factor1[4], brightness_factor1[4],\
gamma1[4],erase_size1[4],erase_size2[4], random_color_dropped[4]))
a_dense_clip1.append(self.augmentation(frame, random_array[5], x_erase[5], y_erase[5], cropping_factor1[5],\
x0[5], y0[5], contrast_factor1[5], hue_factor1[5], saturation_factor1[5], brightness_factor1[5],\
gamma1[5],erase_size1[5],erase_size2[5], random_color_dropped[5]))
list_dense[1].append(count)
if (count in frames_dense[2]):
dense_clip2.append(self.augmentation(frame, random_array[6], x_erase[6], y_erase[6], cropping_factor1[6],\
x0[6], y0[6], contrast_factor1[6], hue_factor1[6], saturation_factor1[6], brightness_factor1[6],\
gamma1[6],erase_size1[6],erase_size2[6], random_color_dropped[6]))
a_dense_clip2.append(self.augmentation(frame, random_array[7], x_erase[7], y_erase[7], cropping_factor1[7],\
x0[7], y0[7], contrast_factor1[7], hue_factor1[7], saturation_factor1[7], brightness_factor1[7],\
gamma1[7],erase_size1[7],erase_size2[7], random_color_dropped[7]))
list_dense[2].append(count)
if (count in frames_dense[3]):
dense_clip3.append(self.augmentation(frame, random_array[8], x_erase[8], y_erase[8], cropping_factor1[8],\
x0[8], y0[8], contrast_factor1[8], hue_factor1[8], saturation_factor1[8], brightness_factor1[8],\
gamma1[8],erase_size1[8],erase_size2[8], random_color_dropped[8]))
a_dense_clip3.append(self.augmentation(frame, random_array[9], x_erase[9], y_erase[9], cropping_factor1[9],\
x0[9], y0[9], contrast_factor1[9], hue_factor1[9], saturation_factor1[9], brightness_factor1[9],\
gamma1[9],erase_size1[9],erase_size2[9], random_color_dropped[9]))
list_dense[3].append(count)
else:
break
if len(sparse_clip) < params.num_frames and len(sparse_clip)>13:
# if params.num_frames - len(sparse_clip) >= 1:
# print(f'sparse_clip {vid_path} is missing {params.num_frames - len(sparse_clip)} frames')
remaining_num_frames = params.num_frames - len(sparse_clip)
sparse_clip = sparse_clip + sparse_clip[::-1][1:remaining_num_frames+1]
a_sparse_clip = a_sparse_clip + a_sparse_clip[::-1][1:remaining_num_frames+1]
if len(dense_clip3) < params.num_frames and len(dense_clip3)>7:
# if params.num_frames - len(dense_clip3) >= 1:
# print(f'dense_clip3 {vid_path} is missing {params.num_frames - len(dense_clip3)} frames')
remaining_num_frames = params.num_frames - len(dense_clip3)
dense_clip3 = dense_clip3 + dense_clip3[::-1][1:remaining_num_frames+1]
a_dense_clip3 = a_dense_clip3 + a_dense_clip3[::-1][1:remaining_num_frames+1]
try:
assert(len(sparse_clip)==params.num_frames)
assert(len(dense_clip0)==params.num_frames)
assert(len(dense_clip1)==params.num_frames)
assert(len(dense_clip2)==params.num_frames)
assert(len(dense_clip3)==params.num_frames)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, \
a_sparse_clip, a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, list_sparse, list_dense
except:
print(f'Clip {vid_path} has some frames reading issue, failed')
return None, None, None, None, None, None, None, None, None, None, None, None
except:
print(f'Clip {vid_path} has some unknown issue, failed')
return None, None, None, None, None, None, None, None, None, None, None, None
def augmentation(self, image, random_array, x_erase, y_erase, cropping_factor1,\
x0, y0, contrast_factor1, hue_factor1, saturation_factor1, brightness_factor1,\
gamma1,erase_size1,erase_size2, random_color_dropped):
image = self.PIL(image)
image = trans.functional.resized_crop(image,y0,x0,int(params.ori_reso_h*cropping_factor1),int(params.ori_reso_h*cropping_factor1),(params.reso_h,params.reso_w),interpolation=2)
if random_array[0] < 0.125:
image = trans.functional.adjust_contrast(image, contrast_factor = contrast_factor1) #0.75 to 1.25
if random_array[1] < 0.3 :
image = trans.functional.adjust_hue(image, hue_factor = hue_factor1) # hue factor will be between [-0.1, 0.1]
if random_array[2] < 0.3 :
image = trans.functional.adjust_saturation(image, saturation_factor = saturation_factor1) # brightness factor will be between [0.75, 1,25]
if random_array[3] < 0.3 :
image = trans.functional.adjust_brightness(image, brightness_factor = brightness_factor1) # brightness factor will be between [0.75, 1,25]
if random_array[0] > 0.125 and random_array[0] < 0.25:
image = trans.functional.adjust_contrast(image, contrast_factor = contrast_factor1) #0.75 to 1.25
if random_array[4] > 0.70:
if random_array[4] < 0.875:
image = trans.functional.to_grayscale(image, num_output_channels = 3)
if random_array[5] > 0.25:
image = trans.functional.adjust_gamma(image, gamma = gamma1, gain=1) #gamma range [0.8, 1.2]
else:
image = trans.functional.to_tensor(image)
image[random_color_dropped,:,:] = 0
image = self.PIL(image)
if random_array[6] > 0.5:
image = trans.functional.hflip(image)
image = trans.functional.to_tensor(image)
if random_array[7] < 0.5 :
image = trans.functional.erase(image, x_erase, y_erase, erase_size1, erase_size2, v=0)
return image
def collate_fn2(batch):
sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path = [], [], [], [], [], [], [], [], [], [], [], [], []
for item in batch:
if not (None in item):
sparse_clip.append(torch.stack(item[0],dim=0))
dense_clip0.append(torch.stack(item[1],dim=0))
dense_clip1.append(torch.stack(item[2],dim=0))
dense_clip2.append(torch.stack(item[3],dim=0))
dense_clip3.append(torch.stack(item[4],dim=0))
a_sparse_clip.append(torch.stack(item[5],dim=0))
a_dense_clip0.append(torch.stack(item[6],dim=0))
a_dense_clip1.append(torch.stack(item[7],dim=0))
a_dense_clip2.append(torch.stack(item[8],dim=0))
a_dense_clip3.append(torch.stack(item[9],dim=0))
list_sparse.append(np.asarray(item[10]))
list_dense.append(np.asarray(item[11]))
vid_path.append(item[12])
sparse_clip = torch.stack(sparse_clip, dim=0)
dense_clip0 = torch.stack(dense_clip0, dim=0)
dense_clip1 = torch.stack(dense_clip1, dim=0)
dense_clip2 = torch.stack(dense_clip2, dim=0)
dense_clip3 = torch.stack(dense_clip3, dim=0)
a_sparse_clip = torch.stack(a_sparse_clip, dim=0)
a_dense_clip0 = torch.stack(a_dense_clip0, dim=0)
a_dense_clip1 = torch.stack(a_dense_clip1, dim=0)
a_dense_clip2 = torch.stack(a_dense_clip2, dim=0)
a_dense_clip3 = torch.stack(a_dense_clip3, dim=0)
return sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path
if __name__ == '__main__':
train_dataset = ss_dataset_gen1(shuffle = True, data_percentage = 1.0)
train_dataloader = DataLoader(train_dataset, batch_size=40, \
shuffle=False, num_workers=4, collate_fn=collate_fn2)
print(f'Step involved: {len(train_dataset)/24}')
t=time.time()
for i, (sparse_clip, dense_clip0, dense_clip1, dense_clip2, dense_clip3, a_sparse_clip, \
a_dense_clip0, a_dense_clip1, a_dense_clip2, a_dense_clip3, \
list_sparse, list_dense, vid_path) in enumerate(train_dataloader):
if (i+1)%25 == 0:
print(sparse_clip.shape)
print(dense_clip3.shape)
print()
print(f'Time taken to load data is {time.time()-t}')
| 2.1875 | 2 |
exts.py | whan6795/myweb | 0 | 12790792 | <reponame>whan6795/myweb<filename>exts.py
# encoding:utf-8
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
db = SQLAlchemy()
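# Typical usage sketch (assumption about the rest of the project): the app factory
# imports `db` from this module and binds it to the Flask app, which is the usual
# reason for keeping the SQLAlchemy instance in a separate exts.py.
#
#     from flask import Flask
#     from exts import db
#
#     app = Flask(__name__)
#     app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///myweb.db'
#     db.init_app(app)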
| 1.484375 | 1 |
1/kdh/8_11719.py | KNU-CS09/Baekjoon | 0 | 12790793 | import sys
for line in sys.stdin:
print(line.strip('\n')) | 2.125 | 2 |
the-cloudwatch-dashboard/python/app.py | mttfarmer/serverless | 1,627 | 12790794 | <reponame>mttfarmer/serverless
#!/usr/bin/env python3
from aws_cdk import core
from the_cloudwatch_dashboard.the_cloudwatch_dashboard_stack import TheCloudwatchDashboardStack
app = core.App()
TheCloudwatchDashboardStack(app, "the-cloudwatch-dashboard")
app.synth()
| 1.414063 | 1 |
party.py | jelkink/ucd-prog-2020 | 0 | 12790795 | from location import Location
import random
class Party:
def __init__(self, simulation, name, colour, strategy=""):
self.location = Location()
self.simulation = simulation
if strategy == "":
self.random_strategy()
else:
self.strategy = strategy
self.name = name
self.colour = colour # random_colour() ???
self.voters = []
self.previous_count = -1
# def random_strategy(self):
# self.strategy = random.choose(self.simulation.get_allowed_strategies())
    def random_strategy(self):
        # Pick one of the implemented strategies uniformly at random.
        self.strategy = random.choice(["sticker", "predator", "hunter", "aggregator", "random"])
def add_voter(self, voter):
return self.voters.append(voter)
def reset_voters(self):
self.voters = []
def count_voters(self):
return len(self.voters)
def update_location(self):
if self.strategy == "sticker":
self.update_location_sticker()
elif self.strategy == "predator":
self.update_location_predator()
elif self.strategy == "hunter":
self.update_location_hunter()
elif self.strategy == "aggregator":
self.update_location_aggregator()
elif self.strategy == "random":
self.update_location_random()
else:
print("Strategy " + self.strategy + " does not exist!")
def update_location_predator(self):
parties = self.simulation.get_parties()
biggest_party = self
for p in parties:
if biggest_party.count_voters() < p.count_voters():
biggest_party = p
self.location.move_towards(biggest_party.location)
def update_location_aggregator(self):
if len(self.voters) > 0:
sum_x = 0
sum_y = 0
for voter in self.voters:
sum_x += voter.location.x
sum_y += voter.location.y
target_location = Location()
target_location.set_x(sum_x / len(self.voters))
target_location.set_y(sum_y / len(self.voters))
self.location.move_towards(target_location)
def update_location_hunter(self):
        # Hunter strategy: if the previous move kept or gained voters, keep
        # moving in the same direction; otherwise turn around and pick a random
        # direction within 90 degrees either side of the reversed heading.
        # On the very first move (previous_count == -1) pick a random direction.
if self.previous_count == -1:
direction = random.random() * 360.0
elif self.previous_count <= self.count_voters():
direction = self.previous_direction
else:
lower_limit = self.previous_direction + 90
direction = (random.random() * 180.0 + lower_limit) % 360
self.location.move_angle(direction)
self.previous_direction = direction
self.previous_count = self.count_voters()
# def save_state(self):
# self.previous_count = self.count_voters()
def update_location_random(self):
self.location.random_move()
def update_location_sticker(self):
pass
def get_location(self):
return self.location
def get_strategy(self):
return self.strategy
def get_name(self):
return self.name
def get_colour(self):
return self.colour
def get_voters(self):
return self.voters | 3.703125 | 4 |
utils/archive/plasticc_extract_gp.py | heather999/snmachine | 1 | 12790796 | from snmachine import sndata,snfeatures
import numpy as np
import pandas
from astropy.table import Table
import pickle
import os,sys
'''
print('starting readin of monster files')
#raw_data=pandas.read_csv('/share/hypatia/snmachine_resources/data/plasticc/test_set.csv')
raw_data=pandas.read_csv('/share/hypatia/snmachine_resources/data/plasticc/training_set.csv')
print('read in data set')
#raw_metadata=pandas.read_csv('/share/hypatia/snmachine_resources/data/plasticc/test_set_metadata.csv')
raw_metadata=pandas.read_csv('/share/hypatia/snmachine_resources/data/plasticc/training_set_metadata.csv')
print('read in metadata')
sys.stdout.flush()
#objects=np.unique(raw_data['object_id'])
#filters=np.unique(raw_data['passband']).astype('str')
'''
index=int(sys.argv[1])
print('Performing feature extraction on batch %d'%index)
out_folder='/share/hypatia/snmachine_resources/data/plasticc/data_products/plasticc_test/with_nondetection_cutting/fullset/data/'
print('loading data')
sys.stdout.flush()
with open(os.path.join(out_folder,'dataset_%d.pickle'%index),'rb') as f:
d=pickle.load(f)
int_folder=os.path.join(out_folder,'int')
feats_folder=os.path.join(out_folder,'features')
print('data loaded')
sys.stdout.flush()
#d=sndata.EmptyDataset(filter_set=filters,survey_name='plasticc',folder=out_folder)
#d.object_names=d.object_names[:10]
print('nobj: '+str(len(d.object_names)))
print('extracting features')
sys.stdout.flush()
wf=snfeatures.WaveletFeatures(wavelet='sym2',ngp=1100)
pca_folder='/share/hypatia/snmachine_resources/data/plasticc/dummy_pca/'
feats=wf.extract_features(d,nprocesses=1,save_output='all',output_root=int_folder, recompute_pca=False, pca_path=pca_folder,xmax=1100)
feats.write(os.path.join(feats_folder, 'wavelet_features_%d.fits'%index),overwrite=True)
'''
with open(os.path.join(feats_folder,'PCA_mean.pickle'),'wb') as f1:
pickle.dump(wf.PCA_mean,f1)
with open(os.path.join(feats_folder,'PCA_eigenvals.pickle'),'wb') as f2:
pickle.dump(wf.PCA_eigenvals,f2)
with open(os.path.join(feats_folder,'PCA_eigenvectors.pickle'),'wb') as f3:
pickle.dump(wf.PCA_eigenvectors,f3)
np.savetxt(os.path.join(feats_folder,'PCA_mean.txt'),wf.PCA_mean)
np.savetxt(os.path.join(feats_folder,'PCA_eigenvals.txt'),wf.PCA_eigenvals)
np.savetxt(os.path.join(feats_folder,'PCA_eigenvectors.txt'),wf.PCA_eigenvectors)
'''
| 2.25 | 2 |
benchmarking.py | niladell/DockStream | 34 | 12790797 | import os
import json
import errno
import sys
import argparse
from dockstream.utils.execute_external.execute import Executor
from dockstream.utils import files_paths
from dockstream.utils.enums.docking_enum import DockingConfigurationEnum
_DC = DockingConfigurationEnum()
def run_script(input_path: str) -> dict:
"""this method takes an input path to either a folder containing DockStream json files or a single json file and
returns a dictionary whose keys are the json names and the corresponding values are the paths to the json
file. The dictionary will be looped later to run DockStream
:param input_path: path to either a folder of json files or a single json file
:raises FileNotFoundError: this error is raised if input_path is neither a folder nor a file
:return: dictionary, keys are the DockStream json names and values are the paths to them
"""
# first check if input_path is valid (either a folder containing json files or a single json file)
if not os.path.isdir(input_path) and not os.path.isfile(input_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), input_path)
# if input_path is a folder, ensure it is not empty and that it contains at least 1 json file
if os.path.isdir(input_path):
if not os.listdir(input_path):
sys.exit(input_path + ' folder is empty. Please ensure your DockStream json files are added to the folder.')
elif not any(file.endswith('.json') for file in os.listdir(input_path)):
sys.exit(input_path + ' contains no json files. Please ensure your DockStream json files are added to the folder.')
# at this point, the path must be a file. Check that it is in json format
if os.path.isfile(input_path):
if not input_path.endswith('.json'):
sys.exit(input_path + ' is not a json file. Please ensure it is in json format.')
# initialize a dictionary to hold all DockStream runs
batch_runs = {}
# loop through all json files and update the paths if input_path if a directory
if os.path.isdir(input_path):
all_runs = [file for file in os.listdir(input_path) if file.endswith('.json')]
for json in all_runs:
batch_runs[json.replace('.json', '')] = os.path.join(input_path, json)
# at this point, input path must be a single json file
else:
json_name = os.path.basename(os.path.normpath(input_path)).replace('.json', '')
batch_runs[json_name] = input_path
return batch_runs
if __name__ == '__main__':
# take user specified input parameters to run the benchmarking script
parser = argparse.ArgumentParser(description='Facilitates batch DockStream execution.')
parser.add_argument('-input_path', type=str, required=True, help='The path to either a folder of DockStream json files or a single json file.')
args = parser.parse_args()
batch_runs = run_script(args.input_path)
executor = Executor()
# initialize a dictionary to store the names of all runs that did not enforce "best_per_ligand"
non_bpl_runs = {}
# loop through all user json files and run DockStream
for trial_name, json_path in batch_runs.items():
# check if the current DockStream run has "best_per_ligand" enforced
with open(json_path, "r") as f:
parameters = json.load(f)
# in case output mode was not specified in the configuration json
try:
for docking_run in parameters[_DC.DOCKING][_DC.DOCKING_RUNS]:
output_mode = docking_run[_DC.OUTPUT][_DC.OUTPUT_SCORES][_DC.OUTPUT_MODE]
if output_mode != _DC.OUTPUT_MODE_BESTPERLIGAND:
non_bpl_runs[trial_name] = output_mode
break
except:
pass
print(f'Running {trial_name}')
result = executor.execute(command=sys.executable, arguments=[files_paths.attach_root_path('docker.py'),
'-conf', json_path, '-debug'], check=False)
print(result)
# print out error messages (if applicable) for the current DockStream run
if result.returncode != 0:
print(f'There was an error with {trial_name} DockStream run.')
print(result.stdout)
print(result.stderr)
if bool(non_bpl_runs):
# print the names of the runs which did not enforce "best_per_ligand"
print(f"List of runs which did not have 'best_per_ligand' specified. These runs cannot be "
f"passed into the analysis script. {non_bpl_runs}")
| 2.75 | 3 |
sc/acme_test.py | elliotgunn/DS-Unit-3-Sprint-1-Software-Engineering | 0 | 12790798 | <reponame>elliotgunn/DS-Unit-3-Sprint-1-Software-Engineering<filename>sc/acme_test.py
import unittest
from acme import Product
from acme_report import generate_products, ADJECTIVES, NOUNS
class AcmeProductTests(unittest.TestCase):
"""Making sure Acme products are the tops!"""
def test_default_product_price(self):
"""Test default product price being 10."""
prod = Product('Test Product')
self.assertEqual(prod.price, 10)
def test_default_product_weight(self):
"""Test default product weight being 10."""
prod = Product('Test Product')
self.assertEqual(prod.weight, 20)
def test_methods(self):
'''Test custom product with changes to defaults
'''
self.custom_product = Product('Test Product', price=20, weight=40, flammability=0.5)
self.assertEqual(self.custom_product.stealability(), 'Not so stealable...')
self.assertEqual(self.custom_product.explode(), "...it's a glove.")
class AcmeReportTests(unittest.TestCase):
'''Two tests:
test_default_num_products() checks if product list received is 30
test_legal_names() checks if names generated are valid from
ADJECTIVE and NOUN lists
'''
def test_default_num_products(self):
self.assertEqual(len(generate_products()), 30)
def test_legal_names(self):
products = generate_products()
for product in products:
first = product.name.split()[0]
last = product.name.split()[1]
self.assertIn(first, ADJECTIVES)
self.assertIn(last, NOUNS)
if __name__ == '__main__':
unittest.main() | 3.234375 | 3 |
xyzflow/__init__.py | Renneke/xyzflow | 2 | 12790799 | #
# XYZFlow - Simple Orchestration Framework
#
from .Task import Task, task
from .Flow import get_flow_parameter, flow, Flow, save_parameters, load_parameters
from .HelperTasks import Add, Sub, Multiplication
from .Parameter import Parameter
from .xyzflow import inspect_parameters, main | 1.1875 | 1 |
tests/test_models.py | kahache/video_packaging_platform | 8 | 12790800 | <gh_stars>1-10
__author__ = "<NAME> & <NAME>"
__version__ = "1.0.0"
__start_date__ = "25th July 2020"
__end_date__ = "3rd August 2020"
__maintainer__ = "me"
__email__ = "<EMAIL>"
__requirements__ = "SQL-Alchemy, MySQL, Flask-SQLAlchemy, database script"
__status__ = "Production"
__description__ = """
This is the database models script. It connects to the database and maps
the tables to the model classes for this app.
"""
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *
from test_database import metadata, db_session
class test_VideosDB(object):
query = db_session.query_property()
def __init__(self, input_content_id=None, input_content_origin=None, video_track_number=None, status=None,
output_file_path=None, video_key=None, kid=None, packaged_content_id=None, url=None):
self.input_content_id = input_content_id
self.input_content_origin = input_content_origin
self.video_track_number = video_track_number
self.status = status
self.output_file_path = output_file_path
self.video_key = video_key
self.kid = kid
self.packaged_content_id = packaged_content_id
self.url = url
def __repr__(self):
return '<VideosDB %r>' % (self.input_content_id)
test_uploaded_videos = Table('test_uploaded_videos', metadata,
Column('input_content_id', Integer, primary_key=True, autoincrement=True, unique=True),
Column('input_content_origin', String(255), ),
Column('video_track_number', Integer),
Column('status', String(255)),
Column('output_file_path', String(255)),
Column('video_key', String(255)),
Column('kid', String(255)),
Column('packaged_content_id', Integer, unique=True),
Column('url', String(255))
)
mapper(test_VideosDB, test_uploaded_videos)
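# Illustrative sketch (not part of the original tests): with the classical mapping
# above in place, rows can be created through the shared session. The column values
# are invented for the example, and `metadata.create_all()` is assumed to be bound
# to an engine inside test_database.
#
#     metadata.create_all()
#     video = test_VideosDB(input_content_origin='/tmp/input.mp4',
#                           video_track_number=1,
#                           status='UPLOADED')
#     db_session.add(video)
#     db_session.commit()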
| 2.296875 | 2 |
utils.py | nntrongnghia/learn-recsys | 0 | 12790801 | <reponame>nntrongnghia/learn-recsys<filename>utils.py
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def bpr_loss(pos: torch.Tensor, neg: torch.Tensor) -> torch.Tensor:
"""Bayesian Personalized Ranking Loss
Parameters
----------
    pos : torch.Tensor
        Predicted ranking scores for the positive (observed) items
    neg : torch.Tensor
        Predicted ranking scores for the negative (sampled) items
    Returns
    -------
    torch.Tensor
        Scalar BPR loss: -mean(logsigmoid(pos - neg))
"""
diff = pos - neg
return -F.logsigmoid(diff).mean()
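# Minimal usage sketch (illustrative): scores for a batch of positive and sampled
# negative items. A real model would produce these from user/item embeddings;
# random tensors are used here only to show the call.
if __name__ == "__main__":
    pos_scores = torch.randn(8)
    neg_scores = torch.randn(8)
    print("BPR loss:", bpr_loss(pos_scores, neg_scores).item())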
| 2.109375 | 2 |
scripts/gather_benchmarks.py | preshing/CompareIntegerMaps | 72 | 12790802 | #---------------------------------------------------
# Perform a bunch of experiments using CompareIntegerMaps.exe, and writes the results to results.txt.
# You can filter the experiments by name by passing a regular expression as a script argument.
# For example: run_tests.py LOOKUP_0_.*
# Results are also cached in an intermediate directory, temp, so you can add new results to results.txt
# without redoing previous experiments.
#---------------------------------------------------
import cmake_launcher
import math
import os
import re
import sys
from collections import defaultdict
from pprint import pprint
#GENERATOR = 'Visual Studio 10'
IGNORE_CACHE = False
#---------------------------------------------------
# TestLauncher
#---------------------------------------------------
class TestLauncher:
""" Configures, builds & runs CompareIntegerMaps using the specified options. """
DEFAULT_DEFS = {
'CACHE_STOMPER_ENABLED': 0,
'EXPERIMENT': 'INSERT',
'CONTAINER': 'TABLE',
}
def __init__(self):
cmakeBuilder = cmake_launcher.CMakeBuilder('..', generator=globals().get('GENERATOR'))
# It would be cool to get CMake to tell us the path to the executable instead.
self.launcher = cmake_launcher.CMakeLauncher(cmakeBuilder, 'CompareIntegerMaps.exe')
def run(self, seed, operationsPerGroup, keyCount, granularity, stompBytes, **defs):
args = [seed, operationsPerGroup, keyCount, granularity, stompBytes]
mergedDefs = dict(self.DEFAULT_DEFS)
mergedDefs.update(defs)
fullDefs = dict([('INTEGER_MAP_' + k, v) for k, v in mergedDefs.iteritems()])
self.launcher.ignoreCache = IGNORE_CACHE
output = self.launcher.run(*args, **fullDefs)
return eval(output)
#---------------------------------------------------
# Experiment
#---------------------------------------------------
class Experiment:
""" A group of CompareIntegerMaps runs using similar options but different seeds. """
def __init__(self, testLauncher, name, seeds, *args, **kwargs):
self.testLauncher = testLauncher
self.name = name
self.seeds = seeds
self.args = args
self.kwargs = kwargs
def run(self, results):
allGroups = defaultdict(list)
for seed in xrange(self.seeds):
print('Running %s #%d/%d...' % (self.name, seed + 1, self.seeds))
r = self.testLauncher.run(seed, *self.args, **self.kwargs)
for marker, units in r['results']:
allGroups[marker].append(units)
def medianAverage(values):
if len(values) >= 4:
values = sorted(values)[1:-1]
return sum(values) / len(values)
results[self.name] = [(marker, medianAverage(units)) for marker, units in sorted(allGroups.items())]
#---------------------------------------------------
# main
#---------------------------------------------------
if __name__ == '__main__':
from datetime import datetime
start = datetime.now()
os.chdir(os.path.split(sys.argv[0])[0])
filter = re.compile((sys.argv + ['.*'])[1])
if '--nocache' in sys.argv[1:]:
IGNORE_CACHE = True
results = {}
testLauncher = TestLauncher()
maxKeys = 18000000
granularity = 200
for container in ['TABLE', 'JUDY']:
experiment = Experiment(testLauncher,
'MEMORY_%s' % container,
8 if container == 'JUDY' else 1, 0, maxKeys, granularity, 0,
CONTAINER=container,
EXPERIMENT='MEMORY')
if filter.match(experiment.name):
experiment.run(results)
for stomp in [0, 1000, 10000]:
experiment = Experiment(testLauncher,
'INSERT_%d_%s' % (stomp, container),
8, 8000, maxKeys, granularity, stomp,
CONTAINER=container,
EXPERIMENT='INSERT',
CACHE_STOMPER_ENABLED=1 if stomp > 0 else 0)
if filter.match(experiment.name):
experiment.run(results)
experiment = Experiment(testLauncher,
'LOOKUP_%d_%s' % (stomp, container),
8, 8000, maxKeys, granularity, stomp,
CONTAINER=container,
EXPERIMENT='LOOKUP',
CACHE_STOMPER_ENABLED=1 if stomp > 0 else 0)
if filter.match(experiment.name):
experiment.run(results)
pprint(results, open('results.txt', 'w'))
print('Elapsed time: %s' % (datetime.now() - start))
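# Illustrative invocations (added as a hedged sketch; adjust the script name/paths to your checkout):
#   python gather_benchmarks.py                 -> run every experiment
#   python gather_benchmarks.py "LOOKUP_0_.*"   -> run only experiments whose name matches the regex
#   python gather_benchmarks.py ".*" --nocache  -> rerun everything, ignoring the temp cache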
| 2.46875 | 2 |
smoothcrawler/persistence/__init__.py | Chisanan232/pytsunami | 1 | 12790803 | from abc import ABCMeta, abstractmethod
class PersistenceFacade(metaclass=ABCMeta):
@abstractmethod
def save(self, data, *args, **kwargs):
pass
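# Hedged example (added for illustration, not part of the original module): a minimal
# concrete facade that keeps records in memory, to show how subclasses are expected
# to implement `save`.
class InMemoryPersistence(PersistenceFacade):
    def __init__(self):
        self._storage = []
    def save(self, data, *args, **kwargs):
        # a real facade would write to a database or file instead
        self._storage.append(data)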
| 3.03125 | 3 |
lhorizon/solutions.py | arfon/lhorizon | 1 | 12790804 | """
functionality for solving body-intersection problems. used by
`lhorizon.targeter`. currently contains only ray-sphere intersection solutions
but could also sensibly contain expressions for bodies of different shapes.
"""
from collections.abc import Callable, Sequence
import sympy as sp
# sympy symbols for ray-sphere equations
x, y, z, x0, y0, z0, mx, my, mz, d = sp.symbols(
"x,y,z,x0,y0,z0,m_x,m_y,m_z,d", real=True
)
def ray_sphere_equations(radius: float) -> list[sp.Eq]:
"""
generate a simple system of equations for intersections between
a ray with origin at (0, 0, 0) and direction vector [x, y, z]
and a sphere with radius == 'radius' and center (mx, my, mz).
"""
x_constraint = sp.Eq(x, x0 * d)
y_constraint = sp.Eq(y, y0 * d)
z_constraint = sp.Eq(z, z0 * d)
sphere_bound_constraint = sp.Eq(
((x - mx) ** 2 + (y - my) ** 2 + (z - mz) ** 2) ** (1 / 2), radius
)
return [x_constraint, y_constraint, z_constraint, sphere_bound_constraint]
def get_ray_sphere_solution(
radius: float, farside: bool = False
) -> tuple[sp.Expr]:
"""
produce a solution to the generalized ray-sphere equation for a body of
radius `radius`. by default, take the nearside solution. this produces a
tuple of sympy expressions objects, which are fairly slow to evaluate;
unless you are planning to further manipulate them, you would probably
rather call make_ray_sphere_lambdas().
"""
# sp.solve() returns the nearside solution first
selected_solution = 0
if farside:
selected_solution = 1
general_solution = sp.solve(ray_sphere_equations(radius), [x, y, z, d])[
selected_solution
]
return general_solution
def lambdify_system(
expressions: Sequence[sp.Expr],
expression_names: Sequence[str],
variables: Sequence[sp.Symbol],
) -> dict[str, Callable]:
"""
returns a dict of functions that substitute the symbols in 'variables'
into the expressions in 'expressions'. 'expression_names' serve as the
keys of the dict.
"""
return {
expression_name: sp.lambdify(variables, expression, "numpy")
for expression, expression_name in zip(expressions, expression_names)
}
def make_ray_sphere_lambdas(
radius: float, farside=False
) -> dict[str, Callable]:
"""
produce a dict of functions that return solutions for the ray-sphere
equation for a sphere of radius `radius`.
"""
return lambdify_system(
get_ray_sphere_solution(radius, farside),
["x", "y", "z", "d"],
[x0, y0, z0, mx, my, mz],
)
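# Usage sketch (added for illustration; the numbers are assumptions, not repository data):
# nearside hit distance for a unit sphere centered 10 units along +z and a ray aimed at it.
if __name__ == "__main__":
    lambdas = make_ray_sphere_lambdas(radius=1.0)
    # argument order follows the symbols above: x0, y0, z0, mx, my, mz
    hit_distance = lambdas["d"](0.0, 0.0, 1.0, 0.0, 0.0, 10.0)
    print(hit_distance)  # expected to be close to 9 for these inputs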
| 2.96875 | 3 |
videoFeedStuff/UDPRecieverTest.py | mattwalstra/2019RobotCode | 4 | 12790805 | import socket
import cv2
UDP_IP = "127.0.0.1"
UDP_PORT = 5005
sock= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP,UDP_PORT))
while True:
data, addr = sock.recvfrom(1024)
print data
| 2.515625 | 3 |
morio/model/__init__.py | wddwycc/morio | 0 | 12790806 | from .base import db
from .user import User
from .repository import Repository
from .card import Card
from .course import Course
from .course_card_progress import CourseCardProgress
def init_app(app):
db.init_app(app)
| 1.453125 | 1 |
test/default_value_constrct.py | bourne7/demo-python | 0 | 12790807 | # Default values are computed once, then re-used.
# https://blog.csdn.net/x_r_su/article/details/54730654
class MyList:
def __init__(self, init_list=[]):
self.list = init_list
print(id(init_list))
def add(self, ele):
self.list.append(ele)
def appender(ele):
# obj = MyList()
obj = MyList(init_list=[])
obj.add(ele)
print(obj.list)
if __name__ == '__main__':
print('start')
for i in range(5):
appender(i)
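# Hedged sketch (added for illustration, not in the original file): the usual fix for
# the shared-default pitfall shown above is to default to None and build a fresh list
# inside __init__, so instances stop sharing the same list object.
class MyListFixed:
    def __init__(self, init_list=None):
        # a new list per instance unless the caller explicitly provides one
        self.list = [] if init_list is None else init_list
    def add(self, ele):
        self.list.append(ele)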
| 3.71875 | 4 |
001-python-core-language/012_function_pass_by_ref_var_scope.py | CHemaxi/python | 0 | 12790808 | ## >---
## >YamlDesc: CONTENT-ARTICLE
## >Title: python functions
## >MetaDescription: python functions, function with parameters, function with return value example code, tutorials
## >MetaKeywords: python functions, function with parameters, function with return value example code, tutorials
## >Author: Hemaxi
## >ContentName: pass-by-reference-value
## >---
## ># PYTHON FUNCTION ARGUMENTS PASS BY REFERENCE and VALUE, VARIABLE SCOPE
## >* Variables defined inside a function have local scope
## >* Variables defined outside a function have global scope.
## >* Global variables can be accessed inside functions as well.
## >* Function arguments in python are passed by reference, any parameter
## > value changed inside the function is reflected outside the call too.
## >* The parameter passed in is actually a reference to a variable (but
## > the reference is passed by value)
## >* Caution, when changing arguments by reference make sure to copy the
## > contents to a local variable in the function, or else the outer variable
## > might be overwritten !
## >## Python Global and Local Variables
## >* **Global Variable** is accessible from anywhere in the program; Global
## > Variables outside the function are accessible inside the function as well.
## >* **Local Variables** are variables inside a function; they are accessible
## > from within a function and not outside the function.
## >```
MyVar = 'This is a Global Value'
def myFunction():
# Local Variable
MyVar = 'This is a local variable'
print('Value of MyVar inside function: ', MyVar)
# End of function code: myFunction
# Make a call to myFunction
myFunction()
print('Value of MyVar outside function: ', MyVar)
## >```
## >## Pass by Reference
## >* Function arguments in python are passed by reference, any parameter
## > value changed inside the function is reflected outside the call too.
## >```
# Create a source list
source_list = ['A', 'B', 'C']
def function_pass_by_reference(in_list):
print('function_pass_by_reference says Input List: ', in_list)
# Changing the input by appending a value to in_list
in_list.append('D')
print('function_pass_by_reference says changed List: ', in_list)
# End of function code: function_pass_by_reference
print('Before passing reference to function, source_list: ', source_list)
# Passing the "source_list" and NOT A COPY of the "source_list"
function_pass_by_reference(source_list)
print('After passing reference to function, source_list: ', source_list)
# OUTPUT
# Before passing to function, source_list: ['A', 'B', 'C']
# Input List: ['A', 'B', 'C']
# changed List: ['A', 'B', 'C', 'D']
# After passing to function, source_list: ['A', 'B', 'C', 'D']
## >```
## >### Pass by Value
## >* The parameter passed in is actually a reference to a variable (but
## > the reference is passed by value)
## >```
# Create a source list
source_list_2 = ['A', 'B', 'C']
def function_pass_by_value(in_list):
print('function_pass_by_value says Input List: ', in_list)
# Reassigning a local value, [pass by value example]
in_list =[1, 2, 3, 4]
print('function_pass_by_value says changed List: ', in_list)
# End of function code: function_pass_by_value
print('Before passing by value to function, source_list: ', source_list_2)
# Passing the "source_list" and NOT A COPY of the "source_list"
function_pass_by_value(source_list_2)
print('After passing by value to function, source_list: ', source_list_2)
# OUTPUT
# Before passing by value to function, source_list: ['A', 'B', 'C']
# Input List: ['A', 'B', 'C']
# changed List: [1, 2, 3, 4]
# After passing by value to function, source_list: ['A', 'B', 'C']
## >```
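## >### Avoiding Accidental Mutation (Illustrative Addition)
## >* One common way to keep the caller's list untouched is to copy the argument
## > inside the function before changing it. The snippet below is a small sketch
## > added for illustration; the names used here are not part of the original lesson.
## >```
# Create a source list
source_list_3 = ['A', 'B', 'C']
def function_with_defensive_copy(in_list):
    # Work on a copy so the caller's list is never modified
    local_list = list(in_list)
    local_list.append('D')
    print('function_with_defensive_copy says local copy: ', local_list)
# End of function code: function_with_defensive_copy
print('Before calling function, source_list_3: ', source_list_3)
function_with_defensive_copy(source_list_3)
print('After calling function, source_list_3: ', source_list_3)
# OUTPUT
# Before calling function, source_list_3: ['A', 'B', 'C']
# function_with_defensive_copy says local copy: ['A', 'B', 'C', 'D']
# After calling function, source_list_3: ['A', 'B', 'C']
## >```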
| 3.96875 | 4 |
connect_four/agents/dfpn_build_db.py | rpachauri/connect4 | 0 | 12790809 | import gym
import time
from connect_four.agents import DFPN
from connect_four.agents import difficult_connect_four_positions
from connect_four.evaluation.victor.victor_evaluator import Victor
from connect_four.hashing import ConnectFourHasher
from connect_four.transposition.sqlite_transposition_table import SQLiteTranspositionTable
env = gym.make('connect_four-v0')
env.reset()
evaluator = Victor(model=env)
hasher = ConnectFourHasher(env=env)
tt = SQLiteTranspositionTable(database_file="connect_four.db")
agent = DFPN(evaluator, hasher, tt)
start = time.time()
evaluation = agent.depth_first_proof_number_search(env=env)
end = time.time()
print(evaluation)
print("time to run = ", end - start)
tt.close()
| 2.140625 | 2 |
inst/python/python_predict.py | cynthiayang525/PatientLevelPrediction | 141 | 12790810 | # apply random forest model on new data
#===============================================================
# INPUT:
# 1) location of new data
# 2) location of model
#
# OUTPUT:
# it returns a file with indexes merged with prediction for test index - named new_pred
#================================================================
import numpy as np
from collections import OrderedDict
import os
import sys
import timeit
import math
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.naive_bayes import GaussianNB
from scipy.sparse import coo_matrix,csr_matrix,vstack,hstack
#from sklearn.feature_selection import SelectFromModel
#from sklearn.cross_validation import PredefinedSplit
from sklearn.externals.joblib import Memory
#from sklearn.datasets import load_svmlight_file
from sklearn.externals import joblib
if "python_dir" in globals():
sys.path.insert(0, python_dir)
import TorchUtils as tu
#================================================================
print("Applying Python Model")
###########################################################################
def get_temproal_data(covariates, population):
p_ids_in_cov = set(covariates[:, 0])
timeid_len = len(set(covariates[:, -2]))
full_covariates = np.array([]).reshape(0,4)
default_covid = covariates[0, 1]
for p_id in population[:, 0]:
if p_id not in p_ids_in_cov:
tmp_x = np.array([p_id, default_covid, 1, 0]).reshape(1,4) #default cov id, timeid=1
full_covariates = np.concatenate((full_covariates, tmp_x), axis=0)
else:
tmp_x = covariates[covariates[:, 0] == p_id, :]
#print tmp_x.shape, X.shape
full_covariates = np.concatenate((full_covariates, tmp_x), axis=0)
X, patient_keys = tu.convert_to_temporal_format(full_covariates, timeid_len = timeid_len, predict = True)
return X
print("Loading Data...")
# load data + train,test indexes + validation index
y=population[:,1]
#print covariates.shape
if modeltype == 'temporal':
X = plpData.to_dense().numpy()
X = X[np.int64(population[:, 0]), :]
#X = get_temproal_data(covariates, population)
dense = 0
else:
#print included
X = plpData[population[:,0],:]
X = X[:,included.flatten()]
# load index file
print("population loaded- %s rows and %s columns" %(np.shape(population)[0], np.shape(population)[1]))
print("Dataset has %s rows and %s columns" %(X.shape[0], X.shape[1]))
print("Data ready for model has %s features" %(np.shape(X)[1]))
###########################################################################
# uf dense convert
if dense==1:
print("converting to dense data...")
X=X.toarray()
###########################################################################
# load model
print("Loading model...")
modelTrained = joblib.load(os.path.join(model_loc,"model.pkl"))
print(X.shape)
print("Calculating predictions on population...")
if autoencoder:
autoencoder_model = joblib.load(os.path.join(model_loc, 'autoencoder_model.pkl'))
X = autoencoder_model.get_encode_features(X)
if modeltype == 'temporal':
test_batch = tu.batch(X, batch_size = 32)
test_pred = []
for test in test_batch:
pred_test1 = modelTrained.predict_proba(test)[:, 1]
test_pred = np.concatenate((test_pred , pred_test1), axis = 0)
else:
test_pred = modelTrained.predict_proba(X)[:, 1]
if test_pred.ndim != 1:
test_pred = test_pred[:,1]
print("Prediction complete: %s rows" %(np.shape(test_pred)[0]))
print("Mean: %s prediction value" %(np.mean(test_pred)))
# merge pred with population
test_pred.shape = (population.shape[0], 1)
prediction = np.append(population,test_pred, axis=1)
| 2.484375 | 2 |
mapillary-tools/interpolate_with_anchors.py | mgottholsen/cynefin | 0 | 12790811 | #!/usr/bin/env python
import datetime
from lib.geo import interpolate_lat_lon, compute_bearing, offset_bearing
from lib.sequence import Sequence
import lib.io
import os
import sys
from lib.exifedit import ExifEdit
def interpolate_with_anchors(anchors, angle_offset):
'''
Interpolate gps position and compass angle given a list of anchors
anchor:
lat: latitude
lon: longitude
alt: altitude
datetime: date time of the anchor (datetime object)
num_image: number of images in between two anchors
'''
points = [ (a['datetime'], a['lat'], a['lon'], a.get('alt', 0)) for a in anchors]
inter_points = []
for i, (a1, a2) in enumerate(zip(points[:], points[1:])):
t1 = a1[0]
t2 = a2[0]
num_image = anchors[i]['num_image']
delta = (t2-t1).total_seconds()/float(num_image+1)
inter_points.append(points[i]+(0.0,))
for ii in xrange(num_image):
t = t1 + datetime.timedelta(seconds=(ii+1)*delta)
p = interpolate_lat_lon(points, t)
inter_points.append((t,)+p)
inter_points.append(points[-1]+(0,0,))
# get angles
bearings = [offset_bearing(compute_bearing(ll1[1], ll1[2], ll2[1], ll2[2]), angle_offset)
for ll1, ll2 in zip(inter_points, inter_points[1:])]
bearings.append(bearings[-1])
inter_points = [ (p[0], p[1], p[2], p[4], bearing) for p, bearing in zip(inter_points, bearings)]
return inter_points
def point(lat, lon, alt, datetime, num_image):
return {
'lat': lat,
'lon': lon,
'alt': alt,
'datetime': datetime,
'num_image': num_image
}
def test_run(image_path):
'''
Test run for images
'''
s = Sequence(image_path, check_exif=False)
file_list = s.get_file_list(image_path)
num_image = len(file_list)
t1 = datetime.datetime.strptime('2000_09_03_12_00_00', '%Y_%m_%d_%H_%M_%S')
t2 = datetime.datetime.strptime('2000_09_03_12_30_00', '%Y_%m_%d_%H_%M_%S')
p1 = point(0.5, 0.5, 0.2, t1, num_image-2)
p2 = point(0.55, 0.55, 0.0, t2, 0)
inter_points = interpolate_with_anchors([p1, p2], angle_offset=-90.0)
save_path = os.path.join(image_path, 'processed')
lib.io.mkdir_p(save_path)
assert(len(inter_points)==len(file_list))
for f, p in zip(file_list, inter_points):
meta = ExifEdit(f)
meta.add_lat_lon(p[1], p[2])
meta.add_altitude(p[3])
meta.add_date_time_original(p[0])
meta.add_orientation(1)
meta.add_direction(p[4])
meta.write()
| 2.859375 | 3 |
awstosrt.py | asulibraries/AwsTranscribe | 0 | 12790812 | import re
import sys
import json
import logging
from datetime import timedelta
import srt
import webvtt
log = logging.getLogger(__name__)
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
sys.path.append("/home/ubuntu/.local/lib/python3.6/site-packages")
''' Parameters to control subtitle behavior.
See http://bbc.github.io/subtitle-guidelines/ for recommended practices.
'''
MAX_LEN = 74 # Maximum characters per screen (37 * 2)
MAX_LINE_LEN = 37 # Maximum characters per line
INTERVAL = 2000 # Minimum time in ms between lines
CC_TIME_WINDOW = 500 # Line combining window in ms
MAX_TIME = 8 # Seconds any line should persist on-screen
class TranscribeToSRT():
def __init__(self, infile, outfile):
self.srt = []
self.subtitles = []
self.last = timedelta(seconds=0)
self.outfile = outfile
f = open(infile, 'r')
self.data = json.loads(f.read())
self.items = self.data['results']['items']
log.debug("{} items found.".format(len(self.items)))
def to_delta(self, str_seconds, int_offset_secs=0):
''' Transform a string representing seconds in float (e.g. ss.mmm) to a
timedelta object'''
flt_s = float(str_seconds)
int_s = int(flt_s)
int_ms = int((flt_s - int_s) * 1000.0)
return timedelta(seconds=int_s + int_offset_secs, milliseconds=int_ms)
def get_last(self, i):
''' Return the timestamp (as a timedelta) of the last transcribed word
and update the pointer to this one if possible.'''
old_last = self.last
if 'start_time' in i:
self.last = self.to_delta(i['start_time'])
return old_last
def get_start(self, i):
try:
if 'start_time' in i:
return self.to_delta(i['start_time'])
else:
return self.get_last(i)
except:
log.exception('{}'.format(i))
def get_text(self, i):
return i['alternatives'][0]['content']
''' SRT lines are up to MAX_LEN (usually 74) characters. Break lines on
punctuation, line length, and intervals between words.
'''
def parse(self):
line = ''
start = None
for n, i in enumerate(self.items):
text = self.get_text(i)
if len(line) == 0: # New line. Start subtitle timestamp.
start = self.get_start(i)
# If n-1 is the length of self.items, we've hit the end of the list
if n + 1 == len(self.items):
line += text
self._add_line(line, start, self.get_last(
i) + timedelta(milliseconds=INTERVAL))
continue
# If the text is a period and the line will be over MAX_LEN with the next word, end it.
if text == '.' and len(line) + len(self.get_text(self.items[n + 1])) > MAX_LEN:
line += text
log.debug("Hit period at end of line.")
self._add_line(line, start, self.get_last(
i) + timedelta(milliseconds=INTERVAL))
line = ''
continue
# If the time elapsed since the last word is > INTERVAL ms, go to a new line.
if (self.get_start(i) - self.get_last(i)).total_seconds() > INTERVAL / 1000:
log.debug("Interval exceeded")
self._add_line(line, start, self.get_last(
i) + timedelta(milliseconds=INTERVAL))
line = text
start = self.get_start(i)
continue
if len(line) + len(text) < MAX_LEN: # Add it to the line
if i['type'] == 'punctuation': # No space before punctuation
line += text
else:
line += ' {}'.format(text)
else: # Line is long enough. Commit it.
self._add_line(line, start, self.get_last(
i) + timedelta(milliseconds=INTERVAL))
line = text
if 'start_time' in i:
start = self.to_delta(i['start_time'])
else:
start = self.get_last(i)
self.get_last(i)
def _add_line(self, text, start, end):
''' As each line comes in, set and/or correct the timing.
Algorithmically, subtitles are to be on the screen no more than
2 at a time, and for up to MAX_TIME seconds. Since each line comes
individually from the VBI parser, any lines that arrive within
half of a second should be consolidated into one SRT entry.
Subsequent entries should end the previous entry if it comes
less than 5 seconds after it.
Check that the next entry for the start time and set the end 1 frame
(~33ms) before it. '''
if len(self.srt) == 0: # First line
self.srt.append(srt.Subtitle(index=1,
start=start,
end=end,
content=text))
log.debug("Add: {}".format(self.srt[-1]))
return
# Line-combining threshold
delta = timedelta(milliseconds=CC_TIME_WINDOW)
if start < self.srt[-1].start + delta: # Is it within the time window?
# Combine
self.srt[-1].content = '{}\n{}'.format(self.srt[-1].content, text)
log.debug("Combine: {}".format(self.srt[-1]))
else: # It is outside the time window
# Previous entry is too long
if self.srt[-1].end > self.srt[-1].start + timedelta(seconds=MAX_TIME):
_e_time = self.srt[-1].start + timedelta(seconds=MAX_TIME)
_redux = (self.srt[-1].end - _e_time).total_seconds() * 1000
_total = (_e_time - self.srt[-1].start).total_seconds() * 1000
log.debug("Length set to {} (removed {}ms, {}ms total display time)".format(
_e_time, _redux, _total))
self.srt[-1].end = _e_time # So fix it
if self.srt[-1].end > start: # Previous entry ends past what we're adding
f_time = start - timedelta(milliseconds=33)
_redux = (f_time - self.srt[-1].end).total_seconds() * 1000
_total = (f_time - self.srt[-1].start).total_seconds() * 1000
log.debug("End timestamp reduced to {} ({}ms, {}ms total display time)".format(
f_time, _redux, _total))
self.srt[-1].end = f_time # So fix it
if len(text) > MAX_LINE_LEN and '\n' not in text: # Break the line if not already split
tlist = str.split(text)
tout = ''
for i, t in enumerate(tlist):
if i == 0: # First word
tout = t
elif len(tout) + len(t) <= MAX_LINE_LEN:
tout += ' {}'.format(t)
else:
tout += '\n{}'.format(' '.join(tlist[i:]))
break
log.debug("Split line longer than {} characters:\n{}==>\n{}".format(
MAX_LINE_LEN, text, tout))
text = tout # This could be assigned above, but is done here for the debug line above
# Add the new entry to the SRT list
self.srt.append(srt.Subtitle(index=len(self.srt) + 1,
start=start,
end=end,
content=text))
log.debug("Add: {}".format(self.srt[-1]))
#if len(self.srt) > 10:
# quit()
def write(self, filename=None):
if not filename:
filename = self.outfile
f = open(filename, 'w')
f.write(srt.compose(self.srt))
f.flush()
f.close()
if __name__ == "__main__":
infile = sys.argv[1]
outfile = sys.argv[2]
t = TranscribeToSRT(infile, outfile)
t.parse()
t.write()
webvtt.from_srt(outfile).save()
| 2.921875 | 3 |
tests/from_file_multiple_calls_mismatch/test_mock_server_from_file_multiple_calls_mismatch.py | icanbwell/mockserver_client | 0 | 12790813 | from glob import glob
from pathlib import Path
from typing import List, Any, Dict
import pytest
import requests
import json
from requests import Response
from mockserver_client.exceptions.mock_server_expectation_not_found_exception import (
MockServerExpectationNotFoundException,
)
from mockserver_client.exceptions.mock_server_json_content_mismatch_exception import (
MockServerJsonContentMismatchException,
)
from mockserver_client.mockserver_client import MockServerFriendlyClient
from mockserver_client.mockserver_verify_exception import MockServerVerifyException
def test_mock_server_from_file_multiple_calls_mismatch() -> None:
expectations_dir: Path = Path(__file__).parent.joinpath("./expectations")
requests_dir: Path = Path(__file__).parent.joinpath("./requests")
test_name = "test_mock_server"
mock_server_url = "http://mock-server:1080"
mock_client: MockServerFriendlyClient = MockServerFriendlyClient(
base_url=mock_server_url
)
mock_client.clear(f"/{test_name}/*")
mock_client.reset()
mock_client.expect_files_as_json_requests(
expectations_dir, path=f"/{test_name}/foo/1/merge", json_response_body={}
)
mock_client.expect_default()
http = requests.Session()
file_path: str
files: List[str] = sorted(
glob(str(requests_dir.joinpath("**/*.json")), recursive=True)
)
for file_path in files:
with open(file_path, "r") as file:
content: Dict[str, Any] = json.loads(file.read())
response: Response = http.post(
mock_server_url + "/" + test_name + "/foo/1/merge",
json=[content],
)
assert response.ok
with pytest.raises(MockServerVerifyException):
try:
mock_client.verify_expectations(test_name=test_name)
except MockServerVerifyException as e:
# there should be two expectations.
# One for the content not matching and one for the expectation not triggered
assert len(e.exceptions) == 2
json_content_mismatch_exceptions: List[
MockServerJsonContentMismatchException
] = [
e1
for e1 in e.exceptions
if isinstance(e1, MockServerJsonContentMismatchException)
]
assert len(json_content_mismatch_exceptions) == 1
expectation_not_found_exceptions: List[
MockServerExpectationNotFoundException
] = [
e1
for e1 in e.exceptions
if isinstance(e1, MockServerExpectationNotFoundException)
]
assert len(expectation_not_found_exceptions) == 1
print(str(e))
raise e
| 2.21875 | 2 |
src/updater.py | vincent-lg/cocomud | 3 | 12790814 | # Copyright (c) 2016-2020, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Auto-updater of the CocoMUD client."""
import os
from configobj import ConfigObj
from ytranslate import init, select, t
import wx
from wx.lib.pubsub import pub
from autoupdate import AutoUpdate
from version import BUILD
# Determines the user's language
AVAILABLE_LANGUAGES = ("en", "fr")
DEFAULT_LANGUAGE = "en"
path = os.path.join("settings", "options.conf")
config = ConfigObj(path)
try:
lang = config["general"]["language"]
assert lang in AVAILABLE_LANGUAGES
except (KeyError, AssertionError):
lang = DEFAULT_LANGUAGE
# Translation
init(root_dir="translations")
select(lang)
# Classes
class DummyUpdater(wx.Frame):
"""Dummy updater, to which updaters should inherit."""
def __init__(self, parent):
wx.Frame.__init__(self, parent)
self.autoupdater = None
self.default_text = t("ui.message.update.loading")
self.progress = 0
# Event binding
pub.subscribe(self.OnGauge, "gauge")
pub.subscribe(self.OnText, "text")
pub.subscribe(self.OnForceDestroy, "forceDestroy")
pub.subscribe(self.OnResponseUpdate, "responseUpdate")
def create_updater(self, just_checking=False):
"""Create a new autoupdater instance."""
self.autoupdate = AutoUpdate(BUILD, self, just_checking=just_checking)
self.autoupdate.start()
def OnGauge(self, value=0):
"""The progress indicator changes."""
pass
def OnText(self, text=""):
"""The text of the indicator changes."""
pass
def OnForceDestroy(self):
"""Ask for the window's destruction."""
pass
def OnResponseUpdate(self, build=None):
"""The check for updates is complete."""
pass
def UpdateGauge(self, value):
"""Change the level indicator."""
wx.CallAfter(pub.sendMessage, "gauge", value=value)
def UpdateText(self, text):
"""Change the text."""
wx.CallAfter(pub.sendMessage, "text", text=text)
def AskDestroy(self):
wx.CallAfter(pub.sendMessage, "forceDestroy")
def ResponseUpdate(self, build):
"""The check for updates has responded.
Note: the build parameter may be None (no update is available)
or a number (updates are available).
"""
wx.CallAfter(pub.sendMessage, "responseUpdate", build=build)
class Updater(DummyUpdater):
"""Graphical updater with a gauge."""
def __init__(self, parent, just_checking=False):
DummyUpdater.__init__(self, parent)
self.create_updater(just_checking)
self.InitUI()
self.SetTitle(t("ui.message.update.updating"))
self.Show()
self.Center()
def InitUI(self):
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
panel.SetSizer(sizer)
self.text = wx.TextCtrl(panel, value=self.default_text,
size=(600, 100), style=wx.TE_MULTILINE | wx.TE_READONLY)
self.gauge = wx.Gauge(panel, range=100, size=(250, 25))
self.cancel = wx.Button(panel, wx.ID_CANCEL)
# Window design
sizer.Add(self.text)
sizer.Add(self.gauge)
sizer.Add(self.cancel)
# Event binding
self.Bind(wx.EVT_BUTTON, self.OnCancel, self.cancel)
def OnGauge(self, value=0):
self.gauge.SetValue(value)
text = self.default_text
text += " ({}%)".format(value)
self.text.SetValue(text)
def OnText(self, text):
self.default_text = t(text)
self.text.SetValue(self.default_text)
def OnForceDestroy(self):
self.Destroy()
def OnCancel(self, e):
"""The user clicks on 'cancel'."""
value = wx.MessageBox(t("ui.message.update.confirm_cancel"),
t("ui.dialog.confirm"), wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if value == wx.YES:
self.Destroy()
# AppMainLoop
if __name__ == "__main__":
app = wx.App()
frame = Updater(None)
app.MainLoop()
| 1.335938 | 1 |
src/epc_exporter/collector/port_utilization_table.py | cisco-cx/epc_exporter | 0 | 12790815 | """
Collects show port utilization table command and parses it
"""
from prometheus_client import REGISTRY
from prometheus_client.metrics_core import GaugeMetricFamily
from collector.abstract_command_collector import AbstractCommandCollector
from device import AbstractDevice
class PortUtilizationCollector(AbstractCommandCollector):
""" Collector for show port utilization table command """
def __init__(self,
template_dir: str,
device: AbstractDevice,
registry=REGISTRY):
super().__init__(
template_dir + "/show_port_utilization_table.template", device,
registry)
def collect(self):
"""
collect method collects the command output from device and
return the metrics
"""
output = self._device.exec("show port utilization table")
rows = self._parser.ParseText(output)
metrics = [
GaugeMetricFamily("epc_port_rx_current",
"epc port rx current.",
labels=["port"]),
GaugeMetricFamily("epc_port_tx_current",
"epc npu tx current.",
labels=["port"]),
GaugeMetricFamily("epc_port_rx_5m",
"epc port rx 5m.",
labels=["port"]), GaugeMetricFamily(
"epc_port_tx_5m",
"epc npu tx 5m.",
labels=["port"]), GaugeMetricFamily(
"epc_port_rx_15m",
"epc port rx 15m.",
labels=["port"]), GaugeMetricFamily(
"epc_port_tx_15m",
"epc npu tx 15m.",
labels=["port"])
]
for row in rows:
for field_index in range(6):
metrics[field_index].add_metric(labels=[row[0]],
value=row[field_index + 1])
return metrics
| 2.5625 | 3 |
src/apps/trainings/managers/network_pandas_queryset.py | sanderland/katago-server | 27 | 12790816 | from django.db.models import QuerySet
class NetworkPandasQuerySet(QuerySet):
pass
| 1.351563 | 1 |
kinetic_model_construction_and_analysis/src/tests/test_process_simulations.py | svevol/accoa_project_data_analysis | 0 | 12790817 | import os
import unittest
import scipy.io
from src.data.process_simulations import get_time_series_quantiles
from src.data.import_simulations import gather_sim_data, get_met_rxn_names
class TestProcessSimulations(unittest.TestCase):
def setUp(self):
this_dir, this_filename = os.path.split(__file__)
self.data_dir = os.path.join(this_dir, '..', '..', 'data', 'raw')
self.model_name = 'putida_v2_3_all_fixed_flux_2000_abs10_-4'
self.file_in = os.path.join(self.data_dir, f'simulation_{self.model_name}.mat')
self.mat = scipy.io.loadmat(self.file_in, squeeze_me=False)
def test_get_time_series_quantiles(self):
met_names, rxn_names = get_met_rxn_names(self.data_dir, 'putida_v2_3_all_fixed_flux')
time_points_spline = [10**-9, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1, 1] # , 10, 100]
n_models = 10
conc, conc_interp, flux, flux_interp = gather_sim_data(self.mat, met_names, rxn_names, n_models, time_points_spline,
save_concs=False, save_fluxes=False)
data_type = 'rxn'
flux_interp_quantiles = get_time_series_quantiles(flux_interp, time_points_spline, data_type, rxn_names)
| 1.960938 | 2 |
test/moves/test_acrobatics.py | adacker10/showdown | 8 | 12790818 | import unittest
from sim.battle import Battle
from data import dex
class TestAcrobatics(unittest.TestCase):
def test_acrobatics(self):
b = Battle(debug=False, rng=False)
b.join(0, [{'species': 'charmander', 'moves': ['tackle']}])
b.join(1, [{'species': 'pidgey', 'moves': ['acrobatics']}])
b.choose(0, dex.Decision('move', 0))
b.choose(1, dex.Decision('move', 0))
b.do_turn()
charmander = b.sides[0].pokemon[0]
pidgey = b.sides[1].pokemon[0]
#damage calcs were done by hand
self.assertEqual(charmander.hp, charmander.maxhp-76)
def test_acrobatics_noitem(self):
b = Battle(debug=False, rng=False)
b.join(0, [{'species': 'charmander', 'moves': ['tackle']}])
b.join(1, [{'species': 'pidgey', 'item': 'pokeball','moves': ['acrobatics']}])
b.choose(0, dex.Decision('move', 0))
b.choose(1, dex.Decision('move', 0))
b.do_turn()
charmander = b.sides[0].pokemon[0]
pidgey = b.sides[1].pokemon[0]
#damage calcs were done by hand
self.assertEqual(charmander.hp, charmander.maxhp-39)
def runTest(self):
self.test_acrobatics()
self.test_acrobatics_noitem()
| 2.671875 | 3 |
hic3defdr/util/lrt.py | thomasgilgenast/hic3defdr | 0 | 12790819 | import numpy as np
import scipy.stats as stats
from hic3defdr.util.scaled_nb import logpmf, fit_mu_hat
def lrt(raw, f, disp, design, refit_mu=True):
"""
Performs a likelihood ratio test on raw data ``raw`` given scaling factors
``f`` and dispersion ``disp``.
Parameters
----------
raw, f, disp : np.ndarray
Matrices of raw values, combined scaling factors, and dispersions,
respectively. Rows correspond to pixels, columns correspond to
replicates.
design : np.ndarray
Describes the grouping of replicates into conditions. Rows correspond to
replicates, columns correspond to conditions, and values should be True
where a replicate belongs to a condition and False otherwise.
Returns
-------
pvalues : np.ndarray
The LRT p-values per pixel.
llr : np.ndarray
The log likelihood ratio per pixel.
mu_hat_null, mu_hat_alt : np.ndarray
The fitted mean parameters under the null and alt models, respectively,
per pixel.
"""
if refit_mu:
mu_hat_null = fit_mu_hat(raw, f, disp)
mu_hat_alt = np.array(
[fit_mu_hat(raw[:, design[:, c]],
f[:, design[:, c]],
disp[:, design[:, c]])
for c in range(design.shape[1])]).T
else:
mu_hat_null = np.mean(raw / f, axis=1)
mu_hat_alt = np.array(
[np.mean(raw[:, design[:, c]] / f[:, design[:, c]], axis=1)
for c in range(design.shape[1])]).T
mu_hat_alt_wide = np.dot(mu_hat_alt, design.T)
null_ll = np.sum(logpmf(raw, mu_hat_null[:, None] * f, disp), axis=1)
alt_ll = np.sum(logpmf(raw, mu_hat_alt_wide * f, disp), axis=1)
llr = null_ll - alt_ll
pvalues = stats.chi2(design.shape[1] - 1).sf(-2 * llr)
return pvalues, llr, mu_hat_null, mu_hat_alt
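# Usage sketch (added for illustration, not part of the original module): shapes follow
# the docstring, with 100 pixels and 4 replicates split into 2 conditions. All values
# below are simulated assumptions, not real Hi-C data.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    raw = rng.poisson(10.0, size=(100, 4)).astype(float)   # raw counts per pixel/replicate
    f = np.ones((100, 4))                                   # combined scaling factors
    disp = np.full((100, 4), 0.1)                           # dispersions
    design = np.array([[True, False],
                       [True, False],
                       [False, True],
                       [False, True]])                      # replicate-to-condition map
    pvalues, llr, mu_null, mu_alt = lrt(raw, f, disp, design)
    print(pvalues[:5])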
| 2.484375 | 2 |
multitask_lightning/losses/loss.py | heyoh-app/gestures-detector | 8 | 12790820 | from .keypoint_losses import KpointFocalLoss
from .aux_losses import RegrLoss, MaskedFocal
def get_loss(loss):
loss_name = loss["name"]
params = loss["params"]
if loss_name == "kpoint_focal":
loss = KpointFocalLoss(**params)
elif loss_name == "masked_focal":
loss = MaskedFocal()
elif loss_name == "regr_loss":
loss = RegrLoss(**params)
else:
raise ValueError("Loss [%s] not recognized." % loss_name)
return loss
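# Hedged example (added for illustration): a config dict in the shape get_loss expects.
# "masked_focal" takes no constructor arguments here; the other losses are configured
# through their own params, defined in the respective loss classes.
def _example_build_loss():
    # returns a criterion built from a config dictionary
    return get_loss({"name": "masked_focal", "params": {}})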
| 2.4375 | 2 |
server/githubsrm/apis/urls.py | Aradhya-Tripathi/githubsrm | 1 | 12790821 | from django.urls import path
from .open_views import (
Contributor, Maintainer, HealthCheck, Team, ContactUs
)
urlpatterns = [
path('contributor', Contributor.as_view()),
path('maintainer', Maintainer.as_view()),
path('healthcheck', HealthCheck.as_view()),
path('team', Team.as_view()),
path('contact-us', ContactUs.as_view()),
]
| 1.625 | 2 |
pdf4me/Pdf4mePythonClientApi/pdf4me/helper/custom_http.py | pdf4me/pdf4me-clientapi-python | 1 | 12790822 | import requests
from pdf4me.helper.json_converter import JsonConverter
from pdf4me.helper.pdf4me_exceptions import Pdf4meClientException, Pdf4meBackendException
from pdf4me.helper.response_checker import ResponseChecker
# from pdf4me.helper.token_generator import TokenGenerator
class CustomHttp(object):
def __init__(self, token, apiurl):
self.token = token
self.json_converter = JsonConverter()
self.url = "https://api.pdf4me.com/"
if apiurl is not None and len(apiurl) != 0:
self.url = apiurl
self.userAgent = "pdf4me-python/0.8.24"
def post_universal_object(self, universal_object, controller):
"""Sends a post request to the specified controller with the given
universal_object as a body.
:param universal_object: object to be sent
:type universal_object: object
:param controller: swagger controller
:type controller: str
:return: post response
"""
# prepare post request
request_url = self.url + controller
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Basic ' + self.token,
'User-Agent': self.userAgent
}
# convert body to json
body = self.json_converter.dump(element=universal_object)
# send request
res = requests.post(request_url, data=body, headers=headers)
# check status code
self.__check_status_code(res)
# check docLogs for error messages
self.__check_docLogs_for_error_messages(res)
# read content from response
json_response = self.json_converter.load(res.text)
return json_response
def post_wrapper(self, octet_streams, values, controller):
"""Builds a post requests from the given parameters.
:param octet_streams: (key: file identifier, value: open(fileName, 'rb'))) pairs
:type octet_streams: list
:param values: (key: identifier of value, value: content of value) pairs
:type values: list
:param controller: swagger controller
:type controller: str
:return: post response
"""
# prepare post request
request_url = self.url + controller
header = {'Authorization': 'Basic ' + self.token, 'User-Agent': self.userAgent}
# build files
if octet_streams is not None and len(octet_streams) != 0:
files = {key: value for (key, value) in octet_streams}
else:
files = None
# build values
if len(values) != 0:
data = {key: value for (key, value) in values}
else:
data = None
# send request
if files is None:
if data is None:
raise Pdf4meClientException("Please provide at least one value or an octet-stream.")
else:
res = requests.post(request_url, data=data, headers=header)
else:
if data is None:
res = requests.post(request_url, files=files, headers=header)
else:
res = requests.post(request_url, files=files, data=data, headers=header)
# check status code
self.__check_status_code(res)
# check docLogs for error messages
self.__check_docLogs_for_error_messages(res)
return res.content
def get_object(self, query_strings, controller):
"""Sends a get request to the specified controller with the given query strings.
:param query_strings: params to be sent
:type query_strings: str
:param controller: swagger controller
:type controller: str
:return: post response
"""
# prepare post request
request_url = self.url + controller
headers = {
'Authorization': 'Basic ' + self.token,
'User-Agent': self.userAgent
}
# send request
res = requests.get(request_url, data=query_strings, headers=headers)
# check status code
self.__check_status_code(res)
# check docLogs for error messages
self.__check_docLogs_for_error_messages(res)
# read content from response
json_response = self.json_converter.load(res.text)
return json_response
def get_wrapper(self, query_strings, controller):
"""Sends a get request to the specified controller with the given
query string and returns a file
:param query_strings: params to be sent
:type query_strings: str
:param controller: swagger controller
:type controller: str
:return: file
"""
# prepare post request
request_url = self.url + controller
headers = {
'Authorization': 'Basic ' + self.token,
'User-Agent': self.userAgent
}
# send request
res = requests.get(request_url, data=query_strings, headers=headers)
# check status code
self.__check_status_code(res)
# check docLogs for error messages
self.__check_docLogs_for_error_messages(res)
return res.content
def __check_status_code(self, response):
'''
        Checks whether the status code is either 200 or 204; otherwise a Pdf4meBackendException is raised.
:param response: post response
:type response: requests.Response
:return: None
'''
status_code = response.status_code
status_reason = response.reason
if status_code == 500:
server_error = self.json_converter.load(response.text)['error_message']
trace_id = self.json_converter.load(response.text)['trace_id']
raise Pdf4meBackendException('HTTP 500 ' + status_reason + " : trace_id " + trace_id + " : " + server_error)
elif status_code != 200 and status_code != 204:
error = response.text
raise Pdf4meBackendException('HTTP ' + str(status_code) + ': ' + status_reason + " : " + error)
def __check_docLogs_for_error_messages(self, response):
'''
Checks whether the HTTP response's docLogs contain any error message, in case of an error
a Pdf4meBackendException is thrown.
:param response: post response
:type response: requests.Response
:return: None
'''
ResponseChecker().check_response_for_errors(response.text)
| 2.65625 | 3 |
0282.Expression Add Operators/solution.py | zhlinh/leetcode | 0 | 12790823 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: <EMAIL>
Version: 0.0.1
Created Time: 2016-05-09
Last_modify: 2016-05-09
******************************************
'''
'''
Given a string that contains only digits 0-9 and a target value,
return all possibilities to add binary operators (not unary)
+, -, or * between the digits so they evaluate to the target value.
Examples:
"123", 6 -> ["1+2+3", "1*2*3"]
"232", 8 -> ["2*3+2", "2+3*2"]
"105", 5 -> ["1*0+5","10-5"]
"00", 0 -> ["0+0", "0-0", "0*0"]
"3456237490", 9191 -> []
Credits:
Special thanks to @davidtan1890 for adding this problem
and creating all test cases.
'''
class Solution(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
results = []
if num:
self.dfs(num, target, 0, 0, 0, "", results)
return results
def dfs(self, num, target, pos, val, backup, result, results):
if pos >= len(num):
if val == target:
results.append(result)
return
for i in range(pos, len(num)):
if i != pos and num[pos] == "0":
break
cur = int(num[pos:i+1])
if pos == 0:
self.dfs(num, target, i + 1, cur, cur, \
result + str(cur), results)
else:
self.dfs(num, target, i + 1, val + cur, cur, \
result + "+" + str(cur), results)
self.dfs(num, target, i + 1, val - cur, -cur, \
result + "-" + str(cur), results)
self.dfs(num, target, i + 1, val - backup + backup * cur, \
backup * cur, result + "*" + str(cur), results)
| 4.15625 | 4 |
tests/fixtures.py | jsam/datagears | 3 | 12790824 | from typing import Any, TypeVar, Union
T = TypeVar("T")
Fixture = Union[Any, T]
| 1.664063 | 2 |
api/src/wt/http_api/_common.py | sedlar/work-tracking | 0 | 12790825 | from functools import wraps
from wt.common.errors import ObjectDoesNotExist, BadRequest
def get_error_response(ex):
return {
"code": ex.error_code.value,
"message": ex.message
}
def handle_errors(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ObjectDoesNotExist as ex:
return get_error_response(ex), 404
except BadRequest as ex:
return get_error_response(ex), 400
return wrapper
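# Illustrative use of the decorator (the endpoint below is a made-up example, not an
# actual handler from this API): exceptions raised inside the handler are mapped to
# 404/400 responses by handle_errors.
@handle_errors
def get_project_example(project_id):
    # a real handler would load the project here and may raise
    # ObjectDoesNotExist or BadRequest
    return {"id": project_id}, 200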
DUMMY_STATS = {
"progress": 0,
"estimated_duration": 0,
"estimated_cost": {
"amount": 0,
"currency": "CZK",
},
"burned_duration": 0,
"burned_cost": {
"amount": 0,
"currency": "CZK",
},
"burned_expenditures_cost": {
"amount": 0,
"currency": "CZK",
},
}
| 2.25 | 2 |
SubGNN/train_config.py | thomasly/SubGNN | 1 | 12790826 | # General
import numpy as np
import random
import argparse
import json
import commentjson
import joblib
import os
import pathlib
from collections import OrderedDict
# Pytorch
import torch
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
# Optuna
import optuna
from optuna.integration import PyTorchLightningPruningCallback
# Our Methods
from . import SubGNN as md
from SubGNN import config
def parse_arguments():
"""
Read in the config file specifying all of the parameters
"""
parser = argparse.ArgumentParser(description="Learn subgraph embeddings")
parser.add_argument("-config_path", type=str, default=None, help="Load config file")
args = parser.parse_args()
return args
def read_json(fname):
"""
Read in the json file specified by 'fname'
"""
with open(fname, "rt") as handle:
return commentjson.load(handle, object_hook=OrderedDict)
def get_optuna_suggest(param_dict, name, trial):
"""
Returns a suggested value for the hyperparameter specified by 'name' from the range
of values in 'param_dict'
name: string specifying hyperparameter
trial: optuna trial
param_dict: dictionary containing information about the hyperparameter (range of
values & type of sampler)
e.g.{
"type" : "suggest_categorical",
"args" : [[ 64, 128]]
}
"""
module_name = param_dict["type"] # e.g. suggest_categorical, suggest_float
args = [name]
args.extend(
param_dict["args"]
) # resulting list will look something like this ['batch_size', [ 64, 128]]
if "kwargs" in param_dict:
kwargs = dict(param_dict["kwargs"])
return getattr(trial, module_name)(*args, **kwargs)
else:
return getattr(trial, module_name)(*args)
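# Hedged example (added for illustration): sampling one made-up hyperparameter with a
# throwaway Optuna study, using the dict format shown in the docstring above.
# `study.ask()` assumes a reasonably recent Optuna; the spec below is not from a real config.
def _example_get_optuna_suggest():
    spec = {"type": "suggest_categorical", "args": [[64, 128]]}
    study = optuna.create_study()
    trial = study.ask()
    return get_optuna_suggest(spec, "batch_size", trial)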
def get_hyperparams_optuna(run_config, trial):
"""
Converts the fixed and variable hyperparameters in the run config to a dictionary of
the final hyperparameters
Returns: hyp_fix - dictionary where key is the hyperparameter name (e.g. batch_size)
and value is the hyperparameter value
"""
# initialize the dict with the fixed hyperparameters
hyp_fix = dict(run_config["hyperparams_fix"])
# update the dict with variable value hyperparameters by sampling a hyperparameter
# value from the range specified in the run_config
hyp_optuna = {
k: get_optuna_suggest(run_config["hyperparams_optuna"][k], k, trial)
for k in dict(run_config["hyperparams_optuna"]).keys()
}
hyp_fix.update(hyp_optuna)
return hyp_fix
def build_model(run_config, trial=None):
"""
Creates SubGNN from the hyperparameters specified in the run config
"""
# get hyperparameters for the current trial
hyperparameters = get_hyperparams_optuna(run_config, trial)
# Set seeds for reproducibility
torch.manual_seed(hyperparameters["seed"])
np.random.seed(hyperparameters["seed"])
torch.cuda.manual_seed(hyperparameters["seed"])
torch.cuda.manual_seed_all(hyperparameters["seed"])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# initialize SubGNN
model = md.SubGNN_Chem(
hyperparameters,
run_config["graph_path"],
run_config["subgraphs_path"],
run_config["embedding_path"],
run_config["similarities_path"],
run_config["shortest_paths_path"],
run_config["degree_sequence_path"],
run_config["ego_graph_path"],
)
return model, hyperparameters
def build_trainer(run_config, hyperparameters, trial=None):
"""
Set up optuna trainer
"""
if "progress_bar_refresh_rate" in hyperparameters:
p_refresh = hyperparameters["progress_bar_refresh_rate"]
else:
p_refresh = 5
# set epochs, gpus, gradient clipping, etc.
# if 'no_gpu' in run config, then use CPU
trainer_kwargs = {
"max_epochs": hyperparameters["max_epochs"],
"gpus": 0 if "no_gpu" in run_config else 1,
"num_sanity_val_steps": 0,
"progress_bar_refresh_rate": p_refresh,
"gradient_clip_val": hyperparameters["grad_clip"],
}
# set auto learning rate finder param
if "auto_lr_find" in hyperparameters and hyperparameters["auto_lr_find"]:
trainer_kwargs["auto_lr_find"] = hyperparameters["auto_lr_find"]
# Create tensorboard logger
lgdir = os.path.join(run_config["tb"]["dir_full"], run_config["tb"]["name"])
if not os.path.exists(lgdir):
os.makedirs(lgdir)
logger = TensorBoardLogger(
run_config["tb"]["dir_full"],
name=run_config["tb"]["name"],
version="version_" + str(random.randint(0, 10000000)),
)
if not os.path.exists(logger.log_dir):
os.makedirs(logger.log_dir)
print("Tensorboard logging at ", logger.log_dir)
trainer_kwargs["logger"] = logger
# Save top three model checkpoints
trainer_kwargs["checkpoint_callback"] = ModelCheckpoint(
filepath=os.path.join(
logger.log_dir, "{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"
),
save_top_k=3,
verbose=True,
monitor=run_config["optuna"]["monitor_metric"],
mode="max",
)
# if we use pruning, use the pytorch lightning pruning callback
if run_config["optuna"]["pruning"]:
trainer_kwargs["early_stop_callback"] = PyTorchLightningPruningCallback(
trial, monitor=run_config["optuna"]["monitor_metric"]
)
trainer = pl.Trainer(**trainer_kwargs)
return trainer, trainer_kwargs, logger.log_dir
def train_model(run_config, trial=None):
"""
Train a single model whose hyperparameters are specified in the run config
Returns the max (or min) metric specified by 'monitor_metric' in the run config
"""
# get model and hyperparameter dict
model, hyperparameters = build_model(run_config, trial)
# build optuna trainer
trainer, trainer_kwargs, results_path = build_trainer(
run_config, hyperparameters, trial
)
# dump hyperparameters to results dir
hparam_file = open(os.path.join(results_path, "hyperparams.json"), "w")
hparam_file.write(json.dumps(hyperparameters, indent=4))
hparam_file.close()
# dump trainer args to results dir
tkwarg_file = open(os.path.join(results_path, "trainer_kwargs.json"), "w")
pop_keys = [
key
for key in ["logger", "profiler", "early_stop_callback", "checkpoint_callback"]
if key in trainer_kwargs.keys()
]
[trainer_kwargs.pop(key) for key in pop_keys]
tkwarg_file.write(json.dumps(trainer_kwargs, indent=4))
tkwarg_file.close()
# train the model
trainer.fit(model)
# write results to the results dir
if results_path is not None:
hparam_file = open(os.path.join(results_path, "final_metric_scores.json"), "w")
results_serializable = {k: float(v) for k, v in model.metric_scores[-1].items()}
hparam_file.write(json.dumps(results_serializable, indent=4))
hparam_file.close()
# return the max (or min) metric specified by 'monitor_metric' in the run config
all_scores = [
score[run_config["optuna"]["monitor_metric"]].numpy()
for score in model.metric_scores
]
if run_config["optuna"]["opt_direction"] == "maximize":
return np.max(all_scores)
else:
return np.min(all_scores)
def main():
"""
Perform an optuna run according to the hyperparameters and directory locations
specified in 'config_path'
"""
torch.autograd.set_detect_anomaly(True)
args = parse_arguments()
# read in config file
run_config = read_json(args.config_path)
# Set paths to data
task = run_config["data"]["task"]
# paths to subgraphs, edge list, and shortest paths between all nodes in the graph
run_config["subgraphs_path"] = os.path.join(task, "subgraphs.pth")
run_config["graph_path"] = os.path.join(task, "edge_list.txt")
run_config["shortest_paths_path"] = os.path.join(task, "shortest_path_matrix.npy")
run_config["degree_sequence_path"] = os.path.join(task, "degree_sequence.txt")
run_config["ego_graph_path"] = os.path.join(task, "ego_graphs.txt")
# directory where similarity calculations will be stored
run_config["similarities_path"] = os.path.join(task, "similarities/")
# get location of node embeddings
run_config["embedding_path"] = os.path.join(task, "atom_features.pth")
# create a tensorboard directory in the folder specified by dir in the PROJECT ROOT
# folder
if "local" in run_config["tb"] and run_config["tb"]["local"]:
run_config["tb"]["dir_full"] = run_config["tb"]["dir"]
else:
run_config["tb"]["dir_full"] = os.path.join(
config.PROJECT_ROOT, run_config["tb"]["dir"]
)
ntrials = run_config["optuna"]["opt_n_trials"]
print(f"Running {ntrials} Trials of optuna")
if run_config["optuna"]["pruning"]:
pruner = optuna.pruners.MedianPruner()
else:
pruner = None
# the complete study path is the tensorboard directory + the study name
run_config["study_path"] = os.path.join(
run_config["tb"]["dir_full"], run_config["tb"]["name"]
)
print("Logging to ", run_config["study_path"])
pathlib.Path(run_config["study_path"]).mkdir(parents=True, exist_ok=True)
# get database file
db_file = os.path.join(run_config["study_path"], "optuna_study_sqlite.db")
# specify sampler
if (
run_config["optuna"]["sampler"] == "grid"
and "grid_search_space" in run_config["optuna"]
):
sampler = optuna.samplers.GridSampler(run_config["optuna"]["grid_search_space"])
elif run_config["optuna"]["sampler"] == "tpe":
sampler = optuna.samplers.TPESampler()
elif run_config["optuna"]["sampler"] == "random":
sampler = optuna.samplers.RandomSampler()
# create an optuna study with the specified sampler, pruner, direction (e.g.
# maximize) A SQLlite database is used to keep track of results Will load in
# existing study if one exists
study = optuna.create_study(
direction=run_config["optuna"]["opt_direction"],
sampler=sampler,
pruner=pruner,
storage="sqlite:///" + db_file,
study_name=run_config["study_path"],
load_if_exists=True,
)
study.optimize(
lambda trial: train_model(run_config, trial),
n_trials=run_config["optuna"]["opt_n_trials"],
n_jobs=run_config["optuna"]["opt_n_cores"],
)
optuna_results_path = os.path.join(run_config["study_path"], "optuna_study.pkl")
print("Saving Study Results to", optuna_results_path)
joblib.dump(study, optuna_results_path)
print(study.best_params)
if __name__ == "__main__":
main()
| 2.1875 | 2 |
bangpy-ops/ops/sum/sum.py | testouya/mlu-ops | 0 | 12790827 | # Copyright (C) [2021] by Cambricon, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# pylint: disable=missing-docstring, invalid-name, too-many-locals
"""A multi-platform code link example test for BANGPy TCP."""
import numpy as np
import bangpy
from bangpy import tcp
from bangpy.common import utils, load_op_by_type
from bangpy.platform.bang_config import ALIGN_LENGTH, TARGET
from bangpy.tcp.runtime import TaskType
from bangpy.tcp.util import round_up, round_down
DTYPES = [bangpy.float32]
TARGET_LIST = ["mlu290"]
KERNEL_NAME = "exp"
class Exp(object):
"""Operator description:
Add the data in the two buffers.
"""
def __init__(self, dtype, target, task_num):
self.dtype = dtype
self.target = target
self.task_num = task_num
self.bp = tcp.TCP(target)
self.length = self.bp.SizeVar("length")
self.nram_size = TARGET(target).nram_size
self.dtype_sz = dtype.bytes
self.col_count = self.bp.Var("col_count")
self.row_count = self.bp.Var("row_count")
self.bp.launch_task(self.task_num, 1, 1)
    # buffer: source data
    # start_index: first index of the range
    # end_index: last index of the range
    # Computes the sum of the 1-D buffer over [start_index, end_index] and writes the result at start_index
def one_dimensional_sum(self,buffer,start_index,end_index):
        data_length = self.bp.Scalar(bangpy.int32,"data_length",end_index - start_index +1 )  # length of the incoming data
        count_for_128_align = self.bp.Scalar(bangpy.int32,"count_for_128_align",128 // self.dtype_sz)  # how many elements (indices) fit in 128 bytes
        remain = self.bp.Scalar(bangpy.int32,"remain",data_length % count_for_128_align)  # number of leftover elements after 128-byte alignment
        current_end_index = self.bp.Scalar(bangpy.int32,"current_end_index",end_index - remain +1)  # end index (+1) of the aligned part once the remainder is removed; +1 because Python slicing [a:b] excludes b
        # accumulate the unaligned tail onto the first element, one value at a time
with self.bp.if_scope(remain != 0):
with self.bp.if_scope(current_end_index != 0):
with self.bp.for_range(0,remain) as i:
buffer[start_index] = buffer[start_index] + buffer[current_end_index + i]
with self.bp.else_scope():
with self.bp.for_range(0,remain -1) as j:
buffer[start_index] = buffer[start_index] + buffer[current_end_index + j +1]
            data_length.assign(data_length - remain)  # drop the remainder and redefine the data length
        # Skip the steps below when the data length is smaller than one aligned chunk.
        # When at least one aligned chunk exists, sum it directly:
        # 1. each row holds 128 bytes
        # 2. work out how many rows there are
        # 3. reshape to (rows, elements per 128 bytes)
        # 4. run sumpool on it; the first element of every row is what we need, so the
        #    final result ends up directly in buffer[start_index]
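        # Illustration (assumed values, not from the original code): with float32
        # (4 bytes per element) count_for_128_align is 32, so 96 aligned elements
        # reshape to (3, 32); sumpool over the row axis collapses them into row 0,
        # leaving the total at buffer[start_index].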
with self.bp.if_scope(data_length>=count_for_128_align):
self.bp.print(buffer[0:64])
self.bp.sum(buffer[start_index:current_end_index],buffer[start_index:current_end_index])
#self.bp.print("sumpool->",buffer[start_index])
row = self.bp.Scalar(bangpy.int32,"row",data_length/count_for_128_align)
reshape_buffer = buffer[start_index:current_end_index].reshape([row,count_for_128_align])
self.bp.sumpool(reshape_buffer,reshape_buffer,(row,),(1,))
# self.bp.print("sumpool->",buffer[start_index])
    def two_dimension_row_sum(self,buffer,row_count,col_count):  # compute the sum of each row of the 2-D array; the result is stored at the head of each row
with self.bp.for_range(0,row_count) as i:
self.one_dimensional_sum(buffer[i][:],0,col_count-1)
    # buffer: source data
    # temp_buffer: NRAM storage of the same size as buffer
    # row_count: number of rows
    # col_count: number of columns
    # Computes the per-column sums over the rectangle spanning rows 0..row_count and
    # columns 0..col_count, and writes the result into the first row of the source data.
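    # For instance (illustration only): on a 4 x 32 tile the sumpool below uses a
    # kernel of (row_count,) over the row axis, so row 0 ends up holding the column sums.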
def two_dimension_col_sum(self,buffer,temp_buffer,row_count,col_count):
        count_for_128_align =self.bp.Scalar(bangpy.int32,"count_for_128_align",128 // self.dtype_sz)  # how many elements 128 bytes holds for the current dtype
col_remain = self.bp.Scalar(bangpy.int32,"col_remain",col_count % count_for_128_align)
current_col_count = self.bp.Scalar(bangpy.int32,"current_col_count",col_count - col_remain)
with self.bp.if_scope(col_remain != 0):
with self.bp.for_range(0,col_remain) as i:
current_col_index = self.bp.Scalar(bangpy.int32,"current_col_index",col_count - i -1)
with self.bp.for_range(0,row_count - 1) as j:
buffer[0][current_col_index] = buffer[0][current_col_index] + buffer[j + 1][current_col_index]
with self.bp.if_scope(col_count >= count_for_128_align):
reshape_buffer = temp_buffer.reshape([row_count,current_col_count])
#self.bp.print("data_before_calc->",buffer[0])
self.bp.memcpy(reshape_buffer[:,:],buffer[:,0:current_col_count])
self.bp.sumpool(reshape_buffer,reshape_buffer,(row_count,),(1,))
#self.bp.print("temp_after_calc->",reshape_buffer[0])
self.bp.memcpy(buffer[0][0:current_col_count],reshape_buffer[0][0:current_col_count])
#self.bp.print("data_res->",buffer[0])
def compute_body(self):
one_core_count = self.bp.Scalar(bangpy.int32,"one_core_count")
remain = self.bp.Scalar(bangpy.int32,"remain")
        current_core_start = self.bp.Scalar(bangpy.int32,"current_core_start")  # start index of this core's data
        current_core_end = self.bp.Scalar(bangpy.int32,"current_core_end")  # end index of this core's data
total_count_in_core = self.bp.Scalar(bangpy.int32,"total_count_in_core")
calc_loop_count = self.bp.Scalar(bangpy.int32,"calc_loop_count")
once_loop_start = self.bp.Scalar(bangpy.int32,"once_loop_start")
calc_size = self.bp.Scalar(bangpy.int32,"calc_size")
nram_avable_size = round_down( (TARGET(self.target).nram_size - 30* 1024) // 2 ,128)#self.bp.Scalar(bangpy.int32,"nram_avable_size")
        one_core_count.assign(self.length // self.task_num)  # split the workload evenly across cores (by index)
        remain.assign(self.length % self.task_num)  # remainder left over when splitting the tasks
        process_count = nram_avable_size // self.dtype_sz  # maximum number of elements a core can process at once
        with self.bp.if_scope(self.bp.taskId < remain):  # if there is a remainder, spread it over the cores; taskId starts at 0
            current_core_start.assign((one_core_count + 1) * self.bp.taskId )
            current_core_end.assign((one_core_count + 1) * (self.bp.taskId + 1) - 1)  # the -1 may be unnecessary (to verify): Python slicing already excludes the upper bound
with self.bp.else_scope():
current_core_start.assign((one_core_count + 1) * remain + one_core_count * (self.bp.taskId - remain))
current_core_end.assign((one_core_count + 1) * remain + one_core_count * (self.bp.taskId - remain) + one_core_count - 1)
total_count_in_core.assign(current_core_end - current_core_start + 1)
# buffer_in0 = self.bp.Buffer(
# shape=(self.length,), name="INPUT0", dtype=self.dtype, scope="global"
# )
buffer_in0 = self.bp.Buffer(
shape=(self.length,), name="INPUT0", dtype=self.dtype, scope="global"
)
buffer_out = self.bp.Buffer(
shape=(self.length,), name="OUTPUT", dtype=self.dtype, scope="global"
)
nram_buffer_in0 = self.bp.Buffer(
shape=(process_count,),
name="GALA_IN",
dtype=self.dtype,
scope="nram",
)
test_buffer = self.bp.Buffer(
shape=(process_count,),
name="test_buffer",
dtype=self.dtype,
scope="nram",
)
calc_loop_count.assign((total_count_in_core + process_count - 1) // process_count)
with self.bp.for_range(0, calc_loop_count) as i:
            once_loop_start.assign(current_core_start + process_count * i)  # start of this core's data plus the offset for the i-th loop iteration
with self.bp.if_scope(i < calc_loop_count - 1):
calc_size.assign(process_count)
with self.bp.else_scope():
calc_size.assign(total_count_in_core % process_count)
with self.bp.block("data_copy"):
self.bp.memcpy(nram_buffer_in0[0:calc_size], buffer_in0[once_loop_start:once_loop_start + calc_size])
self.bp.print("calc_size-->",calc_size)
#self.one_dimensional_sum(nram_buffer_in0,0,calc_size -1)
row_count = self.bp.Scalar(dtype = bangpy.int32,name = "row_count",value = self.row_count)
col_count = self.bp.Scalar(dtype = bangpy.int32,name = "col_count",value = self.col_count)
reshape_buffer = nram_buffer_in0[0:calc_size].reshape([row_count,col_count])# (33,33)
            # column-wise sum over the 2-D array
            # Note: the upper index of the second buffer argument can run out of bounds; this is
            # only a demo and is not handled here. In real use, compute it from the actual size
            # of the data passed into the function on each call.
self.two_dimension_col_sum(reshape_buffer,test_buffer[0:row_count*col_count],row_count,col_count)
self.bp.memcpy(buffer_out[once_loop_start:once_loop_start + calc_size], nram_buffer_in0[:calc_size])
# build a executable module
f = self.bp.BuildBANG(
inputs=[buffer_in0,self.row_count,self.col_count,],
outputs=[buffer_out],
kernel_name=KERNEL_NAME,
)
return f
@tcp.register_mlu_op(DTYPES, TARGET_LIST, KERNEL_NAME)
def build_exp(dtype=None, target=None):
# tasktype fixed in UNION1
    task_num = 1  # changed from 4 to 64
f = Exp(dtype, target, task_num).compute_body()
return f
| 1.789063 | 2 |
normalTest/main.py | ForrestHeiYing/tutorials | 0 | 12790828 | <reponame>ForrestHeiYing/tutorials<gh_stars>0
#!usr/bin/env python
# _*_ coding:utf-8 _*_
"""
@author:chaowei
@file: main.py
@time: 2019/07/18
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from matplotlib import pyplot
import numpy as np
import time
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
from caffe2.python import model_helper
def workspace_test():
print("current blobs in the workspace : {}".format(workspace.Blobs)) # 查看workspace里面所有的blobs
print("Workspace has blob 'X' ?: {}".format(workspace.HasBlob("X"))) # 判断是否有blob X
X = np.random.randn(2, 3).astype(np.float32)
print("Generated X from numpy: \n{}".format(X))
workspace.FeedBlob("X", X) # 将 blob 传入 workspace
print("current blobs in the workspace:{}".format(workspace.Blobs()))
print("Workspace has blob 'X' ?{}".format(workspace.HasBlob("X")))
print("Fethched X:\n{}".format(workspace.FetchBlob("X"))) # 从workspace里面读取blob
# 判断两个矩阵是否相等,不等会抛出异常
np.testing.assert_array_equal(X, workspace.FetchBlob("X"))
# print("a=", np.testing.assert_array_equal(X, workspace.FetchBlob("X")))
print("current workspace: {}".format(workspace.CurrentWorkspace())) # 查看当前workspace
print("current blobs in the workspace: {}".format(workspace.Blobs()))
# The second parameter 'True' indicates that if 'gutentag' does not exist, create one
workspace.SwitchWorkspace("gutentag", True) # switch the workspace.
print("After Switch Workspace ................")
print("current workspace:{}".format(workspace.CurrentWorkspace()))
print("current blobs in the workspace:{}".format(workspace.Blobs()))
def operators_test():
# create operator.
op = core.CreateOperator(
"Relu", # The type of operator that we want to run.
["X"], # A list of input blobs by their names
["Y"], # A list of output blobs ...
)
print("Type of the created op is: {}".format(type(op)))
print("content: \n")
print(str(op))
workspace.FeedBlob("X", np.random.randn(2, 3).astype(np.float32))
print("current blobs in the workspace:{}\n".format(workspace.Blobs()))
workspace.RunOperatorOnce(op) # run op
print("current blobs in the workspace:{}\n".format(workspace.Blobs()))
print("X:\n{}\n".format(workspace.FetchBlob("X")))
print("Y:\n{}\n".format(workspace.FetchBlob("Y")))
print("Expected:\n{}\n".format(np.maximum(workspace.FetchBlob("X"), 1)))
op1 = core.CreateOperator(
"GaussianFill",
[], # GaussianFill does not need any parameters.
["W"],
shape=[100, 100],
mean=1.0,
std=1.0,
)
print("content of op1:\n")
print(str(op1))
workspace.RunOperatorOnce(op1)
temp = workspace.FetchBlob("W")
print("temp=", temp)
# pyplot.hist(temp.flatten(), bins=50)
# pyplot.title("ddd of Z")
def model_helper_test():
data = np.random.rand(16, 100).astype(np.float32) # create the input data
label = (np.random.rand(16)*10).astype(np.int32) # create the label
workspace.FeedBlob("data", data)
workspace.FeedBlob('label', label)
m = model_helper.ModelHelper(name="my_first_net") # create model
weight = m.param_init_net.XavierFill([], 'fc_w', shape=[10, 100])
bias = m.param_init_net.ConstantFill([], 'fc_b', shape=[10, ])
fc_1 = m.net.FC(["data", "fc_w", "fc_b"], "fc1")
pred = m.net.Sigmoid(fc_1, "pred")
softmax, loss = m.net.SoftmaxWithLoss([pred, "label"], ["softmax", "loss"])
print("m.net=", m.net.Proto())
print("m.param_init_net.Proto=", m.param_init_net.Proto())
workspace.RunNetOnce(m.param_init_net)
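    # A typical next step (not run in this demo) would be workspace.CreateNet(m.net)
    # followed by workspace.RunNet(...) to execute the network itself.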
pass
if __name__ == '__main__':
# workspace_test()
operators_test()
# model_helper_test()
pass | 2.59375 | 3 |
initial/initial_1.py | mhasan13-here/phase-plane-torch | 0 | 12790829 | <filename>initial/initial_1.py
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 17 01:42:08 2021
@author: mhasan13
using exmaple from
https://github.com/Intelligent-Computing-Lab-Yale/BNTT-Batch-Normalization-Through-Time/blob/main/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
class SpikingActivation(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
out = torch.zeros_like(input)
out[input > 0] = 1.0
return out
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad_input = grad_output.clone()
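        # piecewise-linear surrogate gradient: nonzero only where |input| < 1, so the
        # non-differentiable spike function can still pass an error signal backward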
grad = grad_input * 0.3 * F.threshold(1.0 - torch.abs(input), 0, 0)
return grad
def PoissonGen(inp, rescale_fac=2.0):
rand_inp = torch.rand_like(inp)
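    # Bernoulli spike per element: fires with probability |inp| / rescale_fac
    # (a simple rate-coding scheme), keeping the sign of the input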
return torch.mul(torch.le(rand_inp * rescale_fac, torch.abs(inp)).float(), torch.sign(inp))
x = torch.tensor(1.0)
y = torch.zeros(10)
tt = torch.arange(10)
for i in range(10):
y[i] = PoissonGen(x)
class Net(nn.Module):
def __init__(self, leak_mem=0.95, in_dim=1, num_cls=1):
super(Net, self).__init__()
self.threshold = 1
self.leak_mem = leak_mem
self.fc = nn.Linear(in_dim, num_cls, bias=False)
self.membrane = torch.zeros(in_dim, num_cls)
self.a_membrane = torch.zeros(in_dim, num_cls)
def forward(self, input):
self.out = torch.zeros(1,1)
self.a_membrane = self.leak_mem*self.a_membrane + (1-self.leak_mem)*(input)
# self.membrane = self.leak_mem*self.membrane + (1-self.leak_mem)*self.fc(self.a_membrane)
self.membrane = self.leak_mem*self.membrane + self.fc(self.a_membrane)
self.out[self.membrane > self.threshold] = 1.
self.membrane[self.membrane > self.threshold] = 0.
return self.out, self.membrane, self.a_membrane
# input = torch.ones(1,1)
net = Net()
net.fc.weight = torch.nn.Parameter(torch.tensor([[0.5]]))
out = []
membrane = []
a_membrane = []
spike = []
for t in range(40):
input = PoissonGen(torch.tensor(1.))
spike.append(input.detach().numpy())
input.unsqueeze_(0)
input.unsqueeze_(0)
a, b, c = net(input)
out.append(a.detach().numpy()[0])
membrane.append(b.detach().numpy()[0])
a_membrane.append(c.detach().numpy()[0])
plt.stem(spike)
plt.plot(a_membrane)
plt.plot(membrane)
plt.plot(out)
| 2.359375 | 2 |
logic2_analyzers/TI TCA6408A/pd.py | martonmiklos/sigrokdecoders_to_logic2_analyzers | 5 | 12790830 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 <NAME> <<EMAIL>>
## Copyright (C) 2013 <NAME> <<EMAIL>>
## Copyright (C) 2014 alberink <<EMAIL>>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class Decoder(srd.Decoder):
api_version = 3
id = 'tca6408a'
name = 'TI TCA6408A'
longname = 'Texas Instruments TCA6408A'
desc = 'Texas Instruments TCA6408A 8-bit I²C I/O expander.'
license = 'gplv2+'
inputs = ['i2c']
outputs = []
tags = ['Embedded/industrial', 'IC']
annotations = (
('register', 'Register type'),
('value', 'Register value'),
('warning', 'Warning'),
)
annotation_rows = (
('regs', 'Registers', (0, 1)),
('warnings', 'Warnings', (2,)),
)
def __init__(self):
self.reset()
def reset(self):
self.state = 'IDLE'
self.chip = -1
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putx(self, data):
self.put(self.ss, self.es, self.out_ann, data)
def handle_reg_0x00(self, b):
self.putx([1, ['State of inputs: %02X' % b]])
def handle_reg_0x01(self, b):
self.putx([1, ['Outputs set: %02X' % b ]])
def handle_reg_0x02(self, b):
self.putx([1, ['Polarity inverted: %02X' % b]])
def handle_reg_0x03(self, b):
self.putx([1, ['Configuration: %02X' % b]])
def handle_write_reg(self, b):
if b == 0:
self.putx([0, ['Input port', 'In', 'I']])
elif b == 1:
self.putx([0, ['Output port', 'Out', 'O']])
elif b == 2:
self.putx([0, ['Polarity inversion register', 'Pol', 'P']])
elif b == 3:
self.putx([0, ['Configuration register', 'Conf', 'C']])
def check_correct_chip(self, addr):
if addr not in (0x20, 0x21):
self.putx([2, ['Warning: I²C slave 0x%02X not a TCA6408A '
'compatible chip.' % addr]])
self.state = 'IDLE'
def decode(self, ss, es, data):
cmd, databyte = data
# Store the start/end samples of this I²C packet.
self.ss, self.es = ss, es
# State machine.
if self.state == 'IDLE':
# Wait for an I²C START condition.
if cmd != 'START':
return
self.state = 'GET SLAVE ADDR'
elif self.state == 'GET SLAVE ADDR':
self.chip = databyte
self.state = 'GET REG ADDR'
elif self.state == 'GET REG ADDR':
# Wait for a data write (master selects the slave register).
if cmd in ('ADDRESS READ', 'ADDRESS WRITE'):
self.check_correct_chip(databyte)
if cmd != 'DATA WRITE':
return
self.reg = databyte
self.handle_write_reg(self.reg)
self.state = 'WRITE IO REGS'
elif self.state == 'WRITE IO REGS':
# If we see a Repeated Start here, the master wants to read.
if cmd == 'START REPEAT':
self.state = 'READ IO REGS'
return
# Otherwise: Get data bytes until a STOP condition occurs.
if cmd == 'DATA WRITE':
handle_reg = getattr(self, 'handle_reg_0x%02x' % self.reg)
handle_reg(databyte)
elif cmd == 'STOP':
self.state = 'IDLE'
self.chip = -1
elif self.state == 'READ IO REGS':
# Wait for an address read operation.
if cmd == 'ADDRESS READ':
self.state = 'READ IO REGS2'
self.chip = databyte
return
elif self.state == 'READ IO REGS2':
if cmd == 'DATA READ':
handle_reg = getattr(self, 'handle_reg_0x%02x' % self.reg)
handle_reg(databyte)
elif cmd == 'STOP':
self.state = 'IDLE'
| 2.21875 | 2 |
pluto/coms/controller.py | chalant/pluto | 0 | 12790831 | <reponame>chalant/pluto
import uuid
import grpc
from zipline.finance.execution import MarketOrder, StopLimitOrder, StopOrder, LimitOrder
from protos import controller_service_pb2 as ctl, controllable_service_pb2_grpc as cbl_rpc, \
controllable_service_pb2 as cbl, broker_pb2_grpc as broker_rpc, broker_pb2 as br_msg, \
controller_service_pb2_grpc as ctl_rpc, data_bundle_pb2 as dtb
from pluto.coms.utils import server_utils as srv
from pluto.coms.utils import certification as crt
from pluto.coms.utils import conversions as cv
from pluto.utils import files
class BrokerServicer(broker_rpc.BrokerServicer):
'''encapsulates available services per-client'''
# todo: must check the metadata...
def __init__(self, broker, bundle_factory):
# the bundle factory is aggregated, for caching purposes.
self._bundle_factory = bundle_factory
self._tokens = set()
self._accounts = {}
self._broker = broker
def _check_metadata(self, context):
metadata = dict(context.invocation_metadata())
token = metadata['Token']
if not token in self._tokens:
context.abort(grpc.StatusCode.PERMISSION_DENIED, 'The provided token is incorrect')
def add_token(self, token):
self._tokens.add(token)
def add_account(self, account):
self._accounts[account.token] = account
def AccountState(self, request, context):
# todo: these methods aren't necessary
raise NotImplementedError
def PortfolioState(self, request, context):
raise NotImplementedError
def Orders(self, request, context):
self._check_metadata(context)
for order in self._get_dict_values(self._broker.orders()):
yield cv.to_proto_order(order)
def _get_dict_values(self, dict_):
return dict_.values()
def BatchOrder(self, request_iterator, context):
raise NotImplementedError
def CancelAllOrdersForAsset(self, request, context):
raise NotImplementedError
def PositionsState(self, request, context):
raise NotImplementedError
def Transactions(self, request, context):
self._check_metadata(context)
for trx in self._get_dict_values(self._broker.transactions()):
yield cv.to_proto_transaction(trx)
def SingleOrder(self, request, context):
self._check_metadata(context)
req_style = request.style
style = None
if req_style == br_msg.OrderParams.MARKET_ORDER:
style = MarketOrder()
elif req_style == br_msg.OrderParams.LIMIT_ORDER:
style = LimitOrder(request.limit_price)
elif req_style == br_msg.OrderParams.STOP_ORDER:
style = StopOrder(request.stop_price)
elif req_style == br_msg.OrderParams.STOP_LIMIT_ORDER:
style = StopLimitOrder(request.limit_price, request.stop_price)
if style:
return cv.to_proto_order(self._broker.order(cv.to_zp_asset(request.asset), request.amount, style))
else:
context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'unsupported order style argument')
def GetDataBundle(self, request, context):
'''creates a bundle based on the specified 'domains' and sends the bundle as a stream
of bytes.'''
# note: returns data by chunks of 1KB by default.
self._check_metadata(context)
for chunk in self._bundle_factory.get_bundle(request.country_code):
yield dtb.Bundle(data=chunk)
class BrokerMainServer(srv.MainServerFactory):
def __init__(self, bundle_factory, broker_url, broker, key=None, certificate=None):
super(BrokerMainServer, self).__init__(broker_url, key, certificate)
        self._acc = BrokerServicer(broker, bundle_factory)
def add_token(self, token):
self._acc.add_token(token)
def _add_servicer_to_server(self, server):
broker_rpc.add_BrokerServicer_to_server(self._acc, server)
class Controllable(object):
'''encapsulates utilities for remotely controlling a strategy.'''
def __init__(self, name, controllable_channel):
self._name = name
self._capital = 0.0
# reference to the controllable so that we can remotely control it
self._ctr = cbl_rpc.ControllableStub(controllable_channel)
# each strategy controller has a server that listens to some generated port...
# one client account per server.
@property
def capital(self):
return self._capital
@capital.setter
def capital(self, value):
self._capital = value
def run(self, start, end, max_leverage, data_frequency='daily', metrics_set='default', live=False):
'''this function is an iterable (generator) '''
if data_frequency == 'daily':
df = cbl.RunParams.DAY
elif data_frequency == 'minutely':
df = cbl.RunParams.MINUTE
else:
raise ValueError('No data frequency of type {} is supported'.format(data_frequency))
for perf in self._ctr.Run(
cbl.RunParams(
capital_base=self._capital,
data_frequency=df,
start_session=cv.to_datetime(start),
end_session=cv.to_datetime(end),
metrics_set=metrics_set,
live=live,
maximum_leverage=max_leverage
)):
yield cv.from_proto_performance_packet(perf)
class ControllerServicer(ctl_rpc.ControllerServicer, srv.IServer):
    '''Encapsulates the controller service. This class manages a portfolio of strategies (performs test routines,
    assigns capital, etc.).'''
# TODO: upon termination, we need to save the generated urls, and relaunch the services
# that server on those urls.
def __init__(self, bundle_factory, broker_url, key=None, certificate=None, ca=None):
# list of controllables (strategies)
self._controllables = {}
self._bundle_factory = bundle_factory
self._key = key
self._cert = certificate
self._ca = ca
self._config = files.JsonFile('controller/config')
self._account_url = broker_url
self._client_account = BrokerMainServer(bundle_factory, broker_url, key, certificate)
# TODO: finish this function (registration of the controllable)
def Register(self, request, context):
'''the controllable calls this function to be registered'''
# TODO: store the url permanently so that the client can be id-ed beyond run lifetime.
#the controllable sends its url
controllable_url = request.url
client_name = request.name
#a token is generated
token = self._create_token(client_name, controllable_url)
controllable = Controllable(
client_name,
self._create_channel(controllable_url)
)
#todo: when and how should we run the controllables? => should we schedule periodic back-testings?
# keep track of controllables so that we can control them etc.
self._controllables[token] = controllable
        # send the generated access url to the client (through a secure channel). The client communicates with the
# account through this channel.
# the client must store this url permanently, so that it can be identified
# add token to the client so that it can recognise clients
return ctl.RegisterReply(url=self._account_url, token=token)
def _load_config(self, name):
try:
return next(self._config.load())[name]
except FileNotFoundError:
raise KeyError
def _create_token(self, client_name, client_url):
        '''creates a token for the client and maps it to the client url'''
try:
conf = self._load_config(client_name)
return conf['token']
except KeyError:
#create a token and store clients data...
token = str(uuid.uuid4())
self._config.store({client_name: {'url': client_url, 'token': token}})
return token
def _create_channel(self, url):
return srv.create_channel(url, self._ca)
def start(self):
self._client_account.start()
def stop(self, grace=None):
self._client_account.stop(grace)
class ControllerCertificateFactory(crt.CertificateFactory):
def __init__(self, url):
super(ControllerCertificateFactory, self).__init__()
self._url = url
def _create_certificate(self, root_path, cert_name, key, subject):
'''creates a certificate request or returns a certificate if one exists...'''
# create the subject of the certificate request
subject.common_name = 'controller'
subject.alternative_names = [self._url]
# TODO: how do pod ip's,services etc work?
# additional addresses: pod ip, pod dns, master IP...
builder = crt.CertificateSigningRequestBuilder()
builder.name = 'controller'
builder.usages = ['digital signature','key encipherment',
'data encipherment','server auth']
builder.groups = ['system: authenticated']
return builder.get_certificate_signing_request(subject, key)
class ControllerMainServer(srv.MainServerFactory):
def __init__(self, bundle_factory, controller_url, broker_url, key=None, certificate=None):
'''the bundle_factory is an abstraction for creating data bundles.'''
super(ControllerMainServer, self).__init__(controller_url, key, certificate)
self._bdf = bundle_factory
self._key = key
self._cert = certificate
self._blt = broker_url
def _add_servicer_to_server(self, server):
ctl_rpc.add_ControllerServicer_to_server(ControllerServicer(
self._bdf,
self._blt,
self._key,
self._cert),
server)
| 1.84375 | 2 |
sgnlp/models/emotion_entailment/train.py | raymondng76/sgnlp | 14 | 12790832 | import math
import pandas as pd
from transformers import Trainer, TrainingArguments
from .config import RecconEmotionEntailmentConfig
from .data_class import RecconEmotionEntailmentArguments
from .modeling import RecconEmotionEntailmentModel
from .tokenization import RecconEmotionEntailmentTokenizer
from .utils import (
RecconEmotionEntailmentData,
convert_df_to_dataset,
parse_args_and_load_config,
)
def train_model(train_config: RecconEmotionEntailmentArguments):
"""
Method for training RecconEmotionEntailmentModel.
Args:
train_config (:obj:`RecconEmotionEntailmentArguments`):
RecconEmotionEntailmentArguments config load from config file.
Example::
            from sgnlp.models.emotion_entailment.train import train_model
            from sgnlp.models.emotion_entailment.utils import parse_args_and_load_config
            config = parse_args_and_load_config('config/emotion_entailment_config.json')
            train_model(config)
"""
config = RecconEmotionEntailmentConfig.from_pretrained(train_config.model_name)
tokenizer = RecconEmotionEntailmentTokenizer.from_pretrained(train_config.model_name)
model = RecconEmotionEntailmentModel.from_pretrained(train_config.model_name, config=config)
train_df = pd.read_csv(train_config.x_train_path)
val_df = pd.read_csv(train_config.x_valid_path)
train_dataset = convert_df_to_dataset(
df=train_df, max_seq_length=train_config.max_seq_length, tokenizer=tokenizer
)
val_dataset = convert_df_to_dataset(
df=val_df, max_seq_length=train_config.max_seq_length, tokenizer=tokenizer
)
train_config.len = len(train_df)
train_config.train_args["eval_steps"] = (
train_config.len / train_config.train_args["per_device_train_batch_size"]
)
train_config.train_args["warmup_steps"] = math.ceil(
(
train_config.len
// train_config.train_args["gradient_accumulation_steps"]
* train_config.train_args["num_train_epochs"]
)
* train_config.train_args["warmup_ratio"]
)
train_args = TrainingArguments(**train_config.train_args)
trainer = Trainer(
model=model,
args=train_args,
train_dataset=RecconEmotionEntailmentData(train_dataset),
eval_dataset=RecconEmotionEntailmentData(val_dataset),
)
trainer.train()
trainer.save_model()
if __name__ == "__main__":
cfg = parse_args_and_load_config()
train_model(cfg)
| 2.625 | 3 |
M2-Recession/M2SL_recession.py | Icosahedral-Dice/FE | 0 | 12790833 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 18:49:46 2021
@author: wanderer
"""
### Housekeeping ###
import pandas as pd
import pandas_datareader.data as web
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import seaborn as sns
sns.set_style('white', {"xtick.major.size": 2, "ytick.major.size": 2})
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71","#f4cae4"]
sns.set_palette(sns.color_palette(flatui,7))
from dateutil.relativedelta import relativedelta
save_loc = '~/Desktop/hu'
redownload = True
if redownload:
f1 = 'USREC' # recession data from FRED
f2 = 'M2SL' # M2 from FRED
start = pd.to_datetime('1959-12-01')
end = pd.to_datetime('2020-12-31')
M2 = web.DataReader([f2], 'fred', start, end)
data_line = M2.pct_change()
data_line = data_line.apply(lambda x: 12*x)
data_shade = web.DataReader([f1], 'fred', start, end)
data = data_shade.join(data_line, how='outer').dropna()
data.to_pickle(save_loc + r'/M2SL.pkl')
data = pd.read_pickle(save_loc + r'/M2SL.pkl')
# recessions are marked as 1 in the data
recs = data.query('USREC==1')
plot_cols = ['M2SL']
mpl.rcParams['font.family'] = 'Helvetica Neue'
fig, axes = plt.subplots(1,1, figsize=(12,6), sharex=True)
data[plot_cols].plot(subplots=True, ax=axes, marker='o', ms=3)
col = plot_cols
ax = axes
for month in recs.index:
ax.axvspan(month, month+ relativedelta(months=+1),
color=sns.xkcd_rgb['grey'], alpha=0.5)
    # let's add horizontal zero lines
ax.axhline(0, color='k', linestyle='-', linewidth=1)
# add titles
ax.set_title('Monthly ' + 'M2 percentage change' +
' \nRecessions Shaded Gray',
fontsize=14,
fontweight='demi')
# add axis labels
ax.set_ylabel('% change\n(Annualized)', fontsize=12, fontweight='demi')
ax.set_xlabel('Date', fontsize=12, fontweight='demi')
# upgrade axis tick labels
yticks = ax.get_yticks()
ax.yaxis.set_major_locator(mticker.FixedLocator(yticks))
ax.set_yticklabels(['{:3.1f}%'.format(x*100) for x in yticks]);
dates_rng = pd.date_range(data.index[0], data.index[-1], freq='24M')
plt.xticks(dates_rng, [dtz.strftime('%Y-%m') for dtz in dates_rng],
rotation=45)
# bold up tick axes
ax.tick_params(axis='both', which='major', labelsize=11)
# add cool legend
ax.legend(loc='upper left', fontsize=11, labels=['M2'],
frameon=True).get_frame().set_edgecolor('blue')
plt.savefig('M2SL_recession.png', dpi=300)
| 2.40625 | 2 |
postgres-debezium-ksql-elasticsearch/python_kafka_notify.py | alonsoir/examples-2 | 1,150 | 12790834 | <reponame>alonsoir/examples-2<gh_stars>1000+
# rmoff / 13 Jun 2018
from slackclient import SlackClient
from confluent_kafka import Consumer, KafkaError
import json
import time
import os,sys
token = os.environ.get('SLACK_API_TOKEN')
if token is None:
print('\n\n*******\nYou need to set your Slack API token in the SLACK_API_TOKEN environment variable\n\nExiting.\n\n*******\n')
sys.exit(1)
sc = SlackClient(token)
# Set 'auto.offset.reset': 'smallest' if you want to consume all messages
# from the beginning of the topic
settings = {
'bootstrap.servers': 'localhost:9092',
'group.id': 'python_kafka_notify.py',
'default.topic.config': {'auto.offset.reset': 'largest'}
}
c = Consumer(settings)
c.subscribe(['UNHAPPY_PLATINUM_CUSTOMERS'])
try:
while True:
msg = c.poll(0.1)
time.sleep(5)
if msg is None:
continue
elif not msg.error():
print('Received message: {0}'.format(msg.value()))
if msg.value() is None:
continue
try:
app_msg = json.loads(msg.value().decode())
except:
app_msg = json.loads(msg.value())
try:
email=app_msg['EMAIL']
message=app_msg['MESSAGE']
channel='unhappy-customers'
text=('`%s` just left a bad review :disappointed:\n> %s\n\n_Please contact them immediately and see if we can fix the issue *right here, right now*_' % (email, message))
print('\nSending message "%s" to channel %s' % (text,channel))
except:
print('Failed to get channel/text from message')
channel='general'
text=msg.value()
try:
sc_response = sc.api_call('chat.postMessage', channel=channel,
text=text, username='KSQL Notifications',
icon_emoji=':rocket:')
if not sc_response['ok']:
print('\t** FAILED: %s' % sc_response['error'])
except Exception as e:
print(type(e))
print(dir(e))
elif msg.error().code() == KafkaError._PARTITION_EOF:
print('End of partition reached {0}/{1}'
.format(msg.topic(), msg.partition()))
else:
            print('Error occurred: {0}'.format(msg.error().str()))
except Exception as e:
print(type(e))
print(dir(e))
finally:
c.close()
| 2.625 | 3 |
Computing Class Test 1/Jerick_FE03.py | granwyntan/Python-Notes | 0 | 12790835 | def maxScore(fe):
FE = {'o': 100,
'e': 10,
'g': 1,
'a': 0,
'b': -1,
'i': -10,
'u': -100}
final=[]
count=len(fe)
for a in range(0,count):
temp = 0
for d in fe[a:]:
temp += int(FE[d])
print(d)
final.append(temp)
temp = 0
for d in fe[:a]:
temp += int(FE[d])
print(d)
final.append(temp)
print(a)
return max(final)
maxScore('aabg')
| 3.34375 | 3 |
src/__init__.py | Irreq/deer-assistant | 0 | 12790836 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File name: __init__.py
# Description: Basic format for Python scripts
# Author: irreq (<EMAIL>)
# Date: 17/12/2021
"""Documentation"""
| 1.164063 | 1 |
scoreboard.py | cstuller/alieninvasion | 0 | 12790837 | <filename>scoreboard.py
import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard():
'''a class to report scoring information'''
def __init__(self, ai_settings, screen, stats):
'''initialize scorekeeping attributes'''
self.screen = screen
self.screen_rect = screen.get_rect()
self.ai_settings = ai_settings
self.stats = stats
#font settings for scoring information
self.text_color = (30, 30, 30)
self.font = pygame.font.SysFont(None, 48)
#prepare the initial score image
self.prep_score()
self.prep_high_score()
self.prep_level()
self.prep_ships()
def prep_score(self):
'''turn the score into a rendered image'''
rounded_score = int(round(self.stats.score, -1))
score_str = "Score: {:,}".format((rounded_score))
self.score_image = self.font.render(score_str, True, self.text_color, self.ai_settings.bg_color)
#display the score at the top right of the screen
self.score_rect = self.score_image.get_rect()
self.score_rect.right = self.screen_rect.right - 20
self.score_rect.top = 20
def prep_high_score(self):
'''turn the high score into a rendered image'''
high_score = int(round(self.stats.high_score, -1))
high_score_str = "High Score: {:,}".format((high_score))
self.high_score_image = self.font.render(high_score_str, True, self.text_color, self.ai_settings.bg_color)
#center the high score at the top of the screen
self.high_score_rect = self.high_score_image.get_rect()
self.high_score_rect.centerx = self.screen_rect.centerx
self.high_score_rect.top = self.score_rect.top
def prep_level(self):
'''turn the level into a rendered image'''
level_str = "{}".format(self.stats.level)
self.level_image = self.font.render(level_str, True, self.text_color, self.ai_settings.bg_color)
#position the level below the score
self.level_rect = self.level_image.get_rect()
self.level_rect.right = self.score_rect.right
self.level_rect.top = self.score_rect.bottom + 10
def prep_ships(self):
'''show how many ships are left'''
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_settings, self.screen)
ship.rect.x = 10 + ship_number * ship.rect.width
ship.rect.y = 10
self.ships.add(ship)
def show_score(self):
'''draw scores and level to the screen'''
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.high_score_image, self.high_score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
| 3.515625 | 4 |
tests/test_forward_backward_tracking.py | Megscammell/Estimating-Direction | 0 | 12790838 | import numpy as np
import est_dir
def test_1():
"""
Test for compute_forward() - check for flag=True.
"""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
region = 1
step = 0.17741338024633116
forward_tol = 1000000
no_vars = 10
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track = np.array([[0, f_old], [step, f_new]])
track, count_func_evals, flag = (est_dir.compute_forward
(step, const_forward, forward_tol, track,
centre_point, beta,
f, func_args))
assert(f_old > f_new)
assert(count_func_evals == len(track) - 2)
assert(flag == True)
assert(track[0][0] == 0)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_forward
if j < len(track) - 1:
assert(track[j][1] < track[j - 1][1])
else:
assert(track[j][1] > track[j - 1][1])
def test_2():
"""
Test for compute_forward() - check that when flag=False, track is returned.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 1
forward_tol = 100000
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track = np.array([[0, f_old], [step, f_new]])
test_track, count_func_evals, flag = (est_dir.compute_forward
(step, const_forward, forward_tol,
track, centre_point, beta, f,
func_args))
assert(f_old > f_new)
assert(flag == False)
assert(count_func_evals > 0)
for j in range(len(test_track)):
assert(test_track[j, 0] < forward_tol)
if j >= 1:
assert(test_track[j, 1] < test_track[j - 1, 1])
assert(test_track[j, 0] * const_forward > forward_tol)
def test_3():
"""
Test for forward_tracking - flag=True and f_new >= track[-2][1]
"""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.05
forward_tol = 1000000
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(len(track) - 1 == total_func_evals)
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(step, 3))
assert(flag == True)
for j in range(2, len(track)):
step = step * 2
assert(np.round(track[j][0], 3) == step)
if j == (len(track) - 1):
assert(track[j][1] > track[j - 1][1])
else:
assert(track[j - 1][1] > track[j][1])
def test_4():
"""
Test for forward_tracking - forward_tol not met and f_new < track[-2][1].
"""
np.random.seed(25)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 10)
t = 0.005
forward_tol = 10000
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, t, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(flag == True)
for j in range(1, len(track)):
if j == (len(track) - 1):
assert(track[j][1] > track[j-1][1])
else:
assert(track[j-1][1] > track[j][1])
def test_5():
"""
Test for forward_tracking - forward_tol not met initially, f_new <
track[-2][1] and eventually forward_tol is met.
"""
np.random.seed(25)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 10)
t = 0.005
forward_tol = 10
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, t, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(flag == False)
for j in range(1, len(track)):
assert(track[j-1][1] > track[j][1])
def test_6():
"""
Test for forward_tracking - forward_tol met.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 0.5
forward_tol = 1.5
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(flag == False)
assert(track[2][1] < track[1][1] < track[0][1])
assert(total_func_evals == 1)
def test_7():
"""
Test for compute_backward - check that when flag=True, track is updated.
"""
np.random.seed(90)
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 1, (m, ))
centre_point = np.random.uniform(0, 1, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 0.1)
step = 0.001
back_tol = 0.000001
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, f_old], [step, f_new]])
track, total_func_evals, flag = (est_dir.compute_backward
(step, const_back, back_tol, track,
centre_point, beta, f, func_args))
assert(total_func_evals == len(track) - 2)
assert(flag == True)
assert(track[0][0] == 0)
for j in range(1, len(track)):
assert(track[j][0] == step)
step = step * const_back
if j < len(track) - 1:
assert(track[j][1] < track[j-1][1])
else:
assert(track[j][1] > track[j-1][1])
def test_8():
"""
Test for compute_backward - check that when flag=False,
original track is returned.
"""
np.random.seed(90)
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 1, (m, ))
centre_point = np.random.uniform(0, 1, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 0.1)
step = 0.1
back_tol = 0.075
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, f_old], [step, f_new]])
track_new, total_func_evals, flag = (est_dir.compute_backward
(step, const_back, back_tol, track,
centre_point, beta, f, func_args))
assert(np.all(track == track_new))
assert(flag == False)
assert(total_func_evals == 0)
def test_9():
"""
Test for backward_tracking - back_tol is met.
"""
np.random.seed(32964)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
t = 1
back_tol = 1
beta = np.array([200, 200])
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, count_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(track.shape == (2, m))
assert(track[0][0] == 0)
assert(track[1][0] == t)
assert(track[1][0] < track[1][1])
assert(count_func_evals == 0)
def test_10():
"""
Test for backward_tracking - back tol is not met and f_new >
track[-2][1].
"""
np.random.seed(32964)
n = 6
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
t = 97.688932389756
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
for j in range(1, len(track)):
assert(np.round(track[j][0], 4) == np.round(t, 4))
t = t / 2
assert(np.min(track[:, 1]) < track[1][0])
def test_11():
"""
Test for backward_tracking - back tol is not met and f_new < track[-2][1]
"""
np.random.seed(329998)
n = 20
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 10, (m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 1000)
t = 17001.993794080016
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(np.min(track[:, 1]) < track[:, 1][0])
def test_12():
"""
Test for backward_tracking - back tol is not initially met, f_new <
    track[-2][1] and eventually back tol is met.
"""
np.random.seed(329998)
n = 20
m = 100
f = est_dir.quad_f_noise
const_back = 0.5
minimizer = np.random.uniform(0, 10, (m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 1000)
t = 17001.993794080016
back_tol = 1
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - t * beta, *func_args)
assert(f_old < f_new)
track, total_func_evals = (est_dir.backward_tracking
(centre_point, t, f_old, f_new, beta,
const_back, back_tol, f, func_args))
assert(np.round(track[0][0], 3) == np.round(0, 3))
assert(np.round(track[1][0], 3) == np.round(t, 3))
assert(total_func_evals > 0)
assert(np.min(track[:, 1]) < track[:, 1][0])
def test_13():
"""Test for compute_coeffs"""
track_y = np.array([100, 200, 50])
track_t = np.array([0, 1, 0.5])
design_matrix_step = np.vstack((np.repeat(track_y[0], len(track_t)),
np.array(track_t),
np.array(track_t) ** 2)).T
assert(np.all(design_matrix_step[0, :] == np.array([100, 0, 0])))
assert(np.all(design_matrix_step[1, :] == np.array([100, 1, 1])))
assert(np.all(design_matrix_step[2, :] == np.array([100, 0.5, 0.25])))
OLS = (np.linalg.inv(design_matrix_step.T @ design_matrix_step) @
design_matrix_step.T @ track_y)
check = -OLS[1] / (2 * OLS[2])
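    # -b / (2c) is the vertex of the fitted quadratic, i.e. the step size that
    # minimises the quadratic model of the responses along the search direction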
opt_t = est_dir.compute_coeffs(track_y, track_t)
assert(np.all(np.round(check, 5) == np.round(opt_t, 5)))
def test_14():
"""
Test for combine_tracking - check that correct step size is returned when
forward_tol is met.
"""
np.random.seed(90)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([20, 20])
matrix = np.identity(m)
func_args = (minimizer, matrix, 0, 0.0000001)
step = 1
forward_tol = 100000
back_tol = 0.0000001
beta = np.array([0.0001, 0.0001])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_15():
"""
Test for combine_tracking - check that correct step size is returned, when
forward_tol is not met.
"""
np.random.seed(3291)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.005
forward_tol = 10000
back_tol = 0.0000001
beta = np.array([1, 1])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_16():
"""
Test for combine_tracking - check that correct step size is returned,
when back_tol is met.
"""
np.random.seed(32964)
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.array([25, 25])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 1
back_tol = 1
forward_tol = 100000
beta = np.array([200, 200])
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val == f_old)
def test_17():
"""
Test for combine_tracking - check that correct step size is returned,
when back_tol is not met.
"""
np.random.seed(32964)
n = 6
m = 2
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 10, (m,))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 10
forward_tol = 1000000
back_tol = 0.000000001
no_vars = m
region = 1
beta, func_evals = est_dir.compute_direction_XY(n, m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == n)
f_old = f(np.copy(centre_point), *func_args)
upd_point, func_val, total_func_evals = (est_dir.combine_tracking
(centre_point, f_old,
beta, step, const_back,
back_tol, const_forward,
forward_tol, f,
func_args))
assert(upd_point.shape == (m, ))
assert(type(total_func_evals) is int)
assert(func_val < f_old)
def test_18():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 80],
[2, 160],
[4, 40],
[8, 20],
[16, 90]])
track_method = 'Forward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 20, 90])))
assert(np.all(track_t == np.array([0, 8, 16])))
def test_19():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 80],
[2, 70],
[4, 90]])
track_method = 'Forward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 70, 90])))
assert(np.all(track_t == np.array([0, 2, 4])))
def test_20():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 120],
[0.5, 110],
[0.25, 90]])
track_method = 'Backward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 90, 110])))
assert(np.all(track_t == np.array([0, 0.25, 0.5])))
def test_21():
"""Test for arrange_track_y_t"""
track = np.array([[0, 100],
[1, 120],
[0.5, 80]])
track_method = 'Backward'
track_y, track_t = est_dir.arrange_track_y_t(track, track_method)
assert(np.all(track_y == np.array([100, 80, 120])))
assert(np.all(track_t == np.array([0, 0.5, 1])))
def test_22():
"""Test for check_func_val_coeffs when func_val > track_y[1]."""
np.random.seed(90)
m = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 60)
step = 1.8251102718712913
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track = np.array([[0, 100],
[1, 160],
[2, 40],
[4, 90]])
track_method = 'Forward'
upd_point, func_val = (est_dir.check_func_val_coeffs
(track, track_method, centre_point, beta, f,
func_args))
assert(upd_point.shape == (m, ))
assert(func_val == 40)
def test_23():
"""Test for check_func_val_coeffs when func_val <= track_y[1]."""
np.random.seed(91)
m = 10
f = est_dir.quad_f_noise
const_back = 0.5
const_forward = (1 / const_back)
minimizer = np.ones((m,))
centre_point = np.random.uniform(0, 20, (m, ))
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
step = 0.01
forward_tol = 1000000
no_vars = 10
region = 1
beta, func_evals = est_dir.compute_direction_LS(m, centre_point, f,
func_args, no_vars,
region)
assert(func_evals == 16)
f_old = f(np.copy(centre_point), *func_args)
f_new = f(np.copy(centre_point) - step * beta, *func_args)
assert(f_old > f_new)
track, total_func_evals, flag = (est_dir.forward_tracking
(centre_point, step, f_old, f_new, beta,
const_forward, forward_tol, f,
func_args))
assert(flag == True)
assert(total_func_evals > 0)
track_method = 'Forward'
upd_point, func_val = (est_dir.check_func_val_coeffs
(track, track_method, centre_point, beta, f,
func_args))
assert(upd_point.shape == (m, ))
assert(np.all(func_val <= track[:, 1]))
| 2.171875 | 2 |
postreview/admin.py | harshiljhaveri/unicode-website | 16 | 12790839 | <reponame>harshiljhaveri/unicode-website
from django.contrib import admin
from .models import Review
admin.site.register(Review)
| 0.90625 | 1 |
creator/child_views/gender_tab.py | Jerakin/FakemonCreator | 4 | 12790840 | import logging as log
from PyQt5 import QtWidgets, uic
from PyQt5.QtCore import Qt
from creator.utils import util
from creator.child_views import shared
from creator.child_views import list_view
GENDERLESS = 0
MALE = 1
FEMALE = 2
class GenderTab(QtWidgets.QWidget, shared.Tab):
def __init__(self, data):
super(GenderTab, self).__init__()
uic.loadUi(util.RESOURCE_UI / 'GenderTab.ui', self)
self.data = data
self.extended = False
self.list_gender.setContextMenuPolicy(Qt.CustomContextMenu)
self.list_gender.customContextMenuRequested.connect(self.context_menu)
self.pkmn_list = util.pokemon_list()
self.speciesDropdown.addItems(self.pkmn_list)
self.speciesDropdown.activated.connect(self.extend_dropdown)
self.add_button.clicked.connect(self.add)
def extend_dropdown(self):
data = self.data.container.data() if self.data and self.data.container else None
if data and not self.extended:
self.pkmn_list.extend(data["pokemon.json"])
self.extended = True
self.speciesDropdown.clear()
self.speciesDropdown.addItems(self.pkmn_list)
def add(self):
species = self.speciesDropdown.currentText()
gender = GENDERLESS if self.radioNoGender.isChecked() else None
gender = MALE if self.radioMale.isChecked() else gender
gender = FEMALE if self.radioFemale.isChecked() else gender
self.setattr(self.data.gender, "species", species)
self.setattr(self.data.gender, "gender", gender)
def context_menu(self, pos):
context = QtWidgets.QMenu()
delete_action = context.addAction("delete")
action = context.exec_(self.list_gender.mapToGlobal(pos))
if action == delete_action:
self.delete_gender(self.list_gender.selectedItems()[0])
def delete_gender(self, widget_item):
species_name = widget_item.text()
button_reply = QtWidgets.QMessageBox.question(None, 'Delete',
"Would you like to remove {}".format(species_name),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel,
QtWidgets.QMessageBox.Cancel)
if button_reply == QtWidgets.QMessageBox.Yes:
self.data.container.delete_entry("gender.json", species_name)
self.list_gender.takeItem(self.list_gender.currentRow())
self.data._edited = True
if species_name == self.data.gender.species:
self.data.gender.new()
self.update_list_signal.emit()
log.info("Deleted {}".format(species_name))
def update_custom_list(self):
data = self.data.container.data() if self.data.container else None
if not data or "gender.json" not in data:
return
gender_data = data["gender.json"]
self.list_gender.clear()
for _species, _ in gender_data.items():
self.list_gender.addItem(_species)
| 2.21875 | 2 |
App/Server/ChartData.py | sujoy-coder/CFC-2020 | 1 | 12790841 | <reponame>sujoy-coder/CFC-2020
# This function formats the data for plotting in a line chart ...
def get_LineChartData(date_time,actualPower):
lineChartData = []
for i,j in zip(date_time,actualPower):
lineChartData.append(list((i,j)))
return lineChartData
# This function formats the data for plotting in a bar chart ...
def get_BarChartData(date_time,wind_speed,wind_deg,humidity):
barChartData = [['Date-Time ', 'Wind Speed (m/s)', 'Wind Direction (°)', 'Humidity (%)']]
for a,b,c,d in zip(date_time,wind_speed,wind_deg,humidity):
barChartData.append(list((a,b,c,d)))
return barChartData
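# This function formats the data to show in the data table ...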
def get_TableData(date_time,actualPower,wind_speed):
tableData = []
for p,q,r in zip(date_time,actualPower,wind_speed):
tableData.append(list((p,q,r)))
return tableData | 3.375 | 3 |
alipay/aop/api/response/AlipayEcoDoctemplateSettingurlQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12790842 | <filename>alipay/aop/api/response/AlipayEcoDoctemplateSettingurlQueryResponse.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcoDoctemplateSettingurlQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayEcoDoctemplateSettingurlQueryResponse, self).__init__()
self._setting_url = None
@property
def setting_url(self):
return self._setting_url
@setting_url.setter
def setting_url(self, value):
self._setting_url = value
def parse_response_content(self, response_content):
response = super(AlipayEcoDoctemplateSettingurlQueryResponse, self).parse_response_content(response_content)
if 'setting_url' in response:
self.setting_url = response['setting_url']
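# A hypothetical usage sketch (the raw gateway payload is an assumption; the parent
# class is responsible for unwrapping the JSON envelope before this hook runs):
#     resp = AlipayEcoDoctemplateSettingurlQueryResponse()
#     resp.parse_response_content(raw_gateway_json)  # raw_gateway_json comes from the Alipay API call
#     print(resp.setting_url)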
| 2.171875 | 2 |
cpovc_registry/templatetags/app_class.py | Rebeccacheptoek/cpims-ovc-3.0 | 3 | 12790843 | <reponame>Rebeccacheptoek/cpims-ovc-3.0<gh_stars>1-10
from django import template
register = template.Library()
@register.filter(name='get_class')
def get_class(value, args):
if value == args:
return 'active'
else:
split_vals = value.split('/')
if split_vals[1] == args:
return 'active'
else:
return ''
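# A hypothetical template usage sketch (the menu name 'dashboard' is an assumption;
# the template must load this library, whose name comes from this module's filename):
# {% load app_class %}
# <li class="{{ request.path|get_class:'dashboard' }}"><a href="/dashboard/">Dashboard</a></li>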
| 1.953125 | 2 |
ponyconf/management/commands/squashemails.py | JulienPalard/PonyConf | 11 | 12790844 | <reponame>JulienPalard/PonyConf<gh_stars>10-100
from django.core.management.base import BaseCommand
from accounts.models import User
class Command(BaseCommand):
help = 'Squash all users email'
def handle(self, *args, **options):
answer = input("""You are about to squash all users email.
This action is IRREVERSIBLE!
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """)
self.stdout.write('\n')
if answer != "yes":
self.stdout.write(self.style.NOTICE('Action cancelled.'))
return
for user in User.objects.all():
user.email = ''
user.save()
self.stdout.write(self.style.SUCCESS('All emails squashed.'))
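# Invocation sketch (standard Django management-command usage; run from the project root):
# $ python manage.py squashemails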
| 2.421875 | 2 |
starfish/test/full_pipelines/cli/test_iss.py | ttung/starfish | 0 | 12790845 | """
Notes
-----
This test and docs/source/usage/iss/iss_cli.sh test the same code paths and should be updated
together
"""
import os
import unittest
import numpy as np
import pandas as pd
import pytest
from starfish.test.full_pipelines.cli._base_cli_test import CLITest
from starfish.types import Features
EXPERIMENT_JSON_URL = "https://d2nhj9g34unfro.cloudfront.net/20181005/ISS-TEST/experiment.json"
@pytest.mark.slow
class TestWithIssData(CLITest, unittest.TestCase):
@property
def spots_file(self):
return "decoded-spots.nc"
@property
def subdirs(self):
return (
"max_projected",
"transforms",
"registered",
"filtered",
"results",
)
@property
def stages(self):
return (
[
"starfish", "validate", "experiment", EXPERIMENT_JSON_URL,
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][primary]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "max_projected", "primary_images.json"),
"MaxProj",
"--dims", "c",
"--dims", "z"
],
[
"starfish", "learn_transform",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "max_projected", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "transforms", "transforms.json"),
"Translation",
"--reference-stack",
f"@{EXPERIMENT_JSON_URL}[fov_001][dots]",
"--upsampling", "1000",
"--axes", "r"
],
[
"starfish", "apply_transform",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][primary]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "registered", "primary_images.json"),
"--transformation-list", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "transforms", "transforms.json"),
"Warp",
],
[
"starfish", "filter",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "registered", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][nuclei]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "nuclei.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "filter",
"--input",
f"@{EXPERIMENT_JSON_URL}[fov_001][dots]",
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "dots.json"),
"WhiteTophat",
"--masking-radius", "15",
],
[
"starfish", "detect_spots",
"--input", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc"),
"--blobs-stack", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "dots.json"),
"--blobs-axis", "r", "--blobs-axis", "c",
"BlobDetector",
"--min-sigma", "4",
"--max-sigma", "6",
"--num-sigma", "20",
"--threshold", "0.01",
],
[
"starfish", "segment",
"--primary-images", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "primary_images.json"),
"--nuclei", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "filtered", "nuclei.json"),
"-o", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "label_image.png"),
"Watershed",
"--nuclei-threshold", ".16",
"--input-threshold", ".22",
"--min-distance", "57",
],
[
"starfish", "target_assignment",
"--label-image",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "label_image.png"),
"--intensities", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc"),
"--output", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc"),
"Label",
],
[
"starfish", "decode",
"-i", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc"),
"--codebook",
f"@{EXPERIMENT_JSON_URL}",
"-o", lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "decoded-spots.nc"),
"PerRoundMaxChannelDecoder",
],
# Validate results/{spots,targeted-spots,decoded-spots}.nc
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "spots.nc")
],
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "targeted-spots.nc")
],
[
"starfish", "validate", "xarray",
lambda tempdir, *args, **kwargs: os.path.join(
tempdir, "results", "decoded-spots.nc")
],
)
def verify_results(self, intensities):
# TODO make this test stronger
genes, counts = np.unique(
intensities.coords[Features.TARGET], return_counts=True)
gene_counts = pd.Series(counts, genes)
# TODO THERE'S NO HUMAN/MOUSE KEYS?
assert gene_counts['ACTB']
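# Hypothetical invocation sketch (pytest is assumed as the runner; the test is marked slow above):
# $ pytest starfish/test/full_pipelines/cli/test_iss.py -m slow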
| 1.953125 | 2 |
gym_idsgame/agents/training_agents/q_learning/dqn/dqn.py | FredericoNesti/gym-idsgame | 15 | 12790846 | <filename>gym_idsgame/agents/training_agents/q_learning/dqn/dqn.py
"""
An agent for the IDSGameEnv that implements the DQN algorithm.
"""
from typing import Tuple
import numpy as np
import time
import tqdm
import torch
from torch.utils.tensorboard import SummaryWriter
from gym_idsgame.envs.rendering.video.idsgame_monitor import IdsGameMonitor
from gym_idsgame.agents.training_agents.q_learning.q_agent_config import QAgentConfig
from gym_idsgame.envs.idsgame_env import IdsGameEnv
from gym_idsgame.agents.dao.experiment_result import ExperimentResult
from gym_idsgame.envs.constants import constants
from gym_idsgame.agents.training_agents.models.fnn_w_linear import FNNwithLinear
from gym_idsgame.agents.training_agents.q_learning.experience_replay.replay_buffer import ReplayBuffer
from gym_idsgame.agents.training_agents.q_learning.q_agent import QAgent
class DQNAgent(QAgent):
"""
An implementation of the DQN algorithm from the paper 'Human-level control through deep reinforcement learning' by
Mnih et al.
(DQN is originally Neural-fitted Q-iteration but with the addition of a separate target network)
"""
def __init__(self, env:IdsGameEnv, config: QAgentConfig):
"""
Initialize environment and hyperparameters
:param config: the configuration
"""
super(DQNAgent, self).__init__(env, config)
self.attacker_q_network = None
self.attacker_target_network = None
self.defender_q_network = None
self.defender_target_network = None
self.loss_fn = None
self.attacker_optimizer = None
self.defender_optimizer = None
self.attacker_lr_decay = None
self.defender_lr_decay = None
self.tensorboard_writer = SummaryWriter(self.config.dqn_config.tensorboard_dir)
self.buffer = ReplayBuffer(config.dqn_config.replay_memory_size)
self.initialize_models()
self.tensorboard_writer.add_hparams(self.config.hparams_dict(), {})
self.env.idsgame_config.save_trajectories = False
self.env.idsgame_config.save_attack_stats = False
def warmup(self) -> None:
"""
A warmup without any learning just to populate the replay buffer following a random strategy
:return: None
"""
# Setup logging
outer_warmup = tqdm.tqdm(total=self.config.dqn_config.replay_start_size, desc='Warmup', position=0)
outer_warmup.set_description_str("[Warmup] step:{}, buffer_size: {}".format(0, 0))
# Reset env
obs = self.env.reset(update_stats=False)
attacker_obs, defender_obs = obs
obs_state_a = self.update_state(attacker_obs, defender_obs, attacker=True, state=[])
obs_state_d = self.update_state(attacker_obs, defender_obs, attacker=False, state=[])
obs = (obs_state_a, obs_state_d)
self.config.logger.info("Starting warmup phase to fill replay buffer")
# Perform <self.config.dqn_config.replay_start_size> steps and fill the replay memory
for i in range(self.config.dqn_config.replay_start_size):
if i % self.config.train_log_frequency == 0:
log_str = "[Warmup] step:{}, buffer_size: {}".format(i, self.buffer.size())
outer_warmup.set_description_str(log_str)
self.config.logger.info(log_str)
# Select random attacker and defender actions
attacker_actions = list(range(self.env.num_attack_actions))
defender_actions = list(range(self.env.num_defense_actions))
legal_attack_actions = list(filter(lambda action: self.env.is_attack_legal(action), attacker_actions))
legal_defense_actions = list(filter(lambda action: self.env.is_defense_legal(action), defender_actions))
attacker_action = np.random.choice(legal_attack_actions)
defender_action = np.random.choice(legal_defense_actions)
action = (attacker_action, defender_action)
# Take action in the environment
obs_prime, reward, done, info = self.env.step(action)
attacker_obs_prime, defender_obs_prime = obs_prime
obs_state_a_prime = self.update_state(attacker_obs_prime, defender_obs_prime, attacker=True, state=[])
obs_state_d_prime = self.update_state(attacker_obs_prime, defender_obs_prime, attacker=False, state=[])
obs_prime = (obs_state_a_prime, obs_state_d_prime)
# Add transition to replay memory
self.buffer.add_tuple(obs, action, reward, done, obs_prime)
# Move to new state
obs = obs_prime
outer_warmup.update(1)
if done:
obs = self.env.reset(update_stats=False)
attacker_obs, defender_obs = obs
obs_state_a = self.update_state(attacker_obs, defender_obs, attacker=True, state=[])
obs_state_d = self.update_state(attacker_obs, defender_obs, attacker=False, state=[])
obs = (obs_state_a, obs_state_d)
self.config.logger.info("{} Warmup steps completed, replay buffer size: {}".format(
self.config.dqn_config.replay_start_size, self.buffer.size()))
self.env.close()
try:
# Add network graph to tensorboard with a sample batch as input
mini_batch = self.buffer.sample(self.config.dqn_config.batch_size)
s_attacker_batch, s_defender_batch, a_attacker_batch, a_defender_batch, r_attacker_batch, r_defender_batch, \
d_batch, s2_attacker_batch, s2_defender_batch = mini_batch
if self.config.attacker:
s_1 = torch.tensor(s_attacker_batch).float()
# Move to GPU if using GPU
if torch.cuda.is_available() and self.config.dqn_config.gpu:
device = torch.device("cuda:0")
s_1 = s_1.to(device)
self.tensorboard_writer.add_graph(self.attacker_q_network, s_1)
if self.config.defender:
s_1 = torch.tensor(s_defender_batch).float()
# Move to GPU if using GPU
if torch.cuda.is_available() and self.config.dqn_config.gpu:
device = torch.device("cuda:0")
s_1 = s_1.to(device)
self.tensorboard_writer.add_graph(self.defender_q_network, s_1)
except Exception:
self.config.logger.warning("Error when trying to add network graph to tensorboard")
def initialize_models(self) -> None:
"""
Initialize models
:return: None
"""
# Initialize models
self.attacker_q_network = FNNwithLinear(self.config.dqn_config.input_dim, self.config.dqn_config.attacker_output_dim,
self.config.dqn_config.hidden_dim,
num_hidden_layers=self.config.dqn_config.num_hidden_layers,
hidden_activation=self.config.dqn_config.hidden_activation)
self.attacker_target_network = FNNwithLinear(self.config.dqn_config.input_dim, self.config.dqn_config.attacker_output_dim,
self.config.dqn_config.hidden_dim,
num_hidden_layers=self.config.dqn_config.num_hidden_layers,
hidden_activation=self.config.dqn_config.hidden_activation)
self.defender_q_network = FNNwithLinear(self.config.dqn_config.input_dim, self.config.dqn_config.defender_output_dim,
self.config.dqn_config.hidden_dim,
num_hidden_layers=self.config.dqn_config.num_hidden_layers,
hidden_activation=self.config.dqn_config.hidden_activation)
self.defender_target_network = FNNwithLinear(self.config.dqn_config.input_dim, self.config.dqn_config.defender_output_dim,
self.config.dqn_config.hidden_dim,
num_hidden_layers=self.config.dqn_config.num_hidden_layers,
hidden_activation=self.config.dqn_config.hidden_activation)
# Specify device
if torch.cuda.is_available() and self.config.dqn_config.gpu:
device = torch.device("cuda:0")
self.config.logger.info("Running on the GPU")
else:
device = torch.device("cpu")
self.config.logger.info("Running on the CPU")
self.attacker_q_network.to(device)
self.attacker_target_network.to(device)
self.defender_q_network.to(device)
self.defender_target_network.to(device)
# Set the target network to use the same weights initialization as the q-network
self.attacker_target_network.load_state_dict(self.attacker_q_network.state_dict())
self.defender_target_network.load_state_dict(self.defender_q_network.state_dict())
# The target network is not trainable; it is only used for predictions. Therefore we set it to eval mode
# to turn off dropouts, batch norms, gradient computations etc.
self.attacker_target_network.eval()
self.defender_target_network.eval()
# Construct loss function
if self.config.dqn_config.loss_fn == "MSE":
self.loss_fn = torch.nn.MSELoss()
elif self.config.dqn_config.loss_fn == "Huber":
self.loss_fn = torch.nn.SmoothL1Loss()
else:
raise ValueError("Loss function not recognized")
# Define Optimizer. The call to model.parameters() in the optimizer constructor will contain the learnable
# parameters of the layers in the model
if self.config.dqn_config.optimizer == "Adam":
self.attacker_optimizer = torch.optim.Adam(self.attacker_q_network.parameters(), lr=self.config.alpha)
self.defender_optimizer = torch.optim.Adam(self.defender_q_network.parameters(), lr=self.config.alpha)
elif self.config.dqn_config.optimizer == "SGD":
self.attacker_optimizer = torch.optim.SGD(self.attacker_q_network.parameters(), lr=self.config.alpha)
self.defender_optimizer = torch.optim.SGD(self.defender_q_network.parameters(), lr=self.config.alpha)
else:
raise ValueError("Optimizer not recognized")
# LR decay
if self.config.dqn_config.lr_exp_decay:
self.attacker_lr_decay = torch.optim.lr_scheduler.ExponentialLR(optimizer=self.attacker_optimizer,
gamma=self.config.dqn_config.lr_decay_rate)
self.defender_lr_decay = torch.optim.lr_scheduler.ExponentialLR(optimizer=self.defender_optimizer,
gamma=self.config.dqn_config.lr_decay_rate)
def training_step(self,
mini_batch: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray,
np.ndarray, np.ndarray, np.ndarray], attacker: bool = True) -> torch.Tensor:
"""
Performs a training step of the Deep-Q-learning algorithm (implemented in PyTorch)
:param mini_batch: a minibatch to use for the training step
:param attacker: whether doing a training step for the attacker (otherwise defender)
:return: loss
"""
# Unpack batch of transitions from the replay memory
s_attacker_batch, s_defender_batch, a_attacker_batch, a_defender_batch, r_attacker_batch, r_defender_batch, \
d_batch, s2_attacker_batch, s2_defender_batch = mini_batch
# Convert batch into torch tensors and set Q-network in train mode and target network in eval mode
if attacker:
self.attacker_q_network.train()
self.attacker_target_network.eval()
r_1 = torch.tensor(r_attacker_batch).float()
s_1 = torch.tensor(s_attacker_batch).float()
s_2 = torch.tensor(s2_attacker_batch).float()
else:
self.defender_q_network.train()
self.defender_target_network.eval()
r_1 = torch.tensor(r_defender_batch).float()
s_1 = torch.tensor(s_defender_batch).float()
s_2 = torch.tensor(s2_defender_batch).float()
# Move to GPU if using GPU
if torch.cuda.is_available() and self.config.dqn_config.gpu:
device = torch.device("cuda:0")
r_1 = r_1.to(device)
s_1 = s_1.to(device)
s_2 = s_2.to(device)
# Set target baseline. We only want the loss to be computed for the Q-values of the actions taken, not the entire
# vector of all Q-values. Therefore we initialize the target to the Q-values of the Q-network for s
# and then we only update the Q-values for the affected actions with the real targets
if attacker:
target = self.attacker_q_network(s_1)
else:
target = self.defender_q_network(s_1)
# Use the target network to compute the Q-values of s'
with torch.no_grad():
if attacker:
target_next = self.attacker_target_network(s_2).detach()
else:
target_next = self.defender_target_network(s_2).detach()
for i in range(self.config.dqn_config.batch_size):
# As defined by Mnih et al.: for terminal states the Q-target should be equal to the immediate reward
if d_batch[i]:
if attacker:
target[i][a_attacker_batch[i]] = r_1[i]
else:
target[i][a_defender_batch[i]] = r_1[i]
# For non terminal states the Q-target should be the immediate reward plus the discounted estimated future
# reward when following Q* estimated by the target network.
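# i.e. the standard DQN backup: y_i = r_i + gamma * max_a' Q_target(s'_i, a')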
else:
a = torch.argmax(target_next[i]).detach()
if attacker:
target[i][a_attacker_batch[i]] = r_1[i] + self.config.gamma * (target_next[i][a])
else:
target[i][a_defender_batch[i]] = r_1[i] + self.config.gamma * (target_next[i][a])
# Compute loss
if attacker:
prediction = self.attacker_q_network(s_1)
else:
prediction = self.defender_q_network(s_1)
loss = self.loss_fn(prediction, target)
# Zero gradients, perform a backward pass, and update the weights.
if attacker:
self.attacker_optimizer.zero_grad()
loss.backward()
self.attacker_optimizer.step()
else:
self.defender_optimizer.zero_grad()
loss.backward()
self.defender_optimizer.step()
return loss
def get_action(self, state: np.ndarray, eval : bool = False, attacker : bool = True) -> int:
"""
Samples an action according to a epsilon-greedy strategy using the Q-network
:param state: the state to sample an action for
:param eval: boolean flag whether running in evaluation mode
:param attacker: boolean flag whether running in attacker mode (if false assume defender)
:return: The sampled action id
"""
state = torch.from_numpy(state.flatten()).float()
# Move to GPU if using GPU
if torch.cuda.is_available() and self.config.dqn_config.gpu:
device = torch.device("cuda:0")
state = state.to(device)
if attacker:
actions = list(range(self.env.num_attack_actions))
legal_actions = list(filter(lambda action: self.env.is_attack_legal(action), actions))
else:
actions = list(range(self.env.num_defense_actions))
legal_actions = list(filter(lambda action: self.env.is_defense_legal(action), actions))
if (np.random.rand() < self.config.epsilon and not eval) \
or (eval and np.random.random() < self.config.eval_epsilon):
return np.random.choice(legal_actions)
with torch.no_grad():
if attacker:
act_values = self.attacker_q_network(state)
else:
act_values = self.defender_q_network(state)
return legal_actions[torch.argmax(act_values[legal_actions]).item()]
def train(self) -> ExperimentResult:
"""
Runs the DQN algorithm
:return: Experiment result
"""
self.config.logger.info("Starting Warmup")
self.warmup()
self.config.logger.info("Starting Training")
self.config.logger.info(self.config.to_str())
if len(self.train_result.avg_episode_steps) > 0:
self.config.logger.warning("starting training with non-empty result object")
done = False
obs = self.env.reset(update_stats=False)
attacker_obs, defender_obs = obs
obs_state_a = self.update_state(attacker_obs, defender_obs, attacker=True, state=[])
obs_state_d = self.update_state(attacker_obs, defender_obs, attacker=False, state=[])
obs = (obs_state_a, obs_state_d)
attacker_obs, defender_obs = obs
# Tracking metrics
episode_attacker_rewards = []
episode_defender_rewards = []
episode_steps = []
episode_avg_attacker_loss = []
episode_avg_defender_loss = []
# Logging
self.outer_train.set_description_str("[Train] epsilon:{:.2f},avg_a_R:{:.2f},avg_d_R:{:.2f},"
"avg_t:{:.2f},avg_h:{:.2f},acc_A_R:{:.2f}," \
"acc_D_R:{:.2f}".format(self.config.epsilon, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
# Training
for episode in range(self.config.num_episodes):
episode_attacker_reward = 0
episode_defender_reward = 0
episode_step = 0
episode_attacker_loss = 0.0
episode_defender_loss = 0.0
while not done:
if self.config.render:
self.env.render(mode="human")
if not self.config.attacker and not self.config.defender:
raise AssertionError("Must specify whether training an attacker agent or defender agent")
# Default initialization
attacker_action = 0
defender_action = 0
# Get attacker and defender actions
if self.config.attacker:
attacker_action = self.get_action(attacker_obs, attacker=True)
if self.config.defender:
defender_action = self.get_action(defender_obs, attacker=False)
action = (attacker_action, defender_action)
# Take a step in the environment
obs_prime, reward, done, _ = self.env.step(action)
attacker_obs_prime, defender_obs_prime = obs_prime
obs_state_a_prime = self.update_state(attacker_obs_prime, defender_obs_prime, attacker=True, state=[])
obs_state_d_prime = self.update_state(attacker_obs_prime, defender_obs_prime, attacker=False, state=[])
obs_prime = (obs_state_a_prime, obs_state_d_prime)
# Add transition to replay memory
self.buffer.add_tuple(obs, action, reward, done, obs_prime)
# Sample random mini_batch of transitions from replay memory
minibatch = self.buffer.sample(self.config.dqn_config.batch_size)
# Perform a gradient descent step of the Q-network using targets produced by target network
if self.config.attacker:
loss = self.training_step(minibatch, attacker=True)
episode_attacker_loss += loss.item()
if self.config.defender:
loss = self.training_step(minibatch, attacker=False)
episode_defender_loss += loss.item()
# Update metrics
attacker_reward, defender_reward = reward
obs_prime_attacker, obs_prime_defender = obs_prime
episode_attacker_reward += attacker_reward
episode_defender_reward += defender_reward
episode_step += 1
# Move to the next state
obs = obs_prime
attacker_obs = obs_prime_attacker
defender_obs = obs_prime_defender
# Render final frame
if self.config.render:
self.env.render(mode="human")
# Decay LR after every episode
lr = self.config.alpha
if self.config.dqn_config.lr_exp_decay:
self.attacker_lr_decay.step()
lr = self.attacker_lr_decay.get_lr()[0]
# Record episode metrics
self.num_train_games += 1
self.num_train_games_total += 1
if self.env.state.hacked:
self.num_train_hacks += 1
self.num_train_hacks_total += 1
episode_attacker_rewards.append(episode_attacker_reward)
episode_defender_rewards.append(episode_defender_reward)
if episode_step > 0:
if self.config.attacker:
episode_avg_attacker_loss.append(episode_attacker_loss/episode_step)
if self.config.defender:
episode_avg_defender_loss.append(episode_defender_loss / episode_step)
else:
if self.config.attacker:
episode_avg_attacker_loss.append(episode_attacker_loss)
if self.config.defender:
episode_avg_defender_loss.append(episode_defender_loss)
episode_steps.append(episode_step)
# Log average metrics every <self.config.train_log_frequency> episodes
if episode % self.config.train_log_frequency == 0:
if self.num_train_games > 0 and self.num_train_games_total > 0:
self.train_hack_probability = self.num_train_hacks / self.num_train_games
self.train_cumulative_hack_probability = self.num_train_hacks_total / self.num_train_games_total
else:
self.train_hack_probability = 0.0
self.train_cumulative_hack_probability = 0.0
self.log_metrics(episode, self.train_result, episode_attacker_rewards, episode_defender_rewards, episode_steps,
episode_avg_attacker_loss, episode_avg_defender_loss, lr=lr)
# Log values and gradients of the parameters (histogram summary) to tensorboard
if self.config.attacker:
for tag, value in self.attacker_q_network.named_parameters():
tag = tag.replace('.', '/')
self.tensorboard_writer.add_histogram(tag, value.data.cpu().numpy(), episode)
self.tensorboard_writer.add_histogram(tag + '_attacker/grad', value.grad.data.cpu().numpy(),
episode)
if self.config.defender:
for tag, value in self.defender_q_network.named_parameters():
tag = tag.replace('.', '/')
self.tensorboard_writer.add_histogram(tag, value.data.cpu().numpy(), episode)
self.tensorboard_writer.add_histogram(tag + '_defender/grad', value.grad.data.cpu().numpy(),
episode)
episode_attacker_rewards = []
episode_defender_rewards = []
episode_steps = []
self.num_train_games = 0
self.num_train_hacks = 0
# Update target network every <self.config.dqn_config.target_network_update_freq> episodes
if episode % self.config.dqn_config.target_network_update_freq == 0:
self.update_target_network()
# Run evaluation every <self.config.eval_frequency> episodes
if episode % self.config.eval_frequency == 0:
self.eval(episode)
# Save models every <self.config.checkpoint_frequency> episodes
if episode % self.config.checkpoint_freq == 0:
self.save_model()
self.env.save_trajectories(checkpoint=True)
self.env.save_attack_data(checkpoint=True)
if self.config.save_dir is not None:
time_str = str(time.time())
self.train_result.to_csv(self.config.save_dir + "/" + time_str + "_train_results_checkpoint.csv")
self.eval_result.to_csv(self.config.save_dir + "/" + time_str + "_eval_results_checkpoint.csv")
# Reset environment for the next episode and update game stats
done = False
obs = self.env.reset(update_stats=True)
attacker_obs, defender_obs = obs
obs_state_a = self.update_state(attacker_obs, defender_obs, attacker=True, state=[])
obs_state_d = self.update_state(attacker_obs, defender_obs, attacker=False, state=[])
obs = (obs_state_a, obs_state_d)
attacker_obs, defender_obs = obs
self.outer_train.update(1)
# Anneal epsilon linearly
self.anneal_epsilon()
self.config.logger.info("Training Complete")
# Final evaluation (for saving Gifs etc)
self.eval(self.config.num_episodes-1, log=False)
# Save Q-networks
self.save_model()
# Save other game data
self.env.save_trajectories(checkpoint=False)
self.env.save_attack_data(checkpoint=False)
if self.config.save_dir is not None:
time_str = str(time.time())
self.train_result.to_csv(self.config.save_dir + "/" + time_str + "_train_results_checkpoint.csv")
self.eval_result.to_csv(self.config.save_dir + "/" + time_str + "_eval_results_checkpoint.csv")
return self.train_result
def update_target_network(self) -> None:
"""
Updates the target networks. Delayed targets are used to stabilize training and partially remedy the
problem with non-stationary targets in RL with function approximation.
:return: None
"""
self.config.logger.info("Updating target network")
if self.config.attacker:
self.attacker_target_network.load_state_dict(self.attacker_q_network.state_dict())
self.attacker_target_network.eval()
if self.config.defender:
self.defender_target_network.load_state_dict(self.defender_q_network.state_dict())
self.defender_target_network.eval()
def eval(self, train_episode, log=True) -> ExperimentResult:
"""
Performs evaluation with the greedy policy with respect to the learned Q-values
:param train_episode: the train episode to keep track of logging
:param log: whether to log the result
:return: None
"""
self.config.logger.info("Starting Evaluation")
time_str = str(time.time())
self.num_eval_games = 0
self.num_eval_hacks = 0
if len(self.eval_result.avg_episode_steps) > 0:
self.config.logger.warning("starting eval with non-empty result object")
if self.config.eval_episodes < 1:
return
done = False
# Video config
if self.config.video:
if self.config.video_dir is None:
raise AssertionError("Video is set to True but no video_dir is provided, please specify "
"the video_dir argument")
self.env = IdsGameMonitor(self.env, self.config.video_dir + "/" + time_str, force=True,
video_frequency=self.config.video_frequency)
self.env.metadata["video.frames_per_second"] = self.config.video_fps
# Tracking metrics
episode_attacker_rewards = []
episode_defender_rewards = []
episode_steps = []
# Logging
self.outer_eval = tqdm.tqdm(total=self.config.eval_episodes, desc='Eval Episode', position=1)
self.outer_eval.set_description_str(
"[Eval] avg_a_R:{:.2f},avg_d_R:{:.2f},avg_t:{:.2f},avg_h:{:.2f},acc_A_R:{:.2f}," \
"acc_D_R:{:.2f}".format(0.0, 0.0, 0.0, 0.0, 0.0, 0.0))
# Eval
obs = self.env.reset(update_stats=False)
attacker_obs, defender_obs = obs
obs_state_a = self.update_state(attacker_obs, defender_obs, attacker=True, state=[])
obs_state_d = self.update_state(attacker_obs, defender_obs, attacker=False, state=[])
obs = (obs_state_a, obs_state_d)
attacker_obs, defender_obs = obs
for episode in range(self.config.eval_episodes):
episode_attacker_reward = 0
episode_defender_reward = 0
episode_step = 0
while not done:
if self.config.eval_render:
self.env.render()
time.sleep(self.config.eval_sleep)
# Default initialization
attacker_action = 0
defender_action = 0
# Get attacker and defender actions
if self.config.attacker:
attacker_action = self.get_action(attacker_obs, eval=True, attacker=True)
if self.config.defender:
defender_action = self.get_action(defender_obs, eval=True, attacker=False)
action = (attacker_action, defender_action)
# Take a step in the environment
obs_prime, reward, done, _ = self.env.step(action)
attacker_obs_prime, defender_obs_prime = obs_prime
obs_state_a_prime = self.update_state(attacker_obs_prime, defender_obs_prime, attacker=True, state=[])
obs_state_d_prime = self.update_state(attacker_obs_prime, defender_obs_prime, attacker=False, state=[])
obs_prime = (obs_state_a_prime, obs_state_d_prime)
# Update state information and metrics
attacker_reward, defender_reward = reward
obs_prime_attacker, obs_prime_defender = obs_prime
episode_attacker_reward += attacker_reward
episode_defender_reward += defender_reward
episode_step += 1
attacker_obs = obs_prime_attacker
defender_obs = obs_prime_defender
# Render final frame when game completed
if self.config.eval_render:
self.env.render()
time.sleep(self.config.eval_sleep)
self.config.logger.info("Eval episode: {}, Game ended after {} steps".format(episode, episode_step))
# Record episode metrics
episode_attacker_rewards.append(episode_attacker_reward)
episode_defender_rewards.append(episode_defender_reward)
episode_steps.append(episode_step)
# Update eval stats
self.num_eval_games += 1
self.num_eval_games_total += 1
if self.env.state.detected:
self.eval_attacker_cumulative_reward -= constants.GAME_CONFIG.POSITIVE_REWARD
self.eval_defender_cumulative_reward += constants.GAME_CONFIG.POSITIVE_REWARD
if self.env.state.hacked:
self.eval_attacker_cumulative_reward += constants.GAME_CONFIG.POSITIVE_REWARD
self.eval_defender_cumulative_reward -= constants.GAME_CONFIG.POSITIVE_REWARD
self.num_eval_hacks += 1
self.num_eval_hacks_total += 1
# Log average metrics every <self.config.eval_log_frequency> episodes
if episode % self.config.eval_log_frequency == 0 and log:
if self.num_eval_hacks > 0:
self.eval_hack_probability = float(self.num_eval_hacks) / float(self.num_eval_games)
if self.num_eval_games_total > 0:
self.eval_cumulative_hack_probability = float(self.num_eval_hacks_total) / float(
self.num_eval_games_total)
self.log_metrics(episode, self.eval_result, episode_attacker_rewards, episode_defender_rewards, episode_steps,
eval = True, update_stats=False)
# Save gifs
if self.config.gifs and self.config.video:
self.env.generate_gif(self.config.gif_dir + "/episode_" + str(train_episode) + "_"
+ time_str + ".gif", self.config.video_fps)
# Add frames to tensorboard
for idx, frame in enumerate(self.env.episode_frames):
self.tensorboard_writer.add_image(str(train_episode) + "_eval_frames/" + str(idx),
frame, global_step=train_episode,
dataformats = "HWC")
# Reset for new eval episode
done = False
obs = self.env.reset(update_stats=False)
attacker_obs, defender_obs = obs
obs_state_a = self.update_state(attacker_obs, defender_obs, attacker=True, state=[])
obs_state_d = self.update_state(attacker_obs, defender_obs, attacker=False, state=[])
obs = (obs_state_a, obs_state_d)
attacker_obs, defender_obs = obs
self.outer_eval.update(1)
# Log average eval statistics
if log:
if self.num_eval_hacks > 0:
self.eval_hack_probability = float(self.num_eval_hacks) / float(self.num_eval_games)
if self.num_eval_games_total > 0:
self.eval_cumulative_hack_probability = float(self.num_eval_hacks_total) / float(
self.num_eval_games_total)
self.log_metrics(train_episode, self.eval_result, episode_attacker_rewards, episode_defender_rewards,
episode_steps, eval=True, update_stats=True)
self.env.close()
self.config.logger.info("Evaluation Complete")
return self.eval_result
def save_model(self) -> None:
"""
Saves the PyTorch Model Weights
:return: None
"""
time_str = str(time.time())
if self.config.save_dir is not None:
if self.config.attacker:
path = self.config.save_dir + "/" + time_str + "_attacker_q_network.pt"
self.config.logger.info("Saving Q-network to: {}".format(path))
torch.save(self.attacker_q_network.state_dict(), path)
if self.config.defender:
path = self.config.save_dir + "/" + time_str + "_defender_q_network.pt"
self.config.logger.info("Saving Q-network to: {}".format(path))
torch.save(self.defender_q_network.state_dict(), path)
else:
self.config.logger.warning("Save path not defined, not saving Q-networks to disk")
def update_state(self, attacker_obs: np.ndarray = None, defender_obs: np.ndarray = None,
state: np.ndarray = None, attacker: bool = True) -> np.ndarray:
"""
Update approximative Markov state
:param attacker_obs: attacker obs
:param defender_obs: defender observation
:param state: current state
:param attacker: boolean flag whether it is attacker or not
:return: new state
"""
if attacker and self.env.idsgame_config.game_config.reconnaissance_actions:
#if not self.env.local_view_features():
a_obs_len = self.env.idsgame_config.game_config.num_attack_types + 1
defender_obs = attacker_obs[:, a_obs_len:a_obs_len+self.env.idsgame_config.game_config.num_attack_types]
if self.env.idsgame_config.reconnaissance_bool_features:
d_bool_features = attacker_obs[:, a_obs_len+self.env.idsgame_config.game_config.num_attack_types:]
attacker_obs = attacker_obs[:, 0:a_obs_len]
# else:
# a_obs_len = self.env.idsgame_config.game_config.num_attack_types + 1
# defender_obs = attacker_obs[:,
# a_obs_len:a_obs_len + self.env.idsgame_config.game_config.num_attack_types]
# if self.env.idsgame_config.reconnaissance_bool_features:
# d_bool_features = attacker_obs[:,
# a_obs_len + self.env.idsgame_config.game_config.num_attack_types:]
# attacker_obs = attacker_obs[:, 0:a_obs_len]
if not attacker and self.env.local_view_features():
attacker_obs = self.env.state.get_attacker_observation(
self.env.idsgame_config.game_config.network_config,
local_view=False,
reconnaissance=self.env.idsgame_config.reconnaissance_actions)
# Zero mean
if self.config.dqn_config.zero_mean_features:
if not self.env.local_view_features() or not attacker:
attacker_obs_1 = attacker_obs[:, 0:-1]
else:
attacker_obs_1 = attacker_obs[:, 0:-2]
zero_mean_attacker_features = []
for idx, row in enumerate(attacker_obs_1):
mean = np.mean(row)
if mean != 0:
t = row - mean
else:
t = row
if np.isnan(t).any():
t = attacker_obs[idx]
else:
t = t.tolist()
if not self.env.local_view_features() or not attacker:
t.append(attacker_obs[idx][-1])
else:
t.append(attacker_obs[idx][-2])
t.append(attacker_obs[idx][-1])
zero_mean_attacker_features.append(t)
defender_obs_1 = defender_obs[:, 0:-1]
zero_mean_defender_features = []
for idx, row in enumerate(defender_obs_1):
mean = np.mean(row)
if mean != 0:
t = row - mean
else:
t = row
if np.isnan(t).any():
t = defender_obs[idx]
else:
t = t.tolist()
t.append(defender_obs[idx][-1])
zero_mean_defender_features.append(t)
attacker_obs = np.array(zero_mean_attacker_features)
defender_obs = np.array(zero_mean_defender_features)
# Normalize
if self.config.dqn_config.normalize_features:
if not self.env.local_view_features() or not attacker:
attacker_obs_1 = attacker_obs[:, 0:-1] / np.linalg.norm(attacker_obs[:, 0:-1])
else:
attacker_obs_1 = attacker_obs[:, 0:-2] / np.linalg.norm(attacker_obs[:, 0:-2])
normalized_attacker_features = []
for idx, row in enumerate(attacker_obs_1):
if np.isnan(attacker_obs_1).any():
t = attacker_obs[idx]
else:
t = row.tolist()
if not self.env.local_view_features() or not attacker:
t.append(attacker_obs[idx][-1])
else:
t.append(attacker_obs[idx][-2])
t.append(attacker_obs[idx][-1])
normalized_attacker_features.append(t)
if attacker and self.env.idsgame_config.game_config.reconnaissance_actions:
defender_obs_1 = defender_obs[:, 0:-1] / np.linalg.norm(defender_obs[:, 0:-1])
else:
defender_obs_1 = defender_obs / np.linalg.norm(defender_obs)
normalized_defender_features = []
for idx, row in enumerate(defender_obs_1):
if np.isnan(defender_obs_1).any():
t = defender_obs[idx]
else:
if attacker and self.env.idsgame_config.game_config.reconnaissance_actions:
t = row.tolist()
t.append(defender_obs[idx][-1])
else:
t = row
normalized_defender_features.append(t)
attacker_obs = np.array(normalized_attacker_features)
defender_obs = np.array(normalized_defender_features)
if self.env.local_view_features() and attacker:
if not self.env.idsgame_config.game_config.reconnaissance_actions:
neighbor_defense_attributes = np.zeros((attacker_obs.shape[0], defender_obs.shape[1]))
for node in range(attacker_obs.shape[0]):
id = int(attacker_obs[node][-1])
neighbor_defense_attributes[node] = defender_obs[id]
else:
neighbor_defense_attributes = defender_obs
if self.env.fully_observed() or \
(self.env.idsgame_config.game_config.reconnaissance_actions and attacker):
if self.config.dqn_config.merged_ad_features:
if not self.env.local_view_features() or not attacker:
a_pos = attacker_obs[:, -1]
if not self.env.idsgame_config.game_config.reconnaissance_actions:
det_values = defender_obs[:, -1]
temp = defender_obs[:, 0:-1] - attacker_obs[:, 0:-1]
else:
temp = defender_obs[:, 0:] - attacker_obs[:, 0:-1]
features = []
for idx, row in enumerate(temp):
t = row.tolist()
t.append(a_pos[idx])
if self.env.fully_observed():
t.append(det_values[idx])
features.append(t)
else:
node_ids = attacker_obs[:, -1]
if not self.env.idsgame_config.game_config.reconnaissance_actions:
det_values = neighbor_defense_attributes[:, -1]
if not self.env.idsgame_config.game_config.reconnaissance_actions:
temp = neighbor_defense_attributes[:, 0:-1] - attacker_obs[:, 0:-1]
else:
temp = np.full(neighbor_defense_attributes.shape, -1)
for i in range(len(neighbor_defense_attributes)):
if np.sum(neighbor_defense_attributes[i]) > 0:
temp[i] = neighbor_defense_attributes[i] - attacker_obs[i, 0:-1]
features = []
for idx, row in enumerate(temp):
t = row.tolist()
t.append(node_ids[idx])
#t.append(node_reachable[idx])
if not self.env.idsgame_config.game_config.reconnaissance_actions:
t.append(det_values[idx])
features.append(t)
features = np.array(features)
if self.env.idsgame_config.reconnaissance_bool_features:
f = np.zeros((features.shape[0], features.shape[1] + d_bool_features.shape[1]))
for i in range(features.shape[0]):
f[i] = np.append(features[i], d_bool_features[i])
features = f
if self.config.dqn_config.state_length == 1:
return features
if len(state) == 0:
s = np.array([features] * self.config.dqn_config.state_length)
return s
state = np.append(state[1:], np.array([features]), axis=0)
return state
else:
if not self.env.local_view_features() or not attacker:
if self.env.idsgame_config.game_config.reconnaissance_actions and attacker:
combined_features = []
for idx, row in enumerate(attacker_obs):
combined_row = np.append(row, defender_obs[idx])
combined_features.append(combined_row)
if self.env.idsgame_config.reconnaissance_bool_features:
combined_features = np.array(combined_features)
f = np.zeros(
(combined_features.shape[0], combined_features.shape[1] + d_bool_features.shape[1]))
for i in range(combined_features.shape[0]):
f[i] = np.append(combined_features[i], d_bool_features[i])
combined_features = f
return np.array(combined_features)
return np.append(attacker_obs, defender_obs)
else:
if self.env.idsgame_config.reconnaissance_bool_features:
f = np.zeros((attacker_obs.shape[0],
attacker_obs.shape[1] + neighbor_defense_attributes.shape[1] +
d_bool_features.shape[1]))
for i in range(f.shape[0]):
f[i] = np.append(np.append(attacker_obs[i], neighbor_defense_attributes[i]),
d_bool_features[i])
else:
f = np.zeros((attacker_obs.shape[0],
attacker_obs.shape[1] + neighbor_defense_attributes.shape[1]))
for i in range(f.shape[0]):
f[i] = np.append(attacker_obs[i], neighbor_defense_attributes[i])
if self.config.dqn_config.state_length == 1:
return f
if len(state) == 0:
s = np.array([f] * self.config.dqn_config.state_length)
return s
# if not self.env.local_view_features() or not attacker:
# temp = np.append(attacker_obs, defender_obs)
# else:
# temp = np.append(attacker_obs, neighbor_defense_attributes)
state = np.append(state[1:], np.array([f]), axis=0)
return state
else:
if self.config.dqn_config.state_length == 1:
if attacker:
return np.array(attacker_obs)
else:
return np.array(defender_obs)
if len(state) == 0:
if attacker:
return np.array([attacker_obs] * self.config.dqn_config.state_length)
else:
return np.array([defender_obs] * self.config.dqn_config.state_length)
if attacker:
state = np.append(state[1:], np.array([attacker_obs]), axis=0)
else:
state = np.append(state[1:], np.array([defender_obs]), axis=0)
return state
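# A hypothetical driver sketch (the environment id and configuration are assumptions, not taken from this file):
# import gym
# env = gym.make("idsgame-random_defense-v0")   # any IdsGameEnv variant
# config = QAgentConfig(...)                    # gamma, alpha, epsilon, num_episodes, attacker/defender flags, dqn_config
# agent = DQNAgent(env, config)
# agent.train()                                 # warmup fills the replay buffer, then DQN training runs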
| 2.328125 | 2 |
setup.py | helloqiu/SillyServer | 0 | 12790847 | # !/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
import SillyServer
setup(
name="SillyServer",
version=SillyServer.__VERSION__,
author=SillyServer.__AUTHOR__,
url=SillyServer.__URL__,
license=SillyServer.__LICENSE__,
packages=find_packages(),
description="A web framework that is silly",
keywords="silly server",
test_suite="nose.collector"
)
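# Installation/test sketch (run from the repository root):
# $ pip install .
# $ python setup.py test   # runs the nose test suite declared above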
| 1.0625 | 1 |
test/manual/annotations/test_list_annotations.py | membranepotential/mendeley-python-sdk | 103 | 12790848 | <reponame>membranepotential/mendeley-python-sdk<filename>test/manual/annotations/test_list_annotations.py
from test import get_user_session, cassette, sleep
from test.resources.documents import create_document, delete_all_documents
def test_should_list_annotations():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/annotations/list_annotations/list_annotations.yaml'):
doc = create_document(session)
doc.add_note("A nice annotation")
page = session.annotations.list()
assert len(page.items) == 1
assert page.count == 1
annotation = page.items[0]
assert annotation.text == "A nice annotation"
assert annotation.privacy_level == 'private'
assert annotation.type == 'note'
assert annotation.last_modified
assert annotation.profile.id
assert annotation.profile.display_name
assert annotation.document().id == doc.id
assert annotation.document().title == doc.title
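# Hypothetical invocation sketch (pytest is assumed as the runner; the cassette files replay recorded HTTP traffic):
# $ pytest test/manual/annotations/test_list_annotations.py::test_should_list_annotations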
def test_should_page_through_annotations():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/annotations/list_annotations/page_through_annotations.yaml'):
doc = create_document(session)
file = doc.attach_file('fixtures/resources/files/basket.txt')
file.add_sticky_note("annotation 1", 100, 200, 1)
file.add_sticky_note("annotation 2", 100, 200, 1)
file.add_sticky_note("annotation 3", 100, 200, 1)
first_page = session.annotations.list(page_size=2)
assert len(first_page.items) == 2
assert first_page.count == 3
assert first_page.items[0].text == 'annotation 2'
assert first_page.items[1].text == 'annotation 1'
second_page = first_page.next_page
assert len(second_page.items) == 1
assert second_page.count == 3
assert second_page.items[0].text == 'annotation 3'
def test_should_list_annotations_modified_since():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/annotations/list_annotations/modified_since.yaml'):
doc = create_document(session, 'title 1')
file = doc.attach_file('fixtures/resources/files/basket.txt')
annotation = file.add_sticky_note("annotation 1", 100, 200, 1)
sleep(2)
file.add_sticky_note("annotation 2", 100, 200, 1)
file.add_sticky_note("annotation 3", 100, 200, 1)
page = session.annotations.list(modified_since=annotation.created.replace(seconds=+1))
assert len(page.items) == 2
assert page.count == 2
assert page.items[0].text == 'annotation 2'
assert page.items[1].text == 'annotation 3'
def test_should_list_annotations_deleted_since():
session = get_user_session()
delete_all_documents()
with cassette('fixtures/resources/annotations/list_annotations/deleted_since.yaml'):
doc = create_document(session, 'title 1')
file = doc.attach_file('fixtures/resources/files/basket.txt')
annotation1 = file.add_sticky_note("annotation 1", 100, 200, 1)
annotation2 = file.add_sticky_note("annotation 2", 100, 200, 1)
annotation3 = file.add_sticky_note("annotation 3", 100, 200, 1)
annotation1.delete()
sleep(2)
annotation2.delete()
annotation3.delete()
page = session.annotations.list(deleted_since=annotation3.created.replace(seconds=+1))
assert len(page.items) == 2
assert page.count == 2 | 2.328125 | 2 |
app/hive_sbi_api/v1/filters.py | josephsavage/hive-sbi-api | 0 | 12790849 | from django_filters import rest_framework as filters
from hive_sbi_api.core.models import Transaction
class TransactionFilter(filters.FilterSet):
account = filters.CharFilter(
field_name='account__account',
label='account',
)
sponsor = filters.CharFilter(
field_name='sponsor__account',
label='sponsor',
)
sponsee = filters.CharFilter(
field_name='sponsees__account__account',
label='sponsee',
)
class Meta:
model = Transaction
fields = (
'source',
'account',
'sponsor',
'status',
'share_type',
'sponsee',
)
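# Hypothetical query-string sketch (the endpoint path is an assumption; parameter names come from the filters above):
# GET /v1/transactions/?account=<hive-account>&share_type=<type>
# GET /v1/transactions/?sponsor=<hive-account>&sponsee=<hive-account>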
| 2.046875 | 2 |
applications/tensorflow2/image_classification/custom_exceptions.py | payoto/graphcore_examples | 260 | 12790850 | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
class UnsupportedFormat(TypeError):
pass
class DimensionError(ValueError):
pass
class MissingArgumentException(ValueError):
pass
class InvalidPrecisionException(NameError):
pass
class UnallowedConfigurationError(ValueError):
pass
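# Usage sketch (illustrative only; real callers live elsewhere in the application):
# if precision_arg is None:
#     raise MissingArgumentException("--precision must be provided")
# if precision_arg not in supported_precisions:
#     raise InvalidPrecisionException("Unrecognised precision: {}".format(precision_arg))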
| 1.75 | 2 |