code (stringlengths 20–1.05M) | apis (sequence) | extract_api (stringlengths 75–5.24M)
---|---|---|
from __future__ import print_function
import numpy as np
from configSetup import Configuration
from visualize import imshow
from read_mesh import get_1d_meshes
# trick to make nice colorbars
# see http://joseph-long.com/writing/colorbars/
def colorbar(mappable,
loc="right",
orientation="vertical",
size="5%",
pad=0.05,
ticklocation='auto'):
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
cax = divider.append_axes(loc, size=size, pad=pad)
return fig.colorbar(mappable, cax=cax, orientation=orientation, ticklocation=ticklocation)
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig = plt.figure(1, figsize=(6.974, 1.4))
plt.rc('font', family='serif', size=8)
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.rc('axes', labelsize=8)
gs = plt.GridSpec(1, 2)
gs.update(hspace = 0.5)
gs.update(wspace = 0.07)
axs = []
axs.append( plt.subplot(gs[0]) )
axs.append( plt.subplot(gs[1]) )
# what to plot
ispcs = 0
vdir = "x"
prefix = 'twostream/out/'
conf = Configuration('config-twostream-relativistic.ini')
#simulation box size
xmin = 0.0
ymin = 0.0
xmax = conf.dx*conf.Nx*conf.NxMesh
ymax = conf.dy*conf.Ny*conf.NyMesh
# create panels from snapshots
laps = range(0, 4001, 1000)
for i, lap in enumerate(laps):
print(lap)
# check if this is top/bottom panel
if i == 0:
top = True
else:
top = False
if i == len(laps)-1:
bottom = True
else:
bottom = False
# clear figure before we start
for ax in axs:
ax.clear()
if top:
axleft = 0.10
axbottom = 0.24
axright = 0.96
axtop = 0.80
fig.subplots_adjust(left=axleft, bottom=axbottom, right=axright, top=axtop)
useLog = True
# read left
##################################################
meshesL = get_1d_meshes(prefix, lap, conf, 0, vdir)
print("atmosphere max dens: ", np.nanmax(meshesL['data']))
norm_fac = np.nanmax(meshesL['data'])
#print(norm_fac)
meshesL['data'] = meshesL['data']/norm_fac
if useLog:
meshesL['data'] = np.log10(meshesL['data'])
imL = imshow(axs[0], meshesL['data'],
xmin, xmax,
meshesL['vmin'], meshesL['vmax'],
cmap = 'plasma_r',
#vmin = 0.0,
#vmax = 1.0,
#clip = 1.0e-5,
vmin = -3.0,
vmax = 0,
clip = None
)
# read right
##################################################
meshesR = get_1d_meshes(prefix, lap, conf, 1, vdir)
print("beam max dens:", np.nanmax(meshesR['data']))
#norm_fac = np.max(meshesR['data'])
norm_fac = np.nanmax(meshesR['data'])
meshesR['data'] = meshesR['data']/norm_fac
print("relative beam max dens:", np.nanmax(meshesR['data']))
if useLog:
meshesR['data'] = np.log10(meshesR['data'])
imR = imshow(axs[1], meshesR['data'],
xmin, xmax,
meshesR['vmin'], meshesR['vmax'],
cmap = 'plasma_r',
#vmin = 0.0,
#vmax = 1.0,
#clip = 1.0e-5,
vmin = -3.0,
vmax = 0,
clip = None
)
# remove ytick labels from second panel
axs[1].set_yticklabels([])
# set ylabel for leftmost panel
axs[0].set_ylabel(r'$u_x$')
# header panel
if top:
cbL = colorbar(imL, loc="top", orientation="horizontal", size="8%", pad=0.03, ticklocation='top')
cbR = colorbar(imR, loc="top", orientation="horizontal", size="8%", pad=0.03, ticklocation='top')
# middle panels
if not(bottom):
for ax in axs:
ax.set_xticklabels([])
# tail panel
if bottom:
for ax in axs:
ax.set_xlabel(r'Location $x$ ($\lambda_p$)')
# add timestamp
tcur = lap*conf.dx*conf.cfl
tstamp = "$t = $" + str(tcur) + " $\omega_p^{-1}$"
txt = fig.text(axright+0.02, 0.5, tstamp, rotation=270, ha='center', va='center')
slap = str(lap).rjust(5, '0')
fname = prefix+'mesh_{}_{}.png'.format(0, slap)
plt.savefig(fname, bbox_inches='tight')
fname = prefix+'mesh_{}_{}.pdf'.format(0, slap)
plt.savefig(fname, bbox_inches='tight')
##################################################
# hacks to clean up the figure so we can re-use the same Fig object
# finally we need to remove colorbar before next round
if top:
cbL.remove()
cbR.remove()
txt.remove()
| [
"read_mesh.get_1d_meshes",
"numpy.log10",
"matplotlib.pyplot.savefig",
"configSetup.Configuration",
"matplotlib.pyplot.GridSpec",
"matplotlib.pyplot.figure",
"numpy.nanmax",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"visualize.imshow",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.rc"
] | [((449, 472), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (468, 472), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((760, 795), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6.974, 1.4)'}), '(1, figsize=(6.974, 1.4))\n', (770, 795), True, 'import matplotlib.pyplot as plt\n'), ((800, 838), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""', 'size': '(8)'}), "('font', family='serif', size=8)\n", (806, 838), True, 'import matplotlib.pyplot as plt\n'), ((843, 871), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(8)'}), "('xtick', labelsize=8)\n", (849, 871), True, 'import matplotlib.pyplot as plt\n'), ((876, 904), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(8)'}), "('ytick', labelsize=8)\n", (882, 904), True, 'import matplotlib.pyplot as plt\n'), ((909, 936), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(8)'}), "('axes', labelsize=8)\n", (915, 936), True, 'import matplotlib.pyplot as plt\n'), ((960, 978), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(1)', '(2)'], {}), '(1, 2)\n', (972, 978), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1274), 'configSetup.Configuration', 'Configuration', (['"""config-twostream-relativistic.ini"""'], {}), "('config-twostream-relativistic.ini')\n", (1237, 1274), False, 'from configSetup import Configuration\n'), ((1070, 1088), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (1081, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1107, 1125), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (1118, 1125), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2232), 'read_mesh.get_1d_meshes', 'get_1d_meshes', (['prefix', 'lap', 'conf', '(0)', 'vdir'], {}), '(prefix, lap, conf, 0, vdir)\n', (2204, 2232), False, 'from read_mesh import get_1d_meshes\n'), ((2321, 2347), 'numpy.nanmax', 'np.nanmax', (["meshesL['data']"], {}), "(meshesL['data'])\n", (2330, 2347), True, 'import numpy as np\n'), ((2515, 2644), 'visualize.imshow', 'imshow', (['axs[0]', "meshesL['data']", 'xmin', 'xmax', "meshesL['vmin']", "meshesL['vmax']"], {'cmap': '"""plasma_r"""', 'vmin': '(-3.0)', 'vmax': '(0)', 'clip': 'None'}), "(axs[0], meshesL['data'], xmin, xmax, meshesL['vmin'], meshesL['vmax'\n ], cmap='plasma_r', vmin=-3.0, vmax=0, clip=None)\n", (2521, 2644), False, 'from visualize import imshow\n'), ((2949, 2990), 'read_mesh.get_1d_meshes', 'get_1d_meshes', (['prefix', 'lap', 'conf', '(1)', 'vdir'], {}), '(prefix, lap, conf, 1, vdir)\n', (2962, 2990), False, 'from read_mesh import get_1d_meshes\n'), ((3116, 3142), 'numpy.nanmax', 'np.nanmax', (["meshesR['data']"], {}), "(meshesR['data'])\n", (3125, 3142), True, 'import numpy as np\n'), ((3354, 3483), 'visualize.imshow', 'imshow', (['axs[1]', "meshesR['data']", 'xmin', 'xmax', "meshesR['vmin']", "meshesR['vmax']"], {'cmap': '"""plasma_r"""', 'vmin': '(-3.0)', 'vmax': '(0)', 'clip': 'None'}), "(axs[1], meshesR['data'], xmin, xmax, meshesR['vmin'], meshesR['vmax'\n ], cmap='plasma_r', vmin=-3.0, vmax=0, clip=None)\n", (3360, 3483), False, 'from visualize import imshow\n'), ((4678, 4717), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, bbox_inches='tight')\n", (4689, 4717), True, 'import matplotlib.pyplot as plt\n'), ((4783, 4822), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {'bbox_inches': '"""tight"""'}), "(fname, 
bbox_inches='tight')\n", (4794, 4822), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2300), 'numpy.nanmax', 'np.nanmax', (["meshesL['data']"], {}), "(meshesL['data'])\n", (2283, 2300), True, 'import numpy as np\n'), ((2474, 2499), 'numpy.log10', 'np.log10', (["meshesL['data']"], {}), "(meshesL['data'])\n", (2482, 2499), True, 'import numpy as np\n'), ((3025, 3051), 'numpy.nanmax', 'np.nanmax', (["meshesR['data']"], {}), "(meshesR['data'])\n", (3034, 3051), True, 'import numpy as np\n'), ((3235, 3261), 'numpy.nanmax', 'np.nanmax', (["meshesR['data']"], {}), "(meshesR['data'])\n", (3244, 3261), True, 'import numpy as np\n'), ((3313, 3338), 'numpy.log10', 'np.log10', (["meshesR['data']"], {}), "(meshesR['data'])\n", (3321, 3338), True, 'import numpy as np\n')] |
# <NAME> 2019-04-01
# Fisher Iris Data Set
# Splitting the data into training and testing datasets.
# Import all the necessary packages to use the various classification algorithms
# Load libraries
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# Load dataset
data = "iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'species']
dataset = pd.read_csv(data, header=0)
# Split-out validation dataset
array = dataset.values
X = array[:,0:4]
Y = array[:,4]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
seed = 7
scoring = 'accuracy' # This will check the scoring accuracy of each algorithm.
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
# evaluate each model in turn
results = []
names = []
for name, model in models:
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)  # shuffle=True is required when random_state is set
cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
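# Added illustration (not part of the original script): the metric imports above
# (accuracy_score, confusion_matrix, classification_report) are otherwise unused.
# A typical final step fits one model (KNN is an arbitrary choice here) on the
# training split and scores it on the held-out validation data.
knn = KNeighborsClassifier()
knn.fit(X_train, Y_train)
predictions = knn.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))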
| [
"sklearn.svm.SVC",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.linear_model.LogisticRegression",
"sklearn.naive_bayes.GaussianNB",
"sklearn.model_selection.KFold",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"sklearn.model_selection.cross_val_score"
] | [((892, 919), 'pandas.read_csv', 'pd.read_csv', (['data'], {'header': '(0)'}), '(data, header=0)\n', (903, 919), True, 'import pandas as pd\n'), ((1087, 1175), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['X', 'Y'], {'test_size': 'validation_size', 'random_state': 'seed'}), '(X, Y, test_size=validation_size,\n random_state=seed)\n', (1119, 1175), False, 'from sklearn import model_selection\n'), ((1698, 1751), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': '(10)', 'random_state': 'seed'}), '(n_splits=10, random_state=seed)\n', (1719, 1751), False, 'from sklearn import model_selection\n'), ((1766, 1854), 'sklearn.model_selection.cross_val_score', 'model_selection.cross_val_score', (['model', 'X_train', 'Y_train'], {'cv': 'kfold', 'scoring': 'scoring'}), '(model, X_train, Y_train, cv=kfold, scoring=\n scoring)\n', (1797, 1854), False, 'from sklearn import model_selection\n'), ((1320, 1377), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""'}), "(solver='liblinear', multi_class='ovr')\n", (1338, 1377), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1402, 1430), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', ([], {}), '()\n', (1428, 1430), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n'), ((1455, 1477), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (1475, 1477), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1503, 1527), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1525, 1527), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1551, 1563), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1561, 1563), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((1588, 1605), 'sklearn.svm.SVC', 'SVC', ([], {'gamma': '"""auto"""'}), "(gamma='auto')\n", (1591, 1605), False, 'from sklearn.svm import SVC\n')] |
# lpdemo.py Demo/test program for MicroPython asyncio low power operation
# Author: <NAME>
# Copyright <NAME> 2018-2019 Released under the MIT license
import rtc_time_cfg
rtc_time_cfg.enabled = True
from pyb import LED, Pin
import aswitch
import uasyncio as asyncio
try:
if asyncio.version[0] != 'fast_io':
raise AttributeError
except AttributeError:
raise OSError('This requires fast_io fork of uasyncio.')
from rtc_time import Latency
class Button(aswitch.Switch):
def __init__(self, pin):
super().__init__(pin)
self.close_func(self._sw_close)
self._flag = False
def pressed(self):
f = self._flag
self._flag = False
return f
def _sw_close(self):
self._flag = True
running = False
def start(loop, leds, tims):
global running
running = True
coros = []
# Demo: assume app requires higher speed (not true in this instance)
Latency(50)
# Here you might apply power to external hardware
for x, led in enumerate(leds): # Create a coroutine for each LED
coros.append(toggle(led, tims[x]))
loop.create_task(coros[-1])
return coros
def stop(leds, coros):
global running
running = False
while coros:
asyncio.cancel(coros.pop())
# Remove power from external hardware
for led in leds:
led.off()
Latency(200) # Slow down scheduler to conserve power
async def monitor(loop, button):
leds = [LED(x) for x in (1, 2, 3)] # Create list of LED's and times
tims = [200, 700, 1200]
coros = start(loop, leds, tims)
while True:
if button.pressed():
if running:
stop(leds, coros)
else:
coros = start(loop, leds, tims)
await asyncio.sleep_ms(0)
async def toggle(objLED, time_ms):
while True:
await asyncio.sleep_ms(time_ms)
objLED.toggle()
loop = asyncio.get_event_loop()
button = Button(Pin('X1', Pin.IN, Pin.PULL_UP))
loop.create_task(monitor(loop, button))
loop.run_forever()
| [
"pyb.Pin",
"uasyncio.sleep_ms",
"rtc_time.Latency",
"uasyncio.get_event_loop",
"pyb.LED"
] | [((1916, 1940), 'uasyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1938, 1940), True, 'import uasyncio as asyncio\n'), ((931, 942), 'rtc_time.Latency', 'Latency', (['(50)'], {}), '(50)\n', (938, 942), False, 'from rtc_time import Latency\n'), ((1364, 1376), 'rtc_time.Latency', 'Latency', (['(200)'], {}), '(200)\n', (1371, 1376), False, 'from rtc_time import Latency\n'), ((1957, 1987), 'pyb.Pin', 'Pin', (['"""X1"""', 'Pin.IN', 'Pin.PULL_UP'], {}), "('X1', Pin.IN, Pin.PULL_UP)\n", (1960, 1987), False, 'from pyb import LED, Pin\n'), ((1464, 1470), 'pyb.LED', 'LED', (['x'], {}), '(x)\n', (1467, 1470), False, 'from pyb import LED, Pin\n'), ((1772, 1791), 'uasyncio.sleep_ms', 'asyncio.sleep_ms', (['(0)'], {}), '(0)\n', (1788, 1791), True, 'import uasyncio as asyncio\n'), ((1858, 1883), 'uasyncio.sleep_ms', 'asyncio.sleep_ms', (['time_ms'], {}), '(time_ms)\n', (1874, 1883), True, 'import uasyncio as asyncio\n')] |
import torch
import torch.nn as nn
from bert.layers.layer_norm import BertLayerNorm
from transformer.layers.layer_norm import LayerNorm
class BertEmbeddings(nn.Module):
    '''
    BERT's input embedding is built from three embedding layers:
    1. Token embeddings (WordEmbedding)
    2. Position embeddings (PositionalEmbedding)
    3. Segment embeddings (TokenTypeEmbedding)
    Each token of the BERT input is fed through all three layers, and the three
    resulting vectors are summed to form the input embedding.
    All three layers are learned.
    '''
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.layer_norm = BertLayerNorm(config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
        '''
        :param input_ids: list of input token IDs
        :param token_type_ids: segment IDs; for a sentence pair (A, B), tokens from
            sentence A are mapped to 0 and tokens from sentence B are mapped to 1
        :return:
        '''
seq_len = input_ids.size(1)
        # position_ids is simply the position index [0, 1, 2, ..., seq_len - 1]
position_ids = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (seq_len, ) -> (batch_size, seq_len)
        # For a single-sentence input, all segment IDs can simply be 0
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
word_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = word_embeddings + position_embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
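# Added smoke test (illustrative, not part of the original module). The config
# values below are invented; any object exposing these attributes will work.
if __name__ == '__main__':
    from types import SimpleNamespace
    config = SimpleNamespace(vocab_size=30522, hidden_size=768,
                            max_position_embeddings=512, type_vocab_size=2,
                            hidden_dropout_prob=0.1)
    embeddings = BertEmbeddings(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))  # (batch_size, seq_len)
    print(embeddings(input_ids).shape)  # expected: torch.Size([2, 16, 768])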
| [
"torch.nn.Dropout",
"torch.nn.Embedding",
"bert.layers.layer_norm.BertLayerNorm",
"torch.zeros_like",
"torch.arange"
] | [((497, 548), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.hidden_size'], {}), '(config.vocab_size, config.hidden_size)\n', (509, 548), True, 'import torch.nn as nn\n'), ((584, 648), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_position_embeddings', 'config.hidden_size'], {}), '(config.max_position_embeddings, config.hidden_size)\n', (596, 648), True, 'import torch.nn as nn\n'), ((686, 742), 'torch.nn.Embedding', 'nn.Embedding', (['config.type_vocab_size', 'config.hidden_size'], {}), '(config.type_vocab_size, config.hidden_size)\n', (698, 742), True, 'import torch.nn as nn\n'), ((770, 803), 'bert.layers.layer_norm.BertLayerNorm', 'BertLayerNorm', (['config.hidden_size'], {}), '(config.hidden_size)\n', (783, 803), False, 'from bert.layers.layer_norm import BertLayerNorm\n'), ((827, 865), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (837, 865), True, 'import torch.nn as nn\n'), ((1191, 1255), 'torch.arange', 'torch.arange', (['seq_len'], {'dtype': 'torch.long', 'device': 'input_ids.device'}), '(seq_len, dtype=torch.long, device=input_ids.device)\n', (1203, 1255), False, 'import torch\n'), ((1459, 1486), 'torch.zeros_like', 'torch.zeros_like', (['input_ids'], {}), '(input_ids)\n', (1475, 1486), False, 'import torch\n')] |
from snooble import ratelimit
import time # used to monkeypatch this module
from unittest import mock
import pytest
class TestRatelimit(object):
def test_bursty(self):
limiter = ratelimit.RateLimiter(5, 1, bursty=False)
assert limiter.current_bucket == 1
assert limiter.refresh_period == 1 / 5
limiter.take()
assert limiter.current_bucket == 0
def test_bursty_property(self):
limiter = ratelimit.RateLimiter(5, 1, bursty=True)
assert limiter.current_bucket == 5
assert limiter.refresh_period == 1
limiter.bursty = False
assert limiter.bucket_size == 1
assert limiter.current_bucket == 1
assert limiter.refresh_period == 1 / 5
limiter.bursty = False
assert limiter.bucket_size == 1
assert limiter.current_bucket == 1
assert limiter.refresh_period == 1 / 5
limiter.take()
assert limiter.current_bucket == 0
limiter.bursty = True
assert limiter.bucket_size == 5
assert limiter.current_bucket == 0
assert limiter.refresh_period == 1
limiter.bursty = True
assert limiter.bucket_size == 5
assert limiter.current_bucket == 0
assert limiter.refresh_period == 1
def test_able_to_take_when_bucket_filled(self, monkeypatch):
mocker = mock.Mock()
monkeypatch.setattr(time, 'sleep', mocker)
limiter = ratelimit.RateLimiter(5, 1)
assert limiter.current_bucket == 5
limiter.take()
assert limiter.current_bucket == 4
limiter.take(4)
assert limiter.current_bucket == 0
assert not mocker.called
def test_sleeps_until_finished(self, monkeypatch):
sleep_mocker = mock.Mock()
timer_mocker = mock.Mock(side_effect=[0, 0.1, 0.2, 1])
monkeypatch.setattr(time, 'sleep', sleep_mocker)
monkeypatch.setattr(time, 'perf_counter', timer_mocker)
limiter = ratelimit.RateLimiter(1, 1)
assert limiter.current_bucket == 1
limiter.take()
assert limiter.current_bucket == 0
assert not sleep_mocker.called
limiter.take()
assert limiter.current_bucket == 0
assert sleep_mocker.called
assert sleep_mocker.call_args_list == [mock.call(0.9), mock.call(0.8)]
assert len(timer_mocker.call_args_list) == 4
def test_taking_many_at_once_small_bucket(self, monkeypatch):
sleep_mocker = mock.Mock()
timer_mocker = mock.Mock(side_effect=range(100))
monkeypatch.setattr(time, 'sleep', sleep_mocker)
monkeypatch.setattr(time, 'perf_counter', timer_mocker)
small_bucket = ratelimit.RateLimiter(1, 1)
assert small_bucket.current_bucket == 1
small_bucket.take()
assert small_bucket.current_bucket == 0
small_bucket.take(8)
assert small_bucket.current_bucket == 0
assert len(timer_mocker.call_args_list) == 9
def test_taking_many_at_once_big_bucket(self, monkeypatch):
sleep_mocker = mock.Mock()
timer_mocker = mock.Mock(side_effect=range(100))
monkeypatch.setattr(time, 'sleep', sleep_mocker)
monkeypatch.setattr(time, 'perf_counter', timer_mocker)
big_bucket = ratelimit.RateLimiter(3, 1)
assert big_bucket.current_bucket == 3
big_bucket.take()
assert big_bucket.current_bucket == 2
assert not sleep_mocker.called
big_bucket.take(8)
assert big_bucket.current_bucket == 0
assert len(timer_mocker.call_args_list) == 3
def test_equality(self):
limit1 = ratelimit.RateLimiter(rate=60, per=60, bursty=False)
limit2 = ratelimit.RateLimiter(rate=60, per=60, bursty=True)
limit3 = ratelimit.RateLimiter(rate=25, per=50, bursty=True)
limit4 = ratelimit.RateLimiter(rate=60, per=60, bursty=False)
assert limit1 == limit4 and limit4 == limit1
assert limit1 != limit2
assert limit1 != limit3
assert limit1 != (60, 60)
def test_repr(self):
rl = ratelimit.RateLimiter(rate=60, per=60, bursty=True)
assert "rate=60" in repr(rl)
assert "per=60" in repr(rl)
assert "bursty=True" in repr(rl)
assert "current=60" in repr(rl)
rl.take(30)
assert "current=30" in repr(rl)
class TestLimitation(object):
def test_wrapping(self):
take_mocker = mock.Mock(return_value=True)
ratelimiter = ratelimit.RateLimiter(1, 1)
ratelimiter.take = take_mocker
test_object = mock.Mock()
limited_object = ratelimiter.limitate(test_object, ['limited_method', 'limited_uncalled_method'])
limited_object.arbitrary_method()
assert not take_mocker.called
assert test_object.arbitrary_method.called
test_object.reset_mock()
take_mocker.reset_mock()
limited_object.arbitrary_uncalled_method
assert not take_mocker.called
assert not test_object.arbitrary_uncalled_method.called
test_object.reset_mock()
take_mocker.reset_mock()
limited_object.limited_method()
assert take_mocker.called
assert test_object.limited_method.called
test_object.reset_mock()
take_mocker.reset_mock()
limited_object.limited_uncalled_method
assert not take_mocker.called
assert not test_object.limited_uncalled_method.called
test_object.reset_mock()
take_mocker.reset_mock()
test_object = mock.Mock()
test_object.arbitrary_attribute = "arbitrary"
test_object.limited_attribute = "limited"
limited_object = ratelimiter.limitate(test_object, ['limited_attribute'])
limited_object.arbitrary_attribute
assert not take_mocker.called
test_object.reset_mock()
take_mocker.reset_mock()
limited_object.limited_attribute
assert take_mocker.called
def test_wrapper_passes_information_through(self):
take_mocker = mock.Mock(return_value=True)
ratelimiter = ratelimit.RateLimiter(1, 1)
ratelimiter.take = take_mocker
test_object = mock.Mock()
limited_object = ratelimiter.limitate(test_object, ['limited_method'])
limited_object.arbitrary_method("arg1", "arg2", ["args4", "and 5"], name="hello")
assert not take_mocker.called
assert (test_object.arbitrary_method.call_args ==
mock.call("arg1", "arg2", ["args4", "and 5"], name="hello"))
test_object.reset_mock()
limited_object.limited_method("arg1", "arg2", ["args4", "and 5"], name="hello")
assert take_mocker.called
assert (test_object.limited_method.call_args ==
mock.call("arg1", "arg2", ["args4", "and 5"], name="hello"))
@pytest.mark.xfail
def test_wrapper_looks_like_object(self):
take_mocker = mock.Mock(return_value=True)
ratelimiter = ratelimit.RateLimiter(1, 1)
ratelimiter.take = take_mocker
class MyCustomObject(object):
def limited_method(self):
return "limited"
def unlimited_method(self, arg1, arg2="hello"):
return "unlimited"
test_object = MyCustomObject()
limited_object = ratelimiter.limitate(test_object, ['limited_method'])
assert isinstance(limited_object, MyCustomObject)
assert hasattr(limited_object, 'limited_method')
assert hasattr(limited_object, 'unlimited_method')
# TODO: method signatures are alike
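# Usage pattern implied by the tests above (an illustrative sketch, not part of the
# test suite): allow 60 requests per 60 seconds and rate-limit selected methods of
# an arbitrary client object. The 'request' method name is a placeholder.
def _example_usage(client):
    limiter = ratelimit.RateLimiter(rate=60, per=60, bursty=True)
    limiter.take()  # blocks via time.sleep once the bucket is exhausted
    return limiter.limitate(client, ['request'])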
| [
"unittest.mock.call",
"unittest.mock.Mock",
"snooble.ratelimit.RateLimiter"
] | [((196, 237), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(5)', '(1)'], {'bursty': '(False)'}), '(5, 1, bursty=False)\n', (217, 237), False, 'from snooble import ratelimit\n'), ((449, 489), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(5)', '(1)'], {'bursty': '(True)'}), '(5, 1, bursty=True)\n', (470, 489), False, 'from snooble import ratelimit\n'), ((1363, 1374), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1372, 1374), False, 'from unittest import mock\n'), ((1445, 1472), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(5)', '(1)'], {}), '(5, 1)\n', (1466, 1472), False, 'from snooble import ratelimit\n'), ((1762, 1773), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (1771, 1773), False, 'from unittest import mock\n'), ((1797, 1836), 'unittest.mock.Mock', 'mock.Mock', ([], {'side_effect': '[0, 0.1, 0.2, 1]'}), '(side_effect=[0, 0.1, 0.2, 1])\n', (1806, 1836), False, 'from unittest import mock\n'), ((1977, 2004), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(1)', '(1)'], {}), '(1, 1)\n', (1998, 2004), False, 'from snooble import ratelimit\n'), ((2478, 2489), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (2487, 2489), False, 'from unittest import mock\n'), ((2692, 2719), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(1)', '(1)'], {}), '(1, 1)\n', (2713, 2719), False, 'from snooble import ratelimit\n'), ((3064, 3075), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (3073, 3075), False, 'from unittest import mock\n'), ((3276, 3303), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(3)', '(1)'], {}), '(3, 1)\n', (3297, 3303), False, 'from snooble import ratelimit\n'), ((3637, 3689), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', ([], {'rate': '(60)', 'per': '(60)', 'bursty': '(False)'}), '(rate=60, per=60, bursty=False)\n', (3658, 3689), False, 'from snooble import ratelimit\n'), ((3707, 3758), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', ([], {'rate': '(60)', 'per': '(60)', 'bursty': '(True)'}), '(rate=60, per=60, bursty=True)\n', (3728, 3758), False, 'from snooble import ratelimit\n'), ((3776, 3827), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', ([], {'rate': '(25)', 'per': '(50)', 'bursty': '(True)'}), '(rate=25, per=50, bursty=True)\n', (3797, 3827), False, 'from snooble import ratelimit\n'), ((3845, 3897), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', ([], {'rate': '(60)', 'per': '(60)', 'bursty': '(False)'}), '(rate=60, per=60, bursty=False)\n', (3866, 3897), False, 'from snooble import ratelimit\n'), ((4089, 4140), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', ([], {'rate': '(60)', 'per': '(60)', 'bursty': '(True)'}), '(rate=60, per=60, bursty=True)\n', (4110, 4140), False, 'from snooble import ratelimit\n'), ((4439, 4467), 'unittest.mock.Mock', 'mock.Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (4448, 4467), False, 'from unittest import mock\n'), ((4490, 4517), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(1)', '(1)'], {}), '(1, 1)\n', (4511, 4517), False, 'from snooble import ratelimit\n'), ((4580, 4591), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (4589, 4591), False, 'from unittest import mock\n'), ((5541, 5552), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (5550, 5552), False, 'from unittest import mock\n'), ((6041, 6069), 'unittest.mock.Mock', 'mock.Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (6050, 6069), False, 'from unittest 
import mock\n'), ((6092, 6119), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(1)', '(1)'], {}), '(1, 1)\n', (6113, 6119), False, 'from snooble import ratelimit\n'), ((6182, 6193), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (6191, 6193), False, 'from unittest import mock\n'), ((6919, 6947), 'unittest.mock.Mock', 'mock.Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (6928, 6947), False, 'from unittest import mock\n'), ((6970, 6997), 'snooble.ratelimit.RateLimiter', 'ratelimit.RateLimiter', (['(1)', '(1)'], {}), '(1, 1)\n', (6991, 6997), False, 'from snooble import ratelimit\n'), ((6476, 6535), 'unittest.mock.call', 'mock.call', (['"""arg1"""', '"""arg2"""', "['args4', 'and 5']"], {'name': '"""hello"""'}), "('arg1', 'arg2', ['args4', 'and 5'], name='hello')\n", (6485, 6535), False, 'from unittest import mock\n'), ((6766, 6825), 'unittest.mock.call', 'mock.call', (['"""arg1"""', '"""arg2"""', "['args4', 'and 5']"], {'name': '"""hello"""'}), "('arg1', 'arg2', ['args4', 'and 5'], name='hello')\n", (6775, 6825), False, 'from unittest import mock\n'), ((2303, 2317), 'unittest.mock.call', 'mock.call', (['(0.9)'], {}), '(0.9)\n', (2312, 2317), False, 'from unittest import mock\n'), ((2319, 2333), 'unittest.mock.call', 'mock.call', (['(0.8)'], {}), '(0.8)\n', (2328, 2333), False, 'from unittest import mock\n')] |
# file: services.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Copyright © 2015-2017 <NAME> <<EMAIL>>.
# SPDX-License-Identifier: MIT
# Created: 2015-09-05T19:01:04+0200
# Last modified: 2022-04-09T16:52:09+0200
import re
def services(filename="/etc/services"): # {{{1
"""
Generate a dictionary of the available services from the services file,
by default /etc/services.
Arguments:
filename: Name of the services file.
Returns:
A dict in the form of {25: 'smtp', 80: 'http', ...}
"""
with open(filename) as serv:
data = serv.read()
matches = re.findall("\n"+r"(\S+)\s+(\d+)/", data)
return {int(num): name for name, num in set(matches)}
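# Example usage (added for illustration; the output depends on the local services file):
if __name__ == "__main__":
    portmap = services()
    print(portmap.get(22))  # 'ssh' on a typical /etc/services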
| [
"re.findall"
] | [((611, 655), 're.findall', 're.findall', (["('\\n' + '(\\\\S+)\\\\s+(\\\\d+)/')", 'data'], {}), "('\\n' + '(\\\\S+)\\\\s+(\\\\d+)/', data)\n", (621, 655), False, 'import re\n')] |
from PIL import Image
import importlib
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as tvf
from assets.archs_zoo.r2d2_orig import PatchNet
class HNDesc(PatchNet):
""" Same than L2_Net, but replace the final 8x8 conv by 3 successive 2x2 convs.
"""
def __init__(self, dim=128, mchan=4, relu22=False, **kw):
PatchNet.__init__(self, **kw)
self._add_conv(8 * mchan)
self._add_conv(8 * mchan)
self._add_conv(16 * mchan, stride=2)
self._add_conv(16 * mchan)
self._add_conv(32 * mchan, stride=2)
self._add_conv(32 * mchan)
# replace last 8x8 convolution with 3 2x2 convolutions
self._add_conv(32 * mchan, k=2, stride=2, relu=relu22)
self._add_conv(32 * mchan, k=2, stride=2, relu=relu22)
self._add_conv(dim, k=2, stride=2, bn=False, relu=True)
self.out_dim = dim
@staticmethod
def img_transform():
rgb_mean = [0.485, 0.456, 0.406]
rgb_std = [0.229, 0.224, 0.225]
return tvf.Compose([tvf.ToTensor(),
tvf.Normalize(mean=rgb_mean, std=rgb_std)])
@staticmethod
def img_preprocessing(fname, device, resize_max=None, bbxs=None, resize_480x640=False):
img = Image.open(fname).convert('RGB')
if resize_480x640:
img = img.resize((640, 480))
if bbxs is not None:
img = img.crop(bbxs)
w, h = img.size
if resize_max and max(w, h) > resize_max:
scale = resize_max / max(h, w)
h_new, w_new = int(round(h * scale)), int(round(w * scale))
img = img.resize((w_new, h_new))
preprocess = HNDesc.img_transform()
net_input = preprocess(img)[None].to(device)
return net_input
@staticmethod
def load_network(model_fn):
checkpoint = torch.load(model_fn)
model = HNDesc()
model.load_state_dict(checkpoint['state_dict'])
return model.eval()
def class_for_name(module_name, class_name):
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
return getattr(m, class_name)
class conv(nn.Module):
def __init__(self, num_in_layers, num_out_layers, kernel_size, stride):
super(conv, self).__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv2d(num_in_layers,
num_out_layers,
kernel_size=kernel_size,
stride=stride,
padding=(self.kernel_size - 1) // 2)
self.bn = nn.BatchNorm2d(num_out_layers)
def forward(self, x):
return F.elu(self.bn(self.conv(x)), inplace=True)
class upconv(nn.Module):
def __init__(self, num_in_layers, num_out_layers, kernel_size, scale):
super(upconv, self).__init__()
self.scale = scale
self.conv = conv(num_in_layers, num_out_layers, kernel_size, 1)
def forward(self, x):
x = nn.functional.interpolate(x, scale_factor=self.scale, align_corners=True, mode='bilinear')
return self.conv(x)
class CapsDesc(nn.Module):
def __init__(self,
encoder='resnet50',
pretrained=True
):
super(CapsDesc, self).__init__()
filters = [256, 512, 1024, 2048]
resnet = class_for_name("torchvision.models", encoder)(pretrained=pretrained)
self.firstconv = resnet.conv1 # H/2
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool # H/4
# encoder
self.layer1 = resnet.layer1 # H/4
self.layer2 = resnet.layer2 # H/8
self.layer3 = resnet.layer3 # H/16
# coarse-level conv
self.conv_coarse = conv(filters[2], 128, 1, 1)
# decoder
self.upconv3 = upconv(filters[2], 512, 3, 2)
self.iconv3 = conv(filters[1] + 512, 512, 3, 1)
self.upconv2 = upconv(512, 256, 3, 2)
self.iconv2 = conv(filters[0] + 256, 256, 3, 1)
# fine-level conv
self.conv_fine = conv(256, 128, 1, 1)
def skipconnect(self, x1, x2):
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return x
def forward(self, x):
x = self.firstrelu(self.firstbn(self.firstconv(x)))
x = self.firstmaxpool(x)
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x_coarse = self.conv_coarse(x3)
x = self.upconv3(x3)
x = self.skipconnect(x2, x)
x = self.iconv3(x)
x = self.upconv2(x)
x = self.skipconnect(x1, x)
x = self.iconv2(x)
x_fine = self.conv_fine(x)
#return [x_coarse, x_fine]
return x_fine
@staticmethod
def img_transform():
rgb_mean = [0.485, 0.456, 0.406]
rgb_std = [0.229, 0.224, 0.225]
return tvf.Compose([tvf.ToTensor(),
tvf.Normalize(mean=rgb_mean, std=rgb_std)])
@staticmethod
def img_preprocessing(fname, device, resize_max=None, bbxs=None, resize_480x640=False):
img = Image.open(fname).convert('RGB')
if resize_480x640:
img = img.resize((640, 480))
if bbxs is not None:
img = img.crop(bbxs)
w, h = img.size
if resize_max and max(w, h) > resize_max:
scale = resize_max / max(h, w)
h_new, w_new = int(round(h * scale)), int(round(w * scale))
img = img.resize((w_new, h_new))
preprocess = CapsDesc.img_transform()
net_input = preprocess(img)[None].to(device)
return net_input
@staticmethod
def load_network(model_fn):
checkpoint = torch.load(model_fn)
model = CapsDesc()
model.load_state_dict(checkpoint['state_dict'])
return model.eval()
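# Added smoke test (illustrative, not part of the original file): CapsDesc maps a
# (B, 3, H, W) batch to a 128-channel fine descriptor map at 1/4 of the input
# resolution (two decoder upsampling stages after the 1/16 encoder output).
def _capsdesc_example(device='cpu'):
    model = CapsDesc(pretrained=False).to(device).eval()
    x = torch.randn(1, 3, 256, 256, device=device)
    with torch.no_grad():
        x_fine = model(x)
    print(x_fine.shape)  # expected: torch.Size([1, 128, 64, 64])
    return x_fine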
| [
"torch.nn.BatchNorm2d",
"PIL.Image.open",
"importlib.import_module",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.functional.interpolate",
"torchvision.transforms.Normalize",
"torch.nn.functional.pad",
"torchvision.transforms.ToTensor",
"assets.archs_zoo.r2d2_orig.PatchNet.__init__",
"torch.cat"
] | [((2138, 2174), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (2161, 2174), False, 'import importlib\n'), ((379, 408), 'assets.archs_zoo.r2d2_orig.PatchNet.__init__', 'PatchNet.__init__', (['self'], {}), '(self, **kw)\n', (396, 408), False, 'from assets.archs_zoo.r2d2_orig import PatchNet\n'), ((1880, 1900), 'torch.load', 'torch.load', (['model_fn'], {}), '(model_fn)\n', (1890, 1900), False, 'import torch\n'), ((2406, 2528), 'torch.nn.Conv2d', 'nn.Conv2d', (['num_in_layers', 'num_out_layers'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': '((self.kernel_size - 1) // 2)'}), '(num_in_layers, num_out_layers, kernel_size=kernel_size, stride=\n stride, padding=(self.kernel_size - 1) // 2)\n', (2415, 2528), True, 'import torch.nn as nn\n'), ((2662, 2692), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['num_out_layers'], {}), '(num_out_layers)\n', (2676, 2692), True, 'import torch.nn as nn\n'), ((3057, 3151), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', (['x'], {'scale_factor': 'self.scale', 'align_corners': '(True)', 'mode': '"""bilinear"""'}), "(x, scale_factor=self.scale, align_corners=True,\n mode='bilinear')\n", (3082, 3151), True, 'import torch.nn as nn\n'), ((4327, 4402), 'torch.nn.functional.pad', 'F.pad', (['x1', '(diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2)'], {}), '(x1, (diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2))\n', (4332, 4402), True, 'import torch.nn.functional as F\n'), ((4702, 4728), 'torch.cat', 'torch.cat', (['[x2, x1]'], {'dim': '(1)'}), '([x2, x1], dim=1)\n', (4711, 4728), False, 'import torch\n'), ((6237, 6257), 'torch.load', 'torch.load', (['model_fn'], {}), '(model_fn)\n', (6247, 6257), False, 'import torch\n'), ((1071, 1085), 'torchvision.transforms.ToTensor', 'tvf.ToTensor', ([], {}), '()\n', (1083, 1085), True, 'import torchvision.transforms as tvf\n'), ((1115, 1156), 'torchvision.transforms.Normalize', 'tvf.Normalize', ([], {'mean': 'rgb_mean', 'std': 'rgb_std'}), '(mean=rgb_mean, std=rgb_std)\n', (1128, 1156), True, 'import torchvision.transforms as tvf\n'), ((1284, 1301), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (1294, 1301), False, 'from PIL import Image\n'), ((5426, 5440), 'torchvision.transforms.ToTensor', 'tvf.ToTensor', ([], {}), '()\n', (5438, 5440), True, 'import torchvision.transforms as tvf\n'), ((5470, 5511), 'torchvision.transforms.Normalize', 'tvf.Normalize', ([], {'mean': 'rgb_mean', 'std': 'rgb_std'}), '(mean=rgb_mean, std=rgb_std)\n', (5483, 5511), True, 'import torchvision.transforms as tvf\n'), ((5639, 5656), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (5649, 5656), False, 'from PIL import Image\n')] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import bookkeeper.proto.storage_pb2 as storage__pb2
class MetaRangeServiceStub(object):
"""public service for other operations in range server
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetActiveRanges = channel.unary_unary(
'/bookkeeper.proto.storage.MetaRangeService/GetActiveRanges',
request_serializer=storage__pb2.GetActiveRangesRequest.SerializeToString,
response_deserializer=storage__pb2.GetActiveRangesResponse.FromString,
)
class MetaRangeServiceServicer(object):
"""public service for other operations in range server
"""
def GetActiveRanges(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MetaRangeServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetActiveRanges': grpc.unary_unary_rpc_method_handler(
servicer.GetActiveRanges,
request_deserializer=storage__pb2.GetActiveRangesRequest.FromString,
response_serializer=storage__pb2.GetActiveRangesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'bookkeeper.proto.storage.MetaRangeService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class RootRangeServiceStub(object):
"""public service for metadata services
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateNamespace = channel.unary_unary(
'/bookkeeper.proto.storage.RootRangeService/CreateNamespace',
request_serializer=storage__pb2.CreateNamespaceRequest.SerializeToString,
response_deserializer=storage__pb2.CreateNamespaceResponse.FromString,
)
self.DeleteNamespace = channel.unary_unary(
'/bookkeeper.proto.storage.RootRangeService/DeleteNamespace',
request_serializer=storage__pb2.DeleteNamespaceRequest.SerializeToString,
response_deserializer=storage__pb2.DeleteNamespaceResponse.FromString,
)
self.GetNamespace = channel.unary_unary(
'/bookkeeper.proto.storage.RootRangeService/GetNamespace',
request_serializer=storage__pb2.GetNamespaceRequest.SerializeToString,
response_deserializer=storage__pb2.GetNamespaceResponse.FromString,
)
self.CreateStream = channel.unary_unary(
'/bookkeeper.proto.storage.RootRangeService/CreateStream',
request_serializer=storage__pb2.CreateStreamRequest.SerializeToString,
response_deserializer=storage__pb2.CreateStreamResponse.FromString,
)
self.DeleteStream = channel.unary_unary(
'/bookkeeper.proto.storage.RootRangeService/DeleteStream',
request_serializer=storage__pb2.DeleteStreamRequest.SerializeToString,
response_deserializer=storage__pb2.DeleteStreamResponse.FromString,
)
self.GetStream = channel.unary_unary(
'/bookkeeper.proto.storage.RootRangeService/GetStream',
request_serializer=storage__pb2.GetStreamRequest.SerializeToString,
response_deserializer=storage__pb2.GetStreamResponse.FromString,
)
class RootRangeServiceServicer(object):
"""public service for metadata services
"""
def CreateNamespace(self, request, context):
"""
Namespace Methods
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteNamespace(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNamespace(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateStream(self, request, context):
"""
Stream Methods
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteStream(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetStream(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RootRangeServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateNamespace': grpc.unary_unary_rpc_method_handler(
servicer.CreateNamespace,
request_deserializer=storage__pb2.CreateNamespaceRequest.FromString,
response_serializer=storage__pb2.CreateNamespaceResponse.SerializeToString,
),
'DeleteNamespace': grpc.unary_unary_rpc_method_handler(
servicer.DeleteNamespace,
request_deserializer=storage__pb2.DeleteNamespaceRequest.FromString,
response_serializer=storage__pb2.DeleteNamespaceResponse.SerializeToString,
),
'GetNamespace': grpc.unary_unary_rpc_method_handler(
servicer.GetNamespace,
request_deserializer=storage__pb2.GetNamespaceRequest.FromString,
response_serializer=storage__pb2.GetNamespaceResponse.SerializeToString,
),
'CreateStream': grpc.unary_unary_rpc_method_handler(
servicer.CreateStream,
request_deserializer=storage__pb2.CreateStreamRequest.FromString,
response_serializer=storage__pb2.CreateStreamResponse.SerializeToString,
),
'DeleteStream': grpc.unary_unary_rpc_method_handler(
servicer.DeleteStream,
request_deserializer=storage__pb2.DeleteStreamRequest.FromString,
response_serializer=storage__pb2.DeleteStreamResponse.SerializeToString,
),
'GetStream': grpc.unary_unary_rpc_method_handler(
servicer.GetStream,
request_deserializer=storage__pb2.GetStreamRequest.FromString,
response_serializer=storage__pb2.GetStreamResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'bookkeeper.proto.storage.RootRangeService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class StorageContainerServiceStub(object):
"""A general range server service
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetStorageContainerEndpoint = channel.unary_unary(
'/bookkeeper.proto.storage.StorageContainerService/GetStorageContainerEndpoint',
request_serializer=storage__pb2.GetStorageContainerEndpointRequest.SerializeToString,
response_deserializer=storage__pb2.GetStorageContainerEndpointResponse.FromString,
)
class StorageContainerServiceServicer(object):
"""A general range server service
"""
def GetStorageContainerEndpoint(self, request, context):
"""Get the storage container endpoints
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_StorageContainerServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetStorageContainerEndpoint': grpc.unary_unary_rpc_method_handler(
servicer.GetStorageContainerEndpoint,
request_deserializer=storage__pb2.GetStorageContainerEndpointRequest.FromString,
response_serializer=storage__pb2.GetStorageContainerEndpointResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'bookkeeper.proto.storage.StorageContainerService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
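# Illustration (not part of the generated file, which is normally left unedited):
# wiring a servicer implementation into a gRPC server, and creating a client stub.
# The servicer subclass, port, and target address are placeholders.
def _example_serve(servicer, port=50051):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_RootRangeServiceServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    return server


def _example_client(target='localhost:50051'):
    channel = grpc.insecure_channel(target)
    stub = RootRangeServiceStub(channel)
    return stub  # e.g. stub.GetNamespace(storage__pb2.GetNamespaceRequest())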
| [
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler"
] | [((1397, 1504), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""bookkeeper.proto.storage.MetaRangeService"""', 'rpc_method_handlers'], {}), "(\n 'bookkeeper.proto.storage.MetaRangeService', rpc_method_handlers)\n", (1433, 1504), False, 'import grpc\n'), ((6791, 6898), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""bookkeeper.proto.storage.RootRangeService"""', 'rpc_method_handlers'], {}), "(\n 'bookkeeper.proto.storage.RootRangeService', rpc_method_handlers)\n", (6827, 6898), False, 'import grpc\n'), ((8291, 8405), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""bookkeeper.proto.storage.StorageContainerService"""', 'rpc_method_handlers'], {}), "(\n 'bookkeeper.proto.storage.StorageContainerService', rpc_method_handlers)\n", (8327, 8405), False, 'import grpc\n'), ((1126, 1340), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetActiveRanges'], {'request_deserializer': 'storage__pb2.GetActiveRangesRequest.FromString', 'response_serializer': 'storage__pb2.GetActiveRangesResponse.SerializeToString'}), '(servicer.GetActiveRanges,\n request_deserializer=storage__pb2.GetActiveRangesRequest.FromString,\n response_serializer=storage__pb2.GetActiveRangesResponse.SerializeToString)\n', (1161, 1340), False, 'import grpc\n'), ((5220, 5434), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.CreateNamespace'], {'request_deserializer': 'storage__pb2.CreateNamespaceRequest.FromString', 'response_serializer': 'storage__pb2.CreateNamespaceResponse.SerializeToString'}), '(servicer.CreateNamespace,\n request_deserializer=storage__pb2.CreateNamespaceRequest.FromString,\n response_serializer=storage__pb2.CreateNamespaceResponse.SerializeToString)\n', (5255, 5434), False, 'import grpc\n'), ((5492, 5706), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.DeleteNamespace'], {'request_deserializer': 'storage__pb2.DeleteNamespaceRequest.FromString', 'response_serializer': 'storage__pb2.DeleteNamespaceResponse.SerializeToString'}), '(servicer.DeleteNamespace,\n request_deserializer=storage__pb2.DeleteNamespaceRequest.FromString,\n response_serializer=storage__pb2.DeleteNamespaceResponse.SerializeToString)\n', (5527, 5706), False, 'import grpc\n'), ((5761, 5966), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetNamespace'], {'request_deserializer': 'storage__pb2.GetNamespaceRequest.FromString', 'response_serializer': 'storage__pb2.GetNamespaceResponse.SerializeToString'}), '(servicer.GetNamespace,\n request_deserializer=storage__pb2.GetNamespaceRequest.FromString,\n response_serializer=storage__pb2.GetNamespaceResponse.SerializeToString)\n', (5796, 5966), False, 'import grpc\n'), ((6021, 6226), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.CreateStream'], {'request_deserializer': 'storage__pb2.CreateStreamRequest.FromString', 'response_serializer': 'storage__pb2.CreateStreamResponse.SerializeToString'}), '(servicer.CreateStream,\n request_deserializer=storage__pb2.CreateStreamRequest.FromString,\n response_serializer=storage__pb2.CreateStreamResponse.SerializeToString)\n', (6056, 6226), False, 'import grpc\n'), ((6281, 6486), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.DeleteStream'], {'request_deserializer': 
'storage__pb2.DeleteStreamRequest.FromString', 'response_serializer': 'storage__pb2.DeleteStreamResponse.SerializeToString'}), '(servicer.DeleteStream,\n request_deserializer=storage__pb2.DeleteStreamRequest.FromString,\n response_serializer=storage__pb2.DeleteStreamResponse.SerializeToString)\n', (6316, 6486), False, 'import grpc\n'), ((6538, 6734), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetStream'], {'request_deserializer': 'storage__pb2.GetStreamRequest.FromString', 'response_serializer': 'storage__pb2.GetStreamResponse.SerializeToString'}), '(servicer.GetStream,\n request_deserializer=storage__pb2.GetStreamRequest.FromString,\n response_serializer=storage__pb2.GetStreamResponse.SerializeToString)\n', (6573, 6734), False, 'import grpc\n'), ((7984, 8240), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.GetStorageContainerEndpoint'], {'request_deserializer': 'storage__pb2.GetStorageContainerEndpointRequest.FromString', 'response_serializer': 'storage__pb2.GetStorageContainerEndpointResponse.SerializeToString'}), '(servicer.GetStorageContainerEndpoint,\n request_deserializer=storage__pb2.GetStorageContainerEndpointRequest.\n FromString, response_serializer=storage__pb2.\n GetStorageContainerEndpointResponse.SerializeToString)\n', (8019, 8240), False, 'import grpc\n')] |
# noqa D100
import mock
def test_touch_setup(cap1xxx):
"""Test that touch initialises Cap1166."""
from gfxhat import touch
touch.setup()
cap1xxx.Cap1166.assert_called_with(i2c_addr=touch.I2C_ADDR)
def test_touch_names():
"""Test that get_name returns a sensible value."""
from gfxhat import touch
assert touch.get_name(0) == 'up'
def test_touch_set_led(cap1xxx):
"""Test that set_led calls Cap1166.set_led_state."""
from gfxhat import touch
touch.set_led(0, True)
cap1xxx.Cap1166().set_led_state.assert_called_with(touch.LED_MAPPING[0], True)
def test_touch_high_sensitivity(cap1xxx):
"""Test that high sensitivity calls custom commands.
Tests should be updated if/when this behaviour is formalised in Cap1xxx.
"""
from gfxhat import touch
touch.high_sensitivity()
cap1xxx.Cap1166()._write_byte.assert_has_calls([
mock.call(0x00, 0b11000000),
mock.call(0x1f, 0b00000000)
])
def test_touch_repeat(cap1xxx):
"""Test that touch repeat calls Cap1166.enable_repeat()."""
from gfxhat import touch
touch.enable_repeat(True)
cap1xxx.Cap1166().enable_repeat.assert_called_with(0b11111111)
touch.enable_repeat(False)
cap1xxx.Cap1166().enable_repeat.assert_called_with(0b00000000)
def test_touch_repeat_rate(cap1xxx):
"""Test that touch repeat rate calls Cap1166.set_repeat_rate()."""
from gfxhat import touch
touch.set_repeat_rate(35)
cap1xxx.Cap1166().set_repeat_rate.assert_called_with(35)
def test_attach_handler(cap1xxx):
"""Test that both handler modes call Cap1166.on()."""
from gfxhat import touch
decorator = touch.on([1])
def handler():
pass
decorator(handler)
cap1xxx.Cap1166().on.assert_has_calls([
mock.call(channel=1, event='press', handler=handler),
mock.call(channel=1, event='release', handler=handler),
mock.call(channel=1, event='held', handler=handler),
])
touch.on([0], handler=handler)
cap1xxx.Cap1166().on.assert_has_calls([
mock.call(channel=0, event='press', handler=handler),
mock.call(channel=0, event='release', handler=handler),
mock.call(channel=0, event='held', handler=handler),
])
| [
"gfxhat.touch.get_name",
"gfxhat.touch.setup",
"gfxhat.touch.set_repeat_rate",
"gfxhat.touch.on",
"gfxhat.touch.high_sensitivity",
"mock.call",
"gfxhat.touch.enable_repeat",
"gfxhat.touch.set_led"
] | [((146, 159), 'gfxhat.touch.setup', 'touch.setup', ([], {}), '()\n', (157, 159), False, 'from gfxhat import touch\n'), ((515, 537), 'gfxhat.touch.set_led', 'touch.set_led', (['(0)', '(True)'], {}), '(0, True)\n', (528, 537), False, 'from gfxhat import touch\n'), ((857, 881), 'gfxhat.touch.high_sensitivity', 'touch.high_sensitivity', ([], {}), '()\n', (879, 881), False, 'from gfxhat import touch\n'), ((1160, 1185), 'gfxhat.touch.enable_repeat', 'touch.enable_repeat', (['(True)'], {}), '(True)\n', (1179, 1185), False, 'from gfxhat import touch\n'), ((1261, 1287), 'gfxhat.touch.enable_repeat', 'touch.enable_repeat', (['(False)'], {}), '(False)\n', (1280, 1287), False, 'from gfxhat import touch\n'), ((1507, 1532), 'gfxhat.touch.set_repeat_rate', 'touch.set_repeat_rate', (['(35)'], {}), '(35)\n', (1528, 1532), False, 'from gfxhat import touch\n'), ((1742, 1755), 'gfxhat.touch.on', 'touch.on', (['[1]'], {}), '([1])\n', (1750, 1755), False, 'from gfxhat import touch\n'), ((2070, 2100), 'gfxhat.touch.on', 'touch.on', (['[0]'], {'handler': 'handler'}), '([0], handler=handler)\n', (2078, 2100), False, 'from gfxhat import touch\n'), ((356, 373), 'gfxhat.touch.get_name', 'touch.get_name', (['(0)'], {}), '(0)\n', (370, 373), False, 'from gfxhat import touch\n'), ((947, 964), 'mock.call', 'mock.call', (['(0)', '(192)'], {}), '(0, 192)\n', (956, 964), False, 'import mock\n'), ((985, 1001), 'mock.call', 'mock.call', (['(31)', '(0)'], {}), '(31, 0)\n', (994, 1001), False, 'import mock\n'), ((1874, 1926), 'mock.call', 'mock.call', ([], {'channel': '(1)', 'event': '"""press"""', 'handler': 'handler'}), "(channel=1, event='press', handler=handler)\n", (1883, 1926), False, 'import mock\n'), ((1937, 1991), 'mock.call', 'mock.call', ([], {'channel': '(1)', 'event': '"""release"""', 'handler': 'handler'}), "(channel=1, event='release', handler=handler)\n", (1946, 1991), False, 'import mock\n'), ((2002, 2053), 'mock.call', 'mock.call', ([], {'channel': '(1)', 'event': '"""held"""', 'handler': 'handler'}), "(channel=1, event='held', handler=handler)\n", (2011, 2053), False, 'import mock\n'), ((2157, 2209), 'mock.call', 'mock.call', ([], {'channel': '(0)', 'event': '"""press"""', 'handler': 'handler'}), "(channel=0, event='press', handler=handler)\n", (2166, 2209), False, 'import mock\n'), ((2220, 2274), 'mock.call', 'mock.call', ([], {'channel': '(0)', 'event': '"""release"""', 'handler': 'handler'}), "(channel=0, event='release', handler=handler)\n", (2229, 2274), False, 'import mock\n'), ((2285, 2336), 'mock.call', 'mock.call', ([], {'channel': '(0)', 'event': '"""held"""', 'handler': 'handler'}), "(channel=0, event='held', handler=handler)\n", (2294, 2336), False, 'import mock\n')] |
import ast
import os
import re
from .writer import Writer
from .constants import *
from .utils import unique, Counter
class Stmt:
def __init__(self, node, defs, uses):
self.node_ = node
self.succ_ = []
self.defs_ = unique(defs)
self.uses_ = unique(uses)
def add_succ(self, node):
self.succ_.append(node)
def get_loc(self):
if isinstance(self.node_, ast.If):
return self._get_loc(self.node_.test)
elif isinstance(self.node_, ast.For):
return self._get_loc(self.node_.target)
elif isinstance(self.node_, ast.While):
return self._get_loc(self.node_.test)
elif isinstance(self.node_, ast.withitem):
return self._get_loc(self.node_.context_expr)
else:
return self._get_loc(self.node_)
def _get_loc(self, node):
return node.lineno, node.col_offset + 1, node.end_lineno, node.end_col_offset + 1
def get_meta(self):
if isinstance(self.node_, ast.arg):
return META_ARG
elif isinstance(self.node_, ast.Return):
return META_RET
elif isinstance(self.node_, ast.Call):
return META_CALL
else:
return 0
def write(self, analysis, writer):
writer.write_token(TOKEN_STATEMENT)
# Statement id
self._write_stmt_id(analysis, writer, self.node_)
# Successors
writer.write_u8(len(self.succ_))
for succ in self.succ_:
self._write_stmt_id(analysis, writer, succ)
# Defs
writer.write_u8(len(self.defs_))
for access in self.defs_:
self._write_access(analysis, writer, access)
# Uses
writer.write_u8(len(self.uses_))
for access in self.uses_:
self._write_access(analysis, writer, access)
# Location
loc = self.get_loc()
writer.write_u64(analysis.file_id_)
writer.write_u32(loc[0])
writer.write_u32(loc[1])
writer.write_u32(loc[2])
writer.write_u32(loc[3])
# Metadata
writer.write_u8(self.get_meta())
def _write_stmt_id(self, analysis, writer, node):
writer.write_u64(analysis.file_id_)
writer.write_u64(analysis.nodes_.get(node))
def _write_access(self, analysis, writer, access):
if access.is_scalar():
writer.write_token(TOKEN_VALUE_SCALAR)
writer.write_u64(analysis.values_.get(access))
elif access.is_structural():
writer.write_token(TOKEN_VALUE_STRUCTURAL)
self._write_access(analysis, writer, access.base_)
self._write_access(analysis, writer, access.accessors_)
elif access.is_array_like():
writer.write_token(TOKEN_VALUE_ARRAY_LIKE)
self._write_access(analysis, writer, access.base_)
writer.write_u32(len(access.accessors_))
for index in access.accessors_:
self._write_access(analysis, writer, index)
class StaticData:
def __init__(self, analysis):
self.analysis_ = analysis
self.functions_ = Counter()
def write(self, outdir=None):
if outdir is None:
outdir = os.getcwd()
prefix = os.path.commonprefix([os.getcwd() + os.path.sep, self.analysis_.filename_])
output = self.analysis_.filename_.replace(prefix, '') + '.aard'
output = os.path.join(outdir, output)
output = os.path.realpath(output)
os.makedirs(os.path.dirname(output), exist_ok=True)
writer = Writer(output)
writer.write_str('AARD/S1')
for func, body in self.analysis_.ctx_store_.items():
# Empty function
if all([len(block) == 0 for block in body]):
continue
func_name = re.sub(r'\[\d+\]', '', func)
func_id = self.functions_.get_inc(func_name)
# Can happen when e.g. @property with @foo.setter decorators are
# used.
if func_id != 1:
func_name += f'@{func_id}'
writer.write_token(TOKEN_FUNCTION)
writer.write_cstr(func_name)
stmts = self._get_stmts(body)
for stmt in stmts:
stmt.write(self.analysis_, writer)
writer.write_token(TOKEN_FILENAMES)
writer.write_u32(1)
writer.write_u64(self.analysis_.file_id_)
writer.write_cstr(self.analysis_.filename_)
writer.close()
def _get_stmts(self, func_body):
        # Normalize the basic blocks first. It properly reconnects the edges of
# empty basic blocks.
for block in func_body:
block.normalize()
stmts = []
for block in func_body:
prev = None
for node in block:
defs = self.analysis_.defs_.get(node, [])
uses = self.analysis_.uses_.get(node, [])
stmt = Stmt(node, defs, uses)
stmts.append(stmt)
if prev is not None:
prev.add_succ(stmt.node_)
prev = stmt
if prev is not None:
for succ in block.succ():
if len(succ) > 0:
prev.add_succ(succ.entry())
else:
assert len(list(succ.succ())) == 0
return stmts
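# --- Illustrative sketch (not part of the original analyzer) -----------------
# Shows how a Stmt wraps a parsed AST node: locations are reported with 1-based
# columns and the metadata flag follows the node type. Assumes META_RET from
# .constants and unique() from .utils behave as used above; run the module with
# `python -m <package>.<module>` so the relative imports resolve.
if __name__ == "__main__":
    tree = ast.parse("def f(x):\n    return x + 1\n")
    ret_node = tree.body[0].body[0]           # the `return x + 1` node
    stmt = Stmt(ret_node, defs=[], uses=[])
    print(stmt.get_loc())                     # (lineno, col+1, end_lineno, end_col+1)
    print(stmt.get_meta() == META_RET)        # True for return statements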
| [
"os.path.join",
"os.getcwd",
"os.path.realpath",
"os.path.dirname",
"re.sub"
] | [((3428, 3456), 'os.path.join', 'os.path.join', (['outdir', 'output'], {}), '(outdir, output)\n', (3440, 3456), False, 'import os\n'), ((3474, 3498), 'os.path.realpath', 'os.path.realpath', (['output'], {}), '(output)\n', (3490, 3498), False, 'import os\n'), ((3233, 3244), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3242, 3244), False, 'import os\n'), ((3520, 3543), 'os.path.dirname', 'os.path.dirname', (['output'], {}), '(output)\n', (3535, 3543), False, 'import os\n'), ((3827, 3857), 're.sub', 're.sub', (['"""\\\\[\\\\d+\\\\]"""', '""""""', 'func'], {}), "('\\\\[\\\\d+\\\\]', '', func)\n", (3833, 3857), False, 'import re\n'), ((3285, 3296), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3294, 3296), False, 'import os\n')] |
from assertpy import assert_that
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
CREATE_USER_URL = reverse("user:create-user")
class PublicUserApiTest(TestCase):
def setUp(self):
self.client = APIClient()
def get_create_payload(self, **kwargs):
return {
"email": kwargs.get("email", "<EMAIL>"),
"password": kwargs.get("email", "<PASSWORD>"),
"name": kwargs.get("email", "<NAME>"),
}
def test_create_valid_success(self):
payload = self.get_create_payload()
response = self.client.post(CREATE_USER_URL, payload)
user = get_user_model().objects.get(**response.data)
assert_that(response.status_code).is_equal_to(status.HTTP_201_CREATED)
assert_that(user.check_password(payload["password"])).is_true()
assert_that("password").is_not_in(response.data)
def test_create_existing_user(self):
payload = self.get_create_payload()
self.client.post(CREATE_USER_URL, payload)
response = self.client.post(CREATE_USER_URL, payload)
assert_that(response.status_code).is_equal_to(status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
payload = self.get_create_payload(password="<PASSWORD>")
response = self.client.post(CREATE_USER_URL, payload)
user_exists = get_user_model().objects.filter(email=payload["email"]).exists()
assert_that(response.status_code).is_equal_to(status.HTTP_400_BAD_REQUEST)
assert_that(user_exists).is_false()
| [
"rest_framework.test.APIClient",
"assertpy.assert_that",
"django.contrib.auth.get_user_model",
"django.urls.reverse"
] | [((240, 267), 'django.urls.reverse', 'reverse', (['"""user:create-user"""'], {}), "('user:create-user')\n", (247, 267), False, 'from django.urls import reverse\n'), ((348, 359), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (357, 359), False, 'from rest_framework.test import APIClient\n'), ((813, 846), 'assertpy.assert_that', 'assert_that', (['response.status_code'], {}), '(response.status_code)\n', (824, 846), False, 'from assertpy import assert_that\n'), ((964, 987), 'assertpy.assert_that', 'assert_that', (['"""password"""'], {}), "('password')\n", (975, 987), False, 'from assertpy import assert_that\n'), ((1221, 1254), 'assertpy.assert_that', 'assert_that', (['response.status_code'], {}), '(response.status_code)\n', (1232, 1254), False, 'from assertpy import assert_that\n'), ((1559, 1592), 'assertpy.assert_that', 'assert_that', (['response.status_code'], {}), '(response.status_code)\n', (1570, 1592), False, 'from assertpy import assert_that\n'), ((1642, 1666), 'assertpy.assert_that', 'assert_that', (['user_exists'], {}), '(user_exists)\n', (1653, 1666), False, 'from assertpy import assert_that\n'), ((758, 774), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (772, 774), False, 'from django.contrib.auth import get_user_model\n'), ((1485, 1501), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1499, 1501), False, 'from django.contrib.auth import get_user_model\n')] |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
#Code starts here
data.rename(columns = {'Total' :'Total_Medals'},inplace = True)
print(data.head(10))
# --------------
#Code starts here
data['Better_Event']= np.where(data['Total_Summer']>data['Total_Winter'],'Summer',
(np.where(data['Total_Summer']<data['Total_Winter'],'Winter','Both')))
#print(data['Better_Event'])
summ_med= data['Better_Event'].value_counts()
print(summ_med)
#wint_med=data['Winter'].value_counts()
if summ_med[0]>summ_med[1]:
better_event = 'Summer'
elif summ_med[0]<summ_med[1]:
better_event = 'Winter'
else:
better_event = 'Both'
print(better_event)
#print(wint_med)
#better_event = np.where(summ_med>wint_med,'Summer','Winter')
#data['Better_Event'].value.counts()
print(better_event)
# --------------
#Code starts here
top_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']].copy()
top_countries.drop(top_countries.tail(1).index, inplace = True)
def top_ten(top_countries , top):
country_list = []
country_list=top_countries.nlargest(10,top)
return country_list
top_10_s = top_ten(top_countries,'Total_Summer')
top_10_summer =[]
top_10_summer = top_10_s['Country_Name'].tolist()
top_10_win = top_ten(top_countries,'Total_Winter')
top_10_winter =[]
top_10_winter = top_10_win['Country_Name'].tolist()
top_tan = top_ten(top_countries,'Total_Medals')
top_10 =[]
top_10 = top_tan['Country_Name'].tolist()
print(top_10_summer)
print(top_10_winter)
print(top_10)
com= set(top_10_summer).intersection(top_10_winter, top_10)
common=list(com)
print(common)
# --------------
#Code starts here
summer_df = data[data['Country_Name'].isin(top_10_summer)]
print(summer_df)
winter_df = data[data['Country_Name'].isin(top_10_winter)]
print(winter_df)
top_df = data[data['Country_Name'].isin(top_10)]
print(top_df)
plt.bar(summer_df['Country_Name'],summer_df['Total_Summer'])
plt.show()
plt.bar(winter_df['Country_Name'],winter_df['Total_Winter'],color = 'g')
plt.show()
plt.bar(top_df['Country_Name'],top_df['Total_Medals'],color = 'r')
plt.show()
# --------------
#Code starts here
# Summer Ratio
summer_df['Golden_Ratio'] = summer_df['Gold_Summer']/summer_df['Total_Summer']
sort_by = summer_df.sort_values('Golden_Ratio',ascending=False)
summer_max_ratio = sort_by['Golden_Ratio'].iloc[0]
print(summer_max_ratio)
summer_country_gold = sort_by['Country_Name'].iloc[0]
print(summer_country_gold)
# Winter Ratio
winter_df['Golden_Ratio'] = winter_df['Gold_Winter']/winter_df['Total_Winter']
sort_by_1 = winter_df.sort_values('Golden_Ratio',ascending=False)
winter_max_ratio = sort_by_1['Golden_Ratio'].iloc[0]
print(winter_max_ratio)
winter_country_gold = sort_by_1['Country_Name'].iloc[0]
print(winter_country_gold)
# Total Top Ratio
top_df['Golden_Ratio'] = top_df['Gold_Total']/top_df['Total_Medals']
sort_by_2 = top_df.sort_values('Golden_Ratio',ascending=False)
top_max_ratio = sort_by_2['Golden_Ratio'].iloc[0]
print(top_max_ratio)
top_country_gold= sort_by_2['Country_Name'].iloc[0]
print(top_country_gold)
# --------------
#Code starts here
#data_1 = data.copy()
#data_1=data_1.drop(data_1.tail(1).index, inplace = True)
#print(data_1.head(1))
data_1 = data.iloc[:-1]
#GT =data_1['Gold_Total'].value_counts()
#print(GT)
#GTT =GT*3
#print(GTT)
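# Weighted points: each gold counts 3, each silver 2 and each bronze 1.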
data_1['Total_Points'] = data_1.loc[:,'Gold_Total'].mul(3)+data_1.loc[:,'Silver_Total'].mul(2)+data_1.loc[:,'Bronze_Total'].mul(1)
sort_by5 = data_1.sort_values('Total_Points',ascending=False)
most_points = sort_by5['Total_Points'].iloc[0]
print(most_points)
best_country = sort_by5['Country_Name'].iloc[0]
print(best_country)
# --------------
#Code starts here
best = data[data['Country_Name'] == best_country]
sd = best[['Gold_Total','Silver_Total','Bronze_Total']]
best = sd
best.plot.bar()
plt.xlabel("United States")
plt.ylabel("Medals Tally")
plt.xticks(rotation =45)
plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.show"
] | [((150, 167), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (161, 167), True, 'import pandas as pd\n'), ((2018, 2079), 'matplotlib.pyplot.bar', 'plt.bar', (["summer_df['Country_Name']", "summer_df['Total_Summer']"], {}), "(summer_df['Country_Name'], summer_df['Total_Summer'])\n", (2025, 2079), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2089), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2087, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2162), 'matplotlib.pyplot.bar', 'plt.bar', (["winter_df['Country_Name']", "winter_df['Total_Summer']"], {'color': '"""g"""'}), "(winter_df['Country_Name'], winter_df['Total_Summer'], color='g')\n", (2097, 2162), True, 'import matplotlib.pyplot as plt\n'), ((2163, 2173), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2171, 2173), True, 'import matplotlib.pyplot as plt\n'), ((2174, 2240), 'matplotlib.pyplot.bar', 'plt.bar', (["top_df['Country_Name']", "top_df['Total_Summer']"], {'color': '"""r"""'}), "(top_df['Country_Name'], top_df['Total_Summer'], color='r')\n", (2181, 2240), True, 'import matplotlib.pyplot as plt\n'), ((2241, 2251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2249, 2251), True, 'import matplotlib.pyplot as plt\n'), ((3991, 4018), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (4001, 4018), True, 'import matplotlib.pyplot as plt\n'), ((4019, 4045), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals Tally')\n", (4029, 4045), True, 'import matplotlib.pyplot as plt\n'), ((4046, 4069), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (4056, 4069), True, 'import matplotlib.pyplot as plt\n'), ((4071, 4081), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4079, 4081), True, 'import matplotlib.pyplot as plt\n'), ((404, 475), 'numpy.where', 'np.where', (["(data['Total_Summer'] < data['Total_Winter'])", '"""Winter"""', '"""Both"""'], {}), "(data['Total_Summer'] < data['Total_Winter'], 'Winter', 'Both')\n", (412, 475), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-09-10 21:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('fcm_app', '0006_auto_20170910_2226'),
]
operations = [
migrations.RemoveField(
model_name='fcm',
name='user',
),
]
| [
"django.db.migrations.RemoveField"
] | [((290, 343), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""fcm"""', 'name': '"""user"""'}), "(model_name='fcm', name='user')\n", (312, 343), False, 'from django.db import migrations\n')] |
import json
def fetch_tweets(oauth, params):
url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
req = oauth.get(url, params=params)
if req.status_code != 200:
raise TwitterAPIError(req)
return json.loads(req.text)
def fetch_tweets_loop(oauth, params, loop):
tweets = []
params["count"] = 200
params["include_rts"] = 1
for i in range(loop):
req = fetch_tweets(oauth, params)
if len(req) < 2:
tweets.extend(req)
break
tweets.extend(req[:-1])
params["max_id"] = req[-1]["id"]
return tweets
class TwitterAPIError(Exception):
def __init__(self, req):
self.req = req
def __str__(self):
return str(self.req.status_code)
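# --- Usage sketch (illustrative; the OAuth credentials and screen_name below
# are placeholders). fetch_tweets_loop pages backwards through the timeline via
# max_id, requesting 200 tweets per call for at most `loop` calls.
if __name__ == "__main__":
    from requests_oauthlib import OAuth1Session
    oauth = OAuth1Session("CONSUMER_KEY", "CONSUMER_SECRET",
                          "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
    tweets = fetch_tweets_loop(oauth, {"screen_name": "twitter"}, loop=3)
    print(len(tweets))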
| [
"json.loads"
] | [((231, 251), 'json.loads', 'json.loads', (['req.text'], {}), '(req.text)\n', (241, 251), False, 'import json\n')] |
#
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by <NAME>, QuTech.
# 4. Neither the name of the QuTech organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from SimulaQron.virtNode.crudeSimulator import simpleEngine
def testSimpleEngine():
print("RUNNING TESTS: Simple Engine\n")
tracingTest()
gateTest()
measureTest()
def tracingTest():
print("Testing the partial trace...")
se = simpleEngine(10)
se2 = simpleEngine(10)
se.add_fresh_qubit()
se.add_fresh_qubit()
se.add_fresh_qubit()
se2.add_fresh_qubit()
se2.add_fresh_qubit()
se2.add_fresh_qubit()
se.apply_X(0)
se.apply_X(2)
se2.apply_X(0)
se2.apply_X(1)
se.remove_qubit(1)
se2.remove_qubit(2)
if se.qubitReg != se2.qubitReg:
print("ERROR: Partial trace failed\n")
print("ok\n")
def gateTest():
print("Testing the gates...")
se = simpleEngine(10)
se.add_fresh_qubit()
savedQubit = se.qubitReg
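    # H·Z·H is equivalent to X, so the sequence H, Z, H, X amounts to the
    # identity and should leave the register unchanged.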
se.apply_H(0)
se.apply_Z(0)
se.apply_H(0)
se.apply_X(0)
if savedQubit != se.qubitReg:
print("ERROR: Gate test failed\n")
print("ok\n")
def measureTest():
print("Testing a measurement...")
se = simpleEngine()
se.add_fresh_qubit()
outcome = se.measure_qubit(0)
if outcome != 0:
print("ERROR: Measurement test failed\n")
se.add_fresh_qubit()
se.apply_X(0)
outcome = se.measure_qubit(0)
if outcome != 1:
print("ERROR: Measurement test failed\n")
print("ok\n")
def main():
testSimpleEngine()
if __name__ == "__main__":
    main()
| [
"SimulaQron.virtNode.crudeSimulator.simpleEngine"
] | [((1952, 1968), 'SimulaQron.virtNode.crudeSimulator.simpleEngine', 'simpleEngine', (['(10)'], {}), '(10)\n', (1964, 1968), False, 'from SimulaQron.virtNode.crudeSimulator import simpleEngine\n'), ((1979, 1995), 'SimulaQron.virtNode.crudeSimulator.simpleEngine', 'simpleEngine', (['(10)'], {}), '(10)\n', (1991, 1995), False, 'from SimulaQron.virtNode.crudeSimulator import simpleEngine\n'), ((2438, 2454), 'SimulaQron.virtNode.crudeSimulator.simpleEngine', 'simpleEngine', (['(10)'], {}), '(10)\n', (2450, 2454), False, 'from SimulaQron.virtNode.crudeSimulator import simpleEngine\n'), ((2747, 2761), 'SimulaQron.virtNode.crudeSimulator.simpleEngine', 'simpleEngine', ([], {}), '()\n', (2759, 2761), False, 'from SimulaQron.virtNode.crudeSimulator import simpleEngine\n')] |
import logging
import numpy as np
import keras
from keras_text_cls.model.base_model import BaseModel
from keras_text_cls.model.utils import init_embedding_layer
from keras_text_cls.layer import MaskedGlobalAvgPool1D
class TextMLP(BaseModel):
"""
Multiple Layer Perceptron for Text Classification
Attributes
----------
num_classes: int
the number of classes
embedding_dim: int
the dimension of embedding vector, default is 128
embedding_matrix: 2d np.array
pre-trained embedding matrix is an array of embedding vector,
where index 0 must be reserved for SYMBOL_PADDING, index 1 must be reserved for SYMBOL_UNKNOWN
default is None
embedding_trainable: bool
Is the embedding layer trainable in the network. It must be set to True, when embedding matrix is None.
Default is False.
Set False if embedding matrix is pre-trained and set in the model
embedding_vocab_size: int
the vocabulary size for embedding.
Default is None, which indicates the size is equal to the length of embedding matrix
embedding_vocab_size must be set to initialize the size of the embedding layer, when embedding_matrix=None
pooling_strategy: str
pooling strategy for word sequences, either "REDUCE_MEAN" or "REDUCE_MAX"
max_seq_len: int
        maximum number of words kept for each text; texts longer than max_seq_len are truncated,
        shorter texts are padded
num_hidden_units: integer array
an array of positive integers, indicating the number of units for each hidden layer
hidden_activation: str
activation function of neutral unit, default is "relu"
dropout: float (0,1)
dropout rate, must be equal or greater than 0 and equal or less than 1, default is 0.5
multi_label: bool
is the labels are multi-label classification, default is True
"""
def __init__(self, num_classes,
embedding_dim=128, embedding_matrix=None, embedding_trainable=False, embedding_vocab_size=None,
pooling_strategy="REDUCE_MEAN", max_seq_len=300,
num_hidden_units=[100], hidden_activation="relu",
dropout=0.5, multi_label=True):
super(TextMLP, self).__init__(name='TextMLP')
self.num_classes = num_classes
self.embedding_dim = embedding_dim
self.embedding_matrix = embedding_matrix
self.embedding_trainable = embedding_trainable
self.pooling_strategy = pooling_strategy
self.max_seq_len = max_seq_len
self.num_hidden_units = num_hidden_units
self.hidden_activation = hidden_activation
self.dropout = dropout
self.multi_label = multi_label
layer_input = keras.layers.Input(shape=(self.max_seq_len,), dtype='int32')
layer_embedding = init_embedding_layer(embedding_matrix, embedding_dim, embedding_vocab_size,
embedding_trainable, max_seq_len, mask_zero=True)
if pooling_strategy == "REDUCE_MEAN":
layer_pooling = MaskedGlobalAvgPool1D()
elif pooling_strategy == "REDUCE_MAX":
layer_pooling = keras.layers.MaxPool1D()
else:
raise ValueError("Unknown pooling strategy, only REDUCE_MEAN, REDUCE_MAX are supported")
layer_hiddens = []
prev_input_dim = embedding_dim
for n in num_hidden_units:
layer_hiddens.append(
keras.layers.Dense(n, input_dim=prev_input_dim, activation=hidden_activation)
)
if dropout > 0:
layer_hiddens.append(keras.layers.Dropout(dropout))
prev_input_dim = n
if multi_label:
output_activation = "sigmoid"
else:
output_activation = "softmax"
layer_output = keras.layers.Dense(num_classes, activation=output_activation)
x = layer_embedding(layer_input)
x = layer_pooling(x)
for hidden in layer_hiddens:
x = hidden(x)
output = layer_output(x)
self.model = keras.Model(layer_input, output)
def call(self, inputs):
"""
        :param inputs: 2-dim np.array where each element is a word index
:return: predicted class probabilities
"""
assert len(inputs.shape) == 2
return self.model(inputs)
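# --- Usage sketch (illustrative, not part of the library) --------------------
# Builds a small randomly-initialised TextMLP on dummy word-index input. The
# vocabulary size, sequence length and class count are arbitrary; the inner
# Keras model is used directly for prediction.
if __name__ == "__main__":
    clf = TextMLP(num_classes=3,
                  embedding_dim=32,
                  embedding_trainable=True,
                  embedding_vocab_size=1000,
                  max_seq_len=50,
                  num_hidden_units=[64])
    dummy = np.random.randint(0, 1000, size=(2, 50)).astype("int32")
    print(clf.model.predict(dummy).shape)   # expected: (2, 3)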
| [
"keras.Model",
"keras.layers.MaxPool1D",
"keras.layers.Input",
"keras_text_cls.model.utils.init_embedding_layer",
"keras_text_cls.layer.MaskedGlobalAvgPool1D",
"keras.layers.Dense",
"keras.layers.Dropout"
] | [((2783, 2843), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(self.max_seq_len,)', 'dtype': '"""int32"""'}), "(shape=(self.max_seq_len,), dtype='int32')\n", (2801, 2843), False, 'import keras\n'), ((2871, 3000), 'keras_text_cls.model.utils.init_embedding_layer', 'init_embedding_layer', (['embedding_matrix', 'embedding_dim', 'embedding_vocab_size', 'embedding_trainable', 'max_seq_len'], {'mask_zero': '(True)'}), '(embedding_matrix, embedding_dim, embedding_vocab_size,\n embedding_trainable, max_seq_len, mask_zero=True)\n', (2891, 3000), False, 'from keras_text_cls.model.utils import init_embedding_layer\n'), ((3875, 3936), 'keras.layers.Dense', 'keras.layers.Dense', (['num_classes'], {'activation': 'output_activation'}), '(num_classes, activation=output_activation)\n', (3893, 3936), False, 'import keras\n'), ((4125, 4157), 'keras.Model', 'keras.Model', (['layer_input', 'output'], {}), '(layer_input, output)\n', (4136, 4157), False, 'import keras\n'), ((3119, 3142), 'keras_text_cls.layer.MaskedGlobalAvgPool1D', 'MaskedGlobalAvgPool1D', ([], {}), '()\n', (3140, 3142), False, 'from keras_text_cls.layer import MaskedGlobalAvgPool1D\n'), ((3218, 3242), 'keras.layers.MaxPool1D', 'keras.layers.MaxPool1D', ([], {}), '()\n', (3240, 3242), False, 'import keras\n'), ((3510, 3587), 'keras.layers.Dense', 'keras.layers.Dense', (['n'], {'input_dim': 'prev_input_dim', 'activation': 'hidden_activation'}), '(n, input_dim=prev_input_dim, activation=hidden_activation)\n', (3528, 3587), False, 'import keras\n'), ((3667, 3696), 'keras.layers.Dropout', 'keras.layers.Dropout', (['dropout'], {}), '(dropout)\n', (3687, 3696), False, 'import keras\n')] |
"""Tic Tac Toe Game.
Rules:
The object of Tic Tac Toe is to get three in a row.
You play on a three by three game board.
The first player is known as X and the second is O.
Players alternate placing Xs and Os on the game board,
until either opponent has three in a row or all nine squares are filled.
X always goes first, and in the event that no one has three in a row,
the stalemate is called a cat game.
Version 1.0: Player vs Dumb AI
Human Player decide if he wants to start or not.
Then roles are switched for every game.
Dumb AI will place a mark on a random slot.
Version 2.0: Dumb AI + score limit
Player scores are memorized and displayed.
Player A starts, player B choose score limit.
# sets score limit
choose_score_limit(player=ai_player)
game = Game()
game.score_limit = 123
game.current_turn = "X"
"""
import json
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Literal
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
from tic_tac_toe_game.AI.mcts import mcts_move
from tic_tac_toe_game.AI.naive import naive_move
from tic_tac_toe_game.AI.negamax import negamax_move
from tic_tac_toe_game.errors import OverwriteCellError
Coordinates = Sequence[int]
class Move:
"""Move performed by a Player on the Grid."""
def __init__(
self,
x: Union[int, float, str],
y: Union[int, float, str],
player: Union[int, float, str],
) -> None:
"""Inits."""
self.x = int(x)
self.y = int(y)
self._player = int(player)
@property
def player(self) -> int:
"""TODO."""
return self._player
@player.setter
def player(self, value: Union[int, float, str]) -> None:
if int(value) != 1 and int(value) != -1:
raise ValueError("Player should be 1 or -1")
self._player = int(value)
@property
def coordinates(self) -> Coordinates:
"""TODO."""
return self.x, self.y
def __repr__(self) -> str:
"""Repr."""
return f"{self.__class__.__name__}({self.x!r}, {self.y!r}, {self.player!r})"
def to_dict(self) -> Dict[str, Union[int, str]]:
"""Converts the Move instance to a dictionary."""
return dict(
x=self.x,
y=self.y,
player=self.player,
__class=self.__class__.__name__,
)
@classmethod
def from_dict(cls, data: Dict[str, Union[int, str]]) -> "Move":
"""Constructs Move instance from dictionary."""
data = dict(data) # local copy
if not isinstance(data, dict) or data.pop("__class") != cls.__name__:
raise ValueError
return cls(**data)
def __eq__(self, other: "Move"):
"""Check whether other equals self elementwise."""
if not isinstance(other, Move):
return False
return self.__dict__ == other.__dict__
class Board:
"""Board class.
Attributes:
        grid: A 3*3 matrix of int values (0 = empty, 1 = "X", -1 = "O").
"""
x = 1
o = -1
_empty_cell = 0
_vertical_separator = "│"
_horizontal_separator = "─"
_intersection = "┼"
_marks = {0: "_", -1: "O", 1: "X"}
def __init__(
self,
grid: Optional[List[List[int]]] = None,
history: Optional[List[Move]] = None,
) -> None:
"""Inits Grid with an empty grid."""
if grid is None:
grid = [[Board._empty_cell] * 3 for _ in range(3)]
self.grid: List[List[int]] = grid
if history is None:
history = []
self.history: List[Move] = history
def get_cell(self, coord: Coordinates) -> int:
"""Returns value for cell located at `coord`."""
return self.grid[coord[0]][coord[1]]
def make_move(self, move: Move) -> None:
"""Sets `value` for cell located at `coord` if cell is empty.
Consumes action.
"""
if not self.is_empty_cell(move.coordinates):
raise OverwriteCellError(move.coordinates)
self.grid[move.x][move.y] = move.player
self.history.append(move)
def is_empty_cell(self, coord: Coordinates) -> bool:
"""Checks if cell located at `coord` is empty."""
return bool(self.get_cell(coord) == Board._empty_cell)
def is_full(self) -> bool:
"""Checks if grid is full. Gris is full if there is no empty cell left."""
return not any(
self.is_empty_cell((row_id, col_id))
for row_id, row in enumerate(self.grid)
for col_id, col in enumerate(row)
)
def is_winning_move(self, move: Move) -> bool:
"""Checks if playing `value` at `coord` leads to a win.
Only checks the combinations containing the cell with the given coordinates.
Checks the one row, the one column and eventually the two diagonals.
"""
has_winning_row = all([cell == move.player for cell in self.grid[move.x]])
has_winning_col = all([row[move.y] == move.player for row in self.grid])
has_winning_diag = False
if move.x == move.y:
has_winning_diag = all(
self.grid[row_id][row_id] == move.player
for row_id, row in enumerate(self.grid)
)
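        # Cells on the anti-diagonal of the 3x3 grid satisfy row + col == 2.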
if move.x + move.y == 2:
has_winning_diag = has_winning_diag or all(
self.grid[2 - row_id][row_id] == move.player
for row_id, row in enumerate(self.grid)
)
return bool(has_winning_row or has_winning_col or has_winning_diag)
def display(self) -> List[List[str]]:
"""Returns the grid."""
return [[Board._marks[elem] for elem in row] for row in self.grid]
def framed_grid(self) -> str:
"""Returns the grid with an additional frame to facilitate reading."""
framed = []
for idx, row in enumerate(self.display()):
framed.append(Board._vertical_separator.join(row))
if idx != len(self.grid) - 1:
framed.append(Board._intersection.join(Board._horizontal_separator * 3))
return "\n".join(framed)
def winner(self) -> Optional[int]:
"""Returns game result.
This property should return:
1 if player #1 wins
-1 if player #2 wins
0 if there is a draw
None if result is unknown
Returns
-------
int
"""
try:
last_move = self.history[-1]
except IndexError: # no history means there is no winner
return None
has_winning_row = all(
[cell == last_move.player for cell in self.grid[last_move.x]]
)
has_winning_col = all(
[row[last_move.y] == last_move.player for row in self.grid]
)
has_winning_diag = False
if last_move.x == last_move.y:
has_winning_diag = all(
self.grid[row_id][row_id] == last_move.player
for row_id, row in enumerate(self.grid)
)
if last_move.x + last_move.y == 2:
has_winning_diag = has_winning_diag or all(
self.grid[2 - row_id][row_id] == last_move.player
for row_id, row in enumerate(self.grid)
)
if has_winning_row or has_winning_col or has_winning_diag:
return last_move.player
elif self.is_full():
return 0
else:
return None
def is_over(self) -> bool:
"""Returns boolean indicating if the game is over.
Simplest implementation may just be
        `return self.winner() is not None`
Returns
-------
boolean
"""
return self.winner() is not None
def is_tie(self) -> bool:
"""Returns boolean indicating if the game is over.
Simplest implementation may just be
`return self.game_result() is not None`
Returns
-------
boolean
"""
return self.winner() == 0
def is_won(self) -> bool:
"""Returns boolean indicating if the game is over.
Simplest implementation may just be
`return self.game_result() is not None`
Returns
-------
boolean
"""
return self.is_over() and not self.is_tie()
def to_json(self) -> str:
"""Creates a JSON representation of an instance of Board."""
d = {"__class": self.__class__.__name__, **self.__dict__}
return json.dumps(d, sort_keys=True, indent=4)
@classmethod
def from_json(cls, json_string: str) -> "Board":
"""Instantiate a Board object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
json_string: str, A serialized JSON object.
Returns:
An instance of the Board class.
Raises:
ValueError: If json_string was not produced by calling .to_json().
"""
attributes = json.loads(json_string)
if (
not isinstance(attributes, dict)
or attributes.pop("__class") != cls.__name__
):
raise ValueError
return cls(**attributes)
def __repr__(self) -> str:
"""Returns instance representation."""
return f"{self.__class__.__name__}({self.grid!r}, {self.history!r})"
def to_dict(self) -> Dict[str, Any]:
"""Converts the Board instance to a dictionary."""
return dict(
grid=self.grid,
history=[move.to_dict() for move in self.history],
__class=self.__class__.__name__,
)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "Board":
"""Constructs Board instance from dictionary."""
data = dict(data) # local copy
if not isinstance(data, dict) or data.pop("__class") != cls.__name__:
raise ValueError
return cls(
data.get("grid"),
[Move.from_dict(move) for move in data.get("history")],
)
def __eq__(self, other: "Board"):
"""Check whether other equals self elementwise."""
if not isinstance(other, Board):
return False
return self.__dict__ == other.__dict__
class Player(ABC):
"""Base Player class.
Attributes:
name: The name of the player.
mark: The value of the mark currently used. Must be "X" or "O".
"""
_available_moves = [naive_move, mcts_move, negamax_move]
def __init__(
self,
mark: int,
name: str,
moves: Optional[Callable[[List[List[int]], int], Coordinates]] = None,
score: int = 0,
) -> None:
"""Constructor.
Args:
name: str, name of the player.
mark: str, player's mark.
moves: callable, handles choice of moves.
score: int, player's score.
"""
self.name = name
self._mark = mark
self.moves = moves
self._score = score
def set_mark(self, mark: int) -> None:
"""Sets the player's mark for this game.
Args:
mark: int, player's mark
"""
self._mark = mark
def get_mark(self) -> int:
"""Returns the player's mark for this game.
Returns:
An integer with the value of the mark.
"""
return self._mark
def display_mark(self) -> str:
"""Returns the pretty print mark for the player.
Returns:
A string with the value of the mark.
"""
return "X" if self._mark == 1 else "O"
def record_win(self) -> None:
"""Records player's win and updates score."""
self._score += 1
def get_score(self) -> int:
"""Returns the player's score for this game.
Returns:
An integer with the value of the score.
"""
return self._score
@abstractmethod
def ask_move(self, grid: List[List[int]]) -> Optional[Coordinates]:
"""Asks the player what move he wants to play."""
raise NotImplementedError
def __repr__(self) -> str:
"""Returns instance representation."""
repr_moves = (
self.moves.__name__ if self.moves in self._available_moves else None
)
return (
f"{self.__class__.__name__}("
f"{self.name!r}, {self._mark!r}, {repr_moves!r}, {self._score!r})"
)
def to_dict(self) -> Dict[str, Any]:
"""Converts the Player instance to a dictionary."""
repr_moves = (
self.moves.__name__ if self.moves in self._available_moves else None
)
return dict(
name=self.name,
mark=self._mark,
moves=repr_moves,
score=self._score,
__class=self.__class__.__name__,
)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "Player":
"""Constructs Player instance from dictionary."""
data = dict(data) # local copy
class_name = data.pop("__class")
available_moves = {move.__name__: move for move in cls._available_moves}
data["moves"] = (
available_moves[data["moves"]] if data["moves"] in available_moves else None
)
if class_name == "HumanPlayer":
return HumanPlayer(**data)
elif class_name == "AIPlayer":
return AIPlayer(**data)
def __eq__(self, other):
"""Check whether other equals self elementwise."""
if not isinstance(other, Player):
return False
return self.__dict__ == other.__dict__
class AIPlayer(Player):
"""Player class for an AI-managed player."""
def __init__(
self,
mark: int,
name: str,
moves: Optional[Callable[[List[List[int]], int], Coordinates]] = naive_move,
score: int = 0,
) -> None:
"""Constructor.
Args:
name: str, name of the player.
mark: str, player's mark.
moves: callable, handles choice of moves.
score: int, player's score.
"""
super().__init__(mark=mark, name=name, moves=moves, score=score)
def ask_move(self, grid: List[List[int]]) -> Optional[Coordinates]:
"""Asks the player what move he wants to play."""
if self.moves is not None:
return self.moves(grid, self.get_mark())
return None
class HumanPlayer(Player):
"""Player class for a Human-managed player."""
def __init__(
self,
mark: int,
name: str,
moves: Optional[Callable[[List[List[int]], int], Coordinates]] = None,
score: int = 0,
) -> None:
"""Constructor.
Args:
name: str, name of the player.
mark: str, player's mark.
moves: callable, handles choice of moves.
score: int, player's score.
"""
super().__init__(mark=mark, name=name, moves=moves, score=score)
def ask_move(self, grid: List[List[int]]) -> Optional[Coordinates]:
"""Asks the player what move he wants to play."""
if self.moves is not None:
return self.moves(grid, self.get_mark())
return None
class PlayersMatch:
"""Matches players together and manages turns.
Attributes:
players: tuple(Player, Player), Players playing against each other.
_current_player: Player, Holds the player currently playing.
"""
def __init__(self, player_x: Player, player_o: Player) -> None:
"""Constructor.
Args:
player_x: Player, Player with the "X" mark. Will begin game.
            player_o: Player, Player with the "O" mark.
"""
        self.players: Tuple[Player, Player] = (player_x, player_o)
# Holds the player currently playing. Rules dictate that "X" starts the game.
self._current_player: Player = player_x
def update_ai_algorithm(
self, algorithm: Callable[[List[List[int]], int], Coordinates]
) -> None:
"""Updates the AI algorithm of the AIPlayer."""
ai_player = next(
player for player in self.players if isinstance(player, AIPlayer)
)
ai_player.moves = algorithm
def switch(self) -> None: # pragma: no cover
"""Updates `_current_player` with the other player."""
self._current_player = next(
player for player in self.players if player != self._current_player
)
def current(self) -> Player:
"""Returns `_current_player`."""
return self._current_player
def player(self, id_: int) -> Player:
"""Returns `_current_player`."""
if id_ != 1 and id_ != -1:
raise ValueError("id argument should be 1 or -1")
return next(player for player in self.players if player.get_mark() == id_)
def __repr__(self) -> str:
"""Returns instance representation."""
return f"{self.__class__.__name__}({self.players!r}, {self._current_player!r})"
def to_dict(self) -> Dict[str, Any]:
"""Converts the PlayersMatch instance to a dictionary."""
return dict(
players=[player.to_dict() for player in self.players],
current_player=self._current_player.to_dict(),
__class=self.__class__.__name__,
)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "PlayersMatch":
"""Constructs PlayersMatch instance from dictionary."""
data = dict(data) # local copy
players = [Player.from_dict(player) for player in data.get("players")]
current_player = Player.from_dict(data.get("current_player"))
players_match = cls(*players)
if players_match.current() != current_player:
players_match.switch()
return players_match
def __eq__(self, other: "PlayersMatch"):
"""Check whether other equals self elementwise."""
if not isinstance(other, PlayersMatch):
return False
return self.__dict__ == other.__dict__
class TicTacToeGame:
"""TicTacToeGame.
Attributes:
board: Board, The current board being played.
"""
def __init__(self, players_match: PlayersMatch, board: Board) -> None:
"""Returns a Game instance initialized with players params."""
self.players_match = players_match
self.board = board
def get_move(self) -> Optional[Coordinates]:
"""Gets a move from the current player.
        If the player is an
        AIPlayer, this method invokes the AI algorithm to choose the
        move. If the player is a HumanPlayer, the interaction with the
        human happens via the text terminal.
"""
return self.players_match.current().ask_move(self.board.grid)
def get_scores(self) -> List[Tuple[str, int]]:
"""Returns the scores."""
scores = [
(player.display_mark(), player.get_score())
for player in self.players_match.players
]
return scores
def winner(self) -> Optional[Player]:
"""TODO."""
board_winner = self.board.winner()
return (
self.players_match.player(board_winner)
if board_winner == 1 or board_winner == -1
else None
)
def __repr__(self) -> str:
"""Returns instance representation."""
return f"{self.__class__.__name__}({self.players_match!r}, {self.board!r})"
def to_dict(self) -> Dict[str, Any]:
"""Converts the TicTacToeGame instance to a dictionary."""
return dict(
players_match=self.players_match.to_dict(),
board=self.board.to_dict(),
__class=self.__class__.__name__,
)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "TicTacToeGame":
"""Constructs TicTacToeGame instance from dictionary."""
return cls(
PlayersMatch.from_dict(data.get("players_match")),
Board.from_dict(data.get("board")),
)
def __eq__(self, other: "TicTacToeGame"):
"""Check whether other equals self elementwise."""
if not isinstance(other, TicTacToeGame):
return False
return self.__dict__ == other.__dict__
MODE = Literal["single", "multi"]
def build_game(
player_1_name: Optional[str] = None,
player_2_name: Optional[str] = None,
player_1_starts: bool = True,
mode: MODE = "single",
) -> TicTacToeGame:
"""Returns a game object."""
player_1_mark = 1 if player_1_starts is True else -1
player_1: Player = HumanPlayer(player_1_mark, player_1_name or "Player 1")
player_2_mark = 0 - player_1_mark
player_2: Player
if mode == "single":
player_2 = AIPlayer(player_2_mark, player_2_name or "Bot")
else:
player_2 = HumanPlayer(player_2_mark, player_2_name or "Player 2")
return TicTacToeGame(PlayersMatch(player_1, player_2), Board())
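# --- Usage sketch (illustrative driver, not part of the module) --------------
# Builds a two-human game, plays one move per player and prints the board.
if __name__ == "__main__":
    game = build_game(player_1_name="Alice", player_2_name="Bob", mode="multi")
    x_player = game.players_match.current()            # "X" always starts
    game.board.make_move(Move(0, 0, x_player.get_mark()))
    game.players_match.switch()
    o_player = game.players_match.current()
    game.board.make_move(Move(1, 1, o_player.get_mark()))
    print(game.board.framed_grid())
    print(game.get_scores())                            # [('X', 0), ('O', 0)]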
| [
"json.loads",
"json.dumps",
"tic_tac_toe_game.errors.OverwriteCellError"
] | [((8676, 8715), 'json.dumps', 'json.dumps', (['d'], {'sort_keys': '(True)', 'indent': '(4)'}), '(d, sort_keys=True, indent=4)\n', (8686, 8715), False, 'import json\n'), ((9199, 9222), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (9209, 9222), False, 'import json\n'), ((4176, 4212), 'tic_tac_toe_game.errors.OverwriteCellError', 'OverwriteCellError', (['move.coordinates'], {}), '(move.coordinates)\n', (4194, 4212), False, 'from tic_tac_toe_game.errors import OverwriteCellError\n')] |
"""Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from tcav.activation_generator import DiscreteActivationGeneratorBase
from tcav.tcav_examples.discrete.kdd99_model import encode_variables, kBytesIndices, kFloatIndices, kIntIndices
import numpy as np
import tensorflow as tf
class KDD99DiscreteActivationGenerator(DiscreteActivationGeneratorBase):
""" Activation generator for the KDD99 dataset.
This uses the KDD99 dataset from sklearn. It is automatically loaded when we
try to train models, so no data downloading is required.
You can read more about it here:
https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_kddcup99.html
To see it in action, please check kdd99_discrete_example.ipynb
"""
def __init__(self, model, source_dir, acts_dir, max_examples):
self.source_dir = source_dir
super(KDD99DiscreteActivationGenerator,
self).__init__(model, source_dir, acts_dir, max_examples)
def load_data(self, concept):
""" Reads csv files into a numpy.ndarray containing income data
For this case, we create directories follow the following structure
source_dir >>
concept >>
>> concept.csv
The concepts will then be extracted into a numpy ndarray.
Args:
concept: string, This method takes names of a concept folder is an input.
They should be located in source_dir
Returns:
texts: A numpy array, where each subarray contains one row of the dataset
"""
concept_folder = os.path.join(self.source_dir, concept)
concept_file = os.path.join(concept_folder, concept + '.csv')
with tf.io.gfile.GFile(concept_file, 'r') as f:
texts = [
l.strip().split(',') for l in f.readlines()[:self.max_examples + 1]
]
texts = np.array(texts, dtype='O')
texts = texts[1:] # remove headers
texts = texts[:, :-1] # remove labels
texts = self._convert_types(texts) # Assign proper data types
return texts
def transform_data(self, data):
""" Encodes categorical columns and returns them as a numpy array
We first encode our categorical variables, so that they can be parsed by
the model. Finally,
we return the data in the form of a numpy array.
Args:
data: numpy.ndarray, parsed from load_data
Returns:
encoded_data: numpy.ndarray. Categorical variables are encoded.
"""
encoded_data = encode_variables(data)
return encoded_data
def _convert_types(self, texts):
""" When read from .csv, all variables are parsed as string.
This function assigns the proper types
Args:
texts: numpy.ndarray. Contains data parsed from dataset.
Returns:
texts: numpy.ndarray. Returns data with the proper types assigned
"""
texts[:, kBytesIndices] = texts[:, kBytesIndices].astype(str)
texts[:, kFloatIndices] = texts[:, kFloatIndices].astype(np.float32)
    texts[:, kIntIndices] = texts[:, kIntIndices].astype(np.int64)
return texts
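# --- Usage sketch (illustrative; the model wrapper, paths and concept name are
# placeholders, and source_dir must follow source_dir/<concept>/<concept>.csv):
#
#   act_gen = KDD99DiscreteActivationGenerator(
#       model=kdd99_model_wrapper,
#       source_dir='/tmp/kdd99/concepts',
#       acts_dir='/tmp/kdd99/activations',
#       max_examples=100)
#   raw = act_gen.load_data('dos')          # rows from source_dir/dos/dos.csv
#   encoded = act_gen.transform_data(raw)   # categorical columns encoded by encode_variables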
| [
"numpy.array",
"os.path.join",
"tcav.tcav_examples.discrete.kdd99_model.encode_variables",
"tensorflow.io.gfile.GFile"
] | [((2054, 2092), 'os.path.join', 'os.path.join', (['self.source_dir', 'concept'], {}), '(self.source_dir, concept)\n', (2066, 2092), False, 'import os\n'), ((2112, 2158), 'os.path.join', 'os.path.join', (['concept_folder', "(concept + '.csv')"], {}), "(concept_folder, concept + '.csv')\n", (2124, 2158), False, 'import os\n'), ((2325, 2351), 'numpy.array', 'np.array', (['texts'], {'dtype': '"""O"""'}), "(texts, dtype='O')\n", (2333, 2351), True, 'import numpy as np\n'), ((2955, 2977), 'tcav.tcav_examples.discrete.kdd99_model.encode_variables', 'encode_variables', (['data'], {}), '(data)\n', (2971, 2977), False, 'from tcav.tcav_examples.discrete.kdd99_model import encode_variables, kBytesIndices, kFloatIndices, kIntIndices\n'), ((2168, 2204), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['concept_file', '"""r"""'], {}), "(concept_file, 'r')\n", (2185, 2204), True, 'import tensorflow as tf\n')] |
"""Custom Gym environments.
Every class inside this module should extend a gym.Env class. The file
structure should be similar to gym.envs file structure, e.g. if you're
implementing a mujoco env, you would implement it under gym.mujoco submodule.
"""
import gym
CUSTOM_GYM_ENVIRONMENTS_PATH = __package__
MUJOCO_ENVIRONMENTS_PATH = f'{CUSTOM_GYM_ENVIRONMENTS_PATH}.mujoco'
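# Illustrative helper (not in the original file): each spec dict below holds the
# keyword arguments a Gym registry call expects, so a registration pass over the
# spec tuple could look like this; gym.envs.registration.register is assumed to
# be the entry point used by the package's registration code.
def _register_environment_specs(specs):
    for spec in specs:
        gym.envs.registration.register(**spec)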
MUJOCO_ENVIRONMENT_SPECS = (
{
'id': 'Walker2d-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyOnePoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyTwo-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergyFour-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'Walker2d-EnergySix-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.walker2d:Walker2dEnv'),
},
{
'id': 'HalfCheetah-EnergySix-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyFour-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyTwo-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyOnePoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-EnergyPoint1-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-Energy0-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv'),
},
{
'id': 'HalfCheetah5dof-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dof'),
},
{
'id': 'HalfCheetah5dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dof'),
},
{
'id': 'HalfCheetah5dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dof'),
},
{
'id': 'HalfCheetah5dofv2-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dofv2'),
},
{
'id': 'HalfCheetah5dofv3-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dofv3'),
},
{
'id': 'HalfCheetah5dofv4-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dofv4'),
},
{
'id': 'HalfCheetah5dofv5-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dofv5'),
},
{
'id': 'HalfCheetah5dofv6-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_5dofv6'),
},
{
'id': 'HalfCheetah4dof-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dof'),
},
{
'id': 'HalfCheetah4dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dof'),
},
{
'id': 'HalfCheetah4dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dof'),
},
{
'id': 'HalfCheetah4dofv2-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dofv2'),
},
{
'id': 'HalfCheetah4dofv3-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dofv3'),
},
{
'id': 'HalfCheetah4dofv4-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dofv4'),
},
{
'id': 'HalfCheetah4dofv5-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dofv5'),
},
{
'id': 'HalfCheetah4dofv6-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_4dofv6'),
},
{
'id': 'HalfCheetah3doff-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3doff'),
},
{
'id': 'HalfCheetah3doff-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3doff'),
},
{
'id': 'HalfCheetah3dofv3-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3dofv3'),
},
{
'id': 'HalfCheetah3dofv4-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3dofv4'),
},
{
'id': 'HalfCheetah3doff-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3doff'),
},
{
'id': 'HalfCheetah3dofb-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3dofb'),
},
{
'id': 'HalfCheetah3dofb-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3dofb'),
},
{
'id': 'HalfCheetah3dofb-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_3dofb'),
},
{
'id': 'HalfCheetah2dof-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_2dof'),
},
{
'id': 'HalfCheetah2dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_2dof'),
},
{
'id': 'HalfCheetah2dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_2dof'),
},
{
'id': 'HalfCheetah2dofv2-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_2dofv2'),
},
{
'id': 'HalfCheetah2dofv3-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_2dofv3'),
},
{
'id': 'HalfCheetah2dofv4-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_2dofv4'),
},
{
'id': 'HalfCheetah2dofv5-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv_2dofv5'),
},
{
'id': 'Giraffe-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.giraffe:GiraffeEnv'),
},
{
'id': 'HalfCheetahHeavy-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahHeavyEnv'),
},
{
'id': 'VA-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA'),
},
{
'id': 'VA-smallRange-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA'),
},
{
'id': 'VA-bigRange-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA'),
},
{
'id': 'VA4dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA4dof'),
},
{
'id': 'VA4dof-smallRange-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA4dof'),
},
{
'id': 'VA4dof-bigRange-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA4dof'),
},
{
'id': 'VA6dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA6dof'),
},
{
'id': 'VA6dof-smallRange-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA6dof'),
},
{
'id': 'VA6dof-bigRange-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA6dof'),
},
{
'id': 'VA8dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA8dof'),
},
{
'id': 'VA8dof-smallRange-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA8dof'),
},
{
'id': 'VA8dof-bigRange-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA8dof'),
},
{
'id': 'VA-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA'),
},
{
'id': 'VA4dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA4dof'),
},
{
'id': 'VA6dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA6dof'),
},
{
'id': 'VA8dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA8dof'),
},
{
'id': 'VA-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA'),
},
{
'id': 'VA4dof-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA4dof'),
},
{
'id': 'VA6dof-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA6dof'),
},
{
'id': 'VA8dof-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA8dof'),
},
{
'id': 'VA-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA'),
},
{
'id': 'VA4dof-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA4dof'),
},
{
'id': 'VA6dof-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA6dof'),
},
{
'id': 'VA8dof-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:VA8dof'),
},
{
'id': 'Centripede-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.centripede:CentripedeEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringGc-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringGc-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringGc-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringGc-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringGc-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringGc-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringGc-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringGc-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossGc-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossGc-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossGc-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossGc-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossGphase-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringGphase-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossGphase-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringGphase-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringG-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringG-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringG-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringG-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringT-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringT-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringT-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpringT-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpring-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpring-v00',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpring-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpring-v25',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpring-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpring-v45',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpring-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MinSpring-v65',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-LessSpring-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-LessSpring-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-LessSpring-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-LessSpring-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MoreSpring-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MoreSpring-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MoreSpring-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-MoreSpring-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringG-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringG-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringG-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringG-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringT-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringT-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringT-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpringT-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v00',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v21',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v25',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v210',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v41',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v45',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v410',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v61',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v65',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-ExSpring-v610',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v00',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v25',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v3',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v45',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v5',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-Energy0-v65',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFC-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFC-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFC-v3',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFC-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFC-v5',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFC-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCT-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCT-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCT-v3',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCT-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCT-v5',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCT-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCG-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCG-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCG-v3',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCG-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCG-v5',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-RealFCG-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossGfblr-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossG-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenG-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenT-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenG-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenG-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenG-v3',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenG-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenG-v5',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenG-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenG-v7',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymPenT-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossG-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossG-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossG-v3',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v3',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossG-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossG-v5',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v5',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossG-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v7',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetahHeavy-SymlossT-v8',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'FullCheetah-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:FullCheetahEnv'),
},
{
'id': 'HalfCheetah-PerfIndex-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv2'),
},
{
'id': 'HalfCheetah-InvPerfIndex-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahEnv3'),
},
{
'id': 'Ant-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'HalfCheetahSquat2dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat2dof'),
},
{
'id': 'HalfCheetahSquat2dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat2dof'),
},
{
'id': 'HalfCheetahSquat2dof-EnergyPoint25-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat2dof'),
},
{
'id': 'HalfCheetahSquat2dof-EnergyAlt-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat2dof'),
},
{
'id': 'HalfCheetahSquat4dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat4dof'),
},
{
'id': 'HalfCheetahSquat4dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat4dof'),
},
{
'id': 'HalfCheetahSquat4dof-EnergyAlt-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat4dof'),
},
{
'id': 'HalfCheetahSquat6dof-EnergyAlt-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat6dof'),
}, #
{
'id': 'HalfCheetahSquat6dof-Energyz-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat6dof'),
}, #
{
'id': 'HalfCheetahSquat6dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat6dof'),
},#
{
'id': 'HalfCheetahSquat4dof-EnergyPoint1-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat4dof'),
},
{
'id': 'HalfCheetahSquat4dof-EnergyPoint25-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat4dof'),
},
{
'id': 'HalfCheetahSquat6dof-EnergyPoint1-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat6dof'),
},
{
'id': 'HalfCheetahSquat6dof-EnergyPoint25-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.half_cheetah:HalfCheetahSquat6dof'),
},
{
'id': 'RealArm7dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v3',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v4',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v5',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v6',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v7',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v8',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm7dof-Energy0-v9',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm7dof'),
},
{
'id': 'RealArm6dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm6dof'),
},
{
'id': 'RealArm6dof-Energy0-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm6dof'),
},
{
'id': 'RealArm6dof-Energy0-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm6dof'),
},
{
'id': 'RealArm5dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm5dof'),
},
{
'id': 'RealArm5dof-Energy0-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm5dof'),
},
{
'id': 'RealArm5dof-Energy0-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm5dof'),
},
{
'id': 'RealArm5dofMinE-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm5dof'),
},
{
'id': 'RealArm4dofMinE-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm4dof'),
},
{
'id': 'RealArm5dofLT-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm5dof'),
},
{
'id': 'RealArm4dofLT-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm4dof'),
},
{
'id': 'RealArm4dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm4dof'),
},
{
'id': 'RealArm4dof-Energy0-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm4dof'),
},
{
'id': 'RealArm4dof-Energy0-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm4dof'),
},
{
'id': 'RealArm3dof-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm3dof'),
},
{
'id': 'RealArm3dof-Energy0-v1',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm3dof'),
},
{
'id': 'RealArm3dof-Energy0-v2',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.vertical_arm:RealArm3dof'),
},
{
'id': 'AntSquaT-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntSquaTEnv'),
},
{
'id': 'AntSquaTRedundant-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntSquaTRedundantEnv'),
},
{
'id': 'AntRun-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntRunEnv'),
},
{
'id': 'AntHeavy-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntHeavyEnv'),
},
{
'id': 'Ant-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'Ant-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'Ant-EnergyOnePoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'Ant-EnergyTwo-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.ant:AntEnv'),
},
{
'id': 'Humanoid-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoid-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoid-EnergyPoint5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoid-EnergyPoint1-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoid-EnergyPz5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-Energy0-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-EnergyOne-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-EnergyP5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-EnergyP1-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Humanoidrllab-EnergyPz5-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.humanoid_rllab:HumanoidEnv'),
},
{
'id': 'Pusher2d-Default-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.pusher_2d:Pusher2dEnv'),
},
{
'id': 'Pusher2d-DefaultReach-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.pusher_2d:ForkReacherEnv'),
},
{
'id': 'Pusher2d-ImageDefault-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.image_pusher_2d:ImagePusher2dEnv'),
},
{
'id': 'Pusher2d-ImageReach-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.image_pusher_2d:ImageForkReacher2dEnv'),
},
{
'id': 'Pusher2d-BlindReach-v0',
'entry_point': (f'{MUJOCO_ENVIRONMENTS_PATH}'
'.image_pusher_2d:BlindForkReacher2dEnv'),
},
)
GENERAL_ENVIRONMENT_SPECS = (
{
'id': 'MultiGoal-Default-v0',
'entry_point': (f'{CUSTOM_GYM_ENVIRONMENTS_PATH}'
'.multi_goal:MultiGoalEnv')
},
)
MULTIWORLD_ENVIRONMENT_SPECS = (
{
'id': 'Point2DEnv-Default-v0',
'entry_point': 'multiworld.envs.pygame.point2d:Point2DWallEnv'
},
{
'id': 'Point2DEnv-Wall-v0',
'entry_point': 'multiworld.envs.pygame.point2d:Point2DWallEnv'
},
)
MUJOCO_ENVIRONMENTS = tuple(
environment_spec['id']
for environment_spec in MUJOCO_ENVIRONMENT_SPECS)
GENERAL_ENVIRONMENTS = tuple(
environment_spec['id']
for environment_spec in GENERAL_ENVIRONMENT_SPECS)
MULTIWORLD_ENVIRONMENTS = tuple(
environment_spec['id']
for environment_spec in MULTIWORLD_ENVIRONMENT_SPECS)
GYM_ENVIRONMENTS = (
*MUJOCO_ENVIRONMENTS,
*GENERAL_ENVIRONMENTS,
*MULTIWORLD_ENVIRONMENTS,
)
def register_mujoco_environments():
"""Register softlearning mujoco environments."""
for mujoco_environment in MUJOCO_ENVIRONMENT_SPECS:
gym.register(**mujoco_environment)
gym_ids = tuple(
environment_spec['id']
for environment_spec in MUJOCO_ENVIRONMENT_SPECS)
return gym_ids
def register_general_environments():
"""Register gym environments that don't fall under a specific category."""
for general_environment in GENERAL_ENVIRONMENT_SPECS:
gym.register(**general_environment)
gym_ids = tuple(
environment_spec['id']
for environment_spec in GENERAL_ENVIRONMENT_SPECS)
return gym_ids
def register_multiworld_environments():
"""Register custom environments from multiworld package."""
for multiworld_environment in MULTIWORLD_ENVIRONMENT_SPECS:
gym.register(**multiworld_environment)
gym_ids = tuple(
environment_spec['id']
for environment_spec in MULTIWORLD_ENVIRONMENT_SPECS)
return gym_ids
def register_environments():
registered_mujoco_environments = register_mujoco_environments()
registered_general_environments = register_general_environments()
registered_multiworld_environments = register_multiworld_environments()
return (
*registered_mujoco_environments,
*registered_general_environments,
*registered_multiworld_environments,
)
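# Illustrative use of the registry above (not part of the original module):
# after register_environments() has run, any id listed in the spec tuples can
# be built through gym, e.g.
#
#   register_environments()
#   env = gym.make('Ant-Energy0-v0')   # id taken from MUJOCO_ENVIRONMENT_SPECS
#   observation = env.reset()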
| [
"gym.register"
] | [((44920, 44954), 'gym.register', 'gym.register', ([], {}), '(**mujoco_environment)\n', (44932, 44954), False, 'import gym\n'), ((45271, 45306), 'gym.register', 'gym.register', ([], {}), '(**general_environment)\n', (45283, 45306), False, 'import gym\n'), ((45618, 45656), 'gym.register', 'gym.register', ([], {}), '(**multiworld_environment)\n', (45630, 45656), False, 'import gym\n')] |
from base.base_model import BaseModel
import tensorflow as tf
class SignsModel(BaseModel):
def __init__(self, config):
super(SignsModel, self).__init__(config)
self.build_model()
self.init_saver()
def build_model(self):
self.is_training = tf.compat.v1.placeholder(tf.bool)
# input output
self.x = tf.compat.v1.placeholder(tf.float32, shape=[None] + self.config.state_size)
self.y = tf.compat.v1.placeholder(tf.float32, shape=[None, 6])
# initialize filters
W1 = tf.compat.v1.get_variable("W1", [4,4,3,8], initializer = tf.contrib.layers.xavier_initializer(seed=0))
W2 = tf.compat.v1.get_variable("W2", [2,2,8,16], initializer = tf.contrib.layers.xavier_initializer(seed=0))
# network architecture or computation graph
# CONV2D: stride of 1, padding 'SAME'
Z1 = tf.nn.conv2d(self.x, W1, strides = [1,1,1,1], padding = 'SAME')
# RELU
A1 = tf.nn.relu(Z1)
# MAXPOOL: window 8x8, stride 8, padding 'SAME'
P1 = tf.nn.max_pool2d(A1, ksize = [1,8,8,1], strides = [1,8,8,1], padding = 'SAME')
# CONV2D: filters W2, stride 1, padding 'SAME'
Z2 = tf.nn.conv2d(P1, W2, strides = [1,1,1,1], padding = 'SAME')
# RELU
A2 = tf.nn.relu(Z2)
# MAXPOOL: window 4x4, stride 4, padding 'SAME'
P2 = tf.nn.max_pool2d(A2, ksize = [1,4,4,1], strides = [1,4,4,1], padding = 'SAME')
# FLATTEN
        F = tf.contrib.layers.flatten(P2)  # tf.contrib is deprecated; this warns and requires a TF 1.x install
        # FULLY-CONNECTED without a non-linear activation function (do not apply softmax here).
        # 6 neurons in the output layer; softmax is applied inside the cross-entropy loss below.
Z3 = tf.contrib.layers.fully_connected(F, 6, activation_fn=None)
with tf.name_scope("loss"):
self.cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=Z3)
)
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
self.train_step = tf.compat.v1.train.AdamOptimizer(
self.config.learning_rate
).minimize(
self.cross_entropy,
global_step=self.global_step_tensor
)
correct_prediction = tf.equal(tf.argmax(Z3, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def init_saver(self):
# here you initialize the tensorflow saver that will be used in saving the checkpoints.
self.saver = tf.compat.v1.train.Saver(max_to_keep=self.config.max_to_keep)
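# Illustrative usage (not part of the original file): SignsModel only needs a
# config object exposing the attributes referenced above; the real config class
# comes from the surrounding project, so the stand-in below is an assumption.
#
#   class Config:                      # hypothetical config container
#       state_size = [64, 64, 3]       # input image height, width, channels
#       learning_rate = 1e-4
#       max_to_keep = 5
#   model = SignsModel(Config())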
| [
"tensorflow.compat.v1.placeholder",
"tensorflow.nn.conv2d",
"tensorflow.contrib.layers.flatten",
"tensorflow.nn.relu",
"tensorflow.nn.max_pool2d",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.argmax",
"tensorflow.name_scope",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.control_dependencies",
"tensorflow.compat.v1.get_collection",
"tensorflow.cast",
"tensorflow.compat.v1.train.Saver"
] | [((282, 315), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (306, 315), True, 'import tensorflow as tf\n'), ((357, 432), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '([None] + self.config.state_size)'}), '(tf.float32, shape=[None] + self.config.state_size)\n', (381, 432), True, 'import tensorflow as tf\n'), ((450, 503), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '[None, 6]'}), '(tf.float32, shape=[None, 6])\n', (474, 503), True, 'import tensorflow as tf\n'), ((887, 949), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['self.x', 'W1'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(self.x, W1, strides=[1, 1, 1, 1], padding='SAME')\n", (899, 949), True, 'import tensorflow as tf\n'), ((979, 993), 'tensorflow.nn.relu', 'tf.nn.relu', (['Z1'], {}), '(Z1)\n', (989, 993), True, 'import tensorflow as tf\n'), ((1063, 1141), 'tensorflow.nn.max_pool2d', 'tf.nn.max_pool2d', (['A1'], {'ksize': '[1, 8, 8, 1]', 'strides': '[1, 8, 8, 1]', 'padding': '"""SAME"""'}), "(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')\n", (1079, 1141), True, 'import tensorflow as tf\n'), ((1210, 1268), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['P1', 'W2'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(P1, W2, strides=[1, 1, 1, 1], padding='SAME')\n", (1222, 1268), True, 'import tensorflow as tf\n'), ((1298, 1312), 'tensorflow.nn.relu', 'tf.nn.relu', (['Z2'], {}), '(Z2)\n', (1308, 1312), True, 'import tensorflow as tf\n'), ((1382, 1460), 'tensorflow.nn.max_pool2d', 'tf.nn.max_pool2d', (['A2'], {'ksize': '[1, 4, 4, 1]', 'strides': '[1, 4, 4, 1]', 'padding': '"""SAME"""'}), "(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')\n", (1398, 1460), True, 'import tensorflow as tf\n'), ((1491, 1520), 'tensorflow.contrib.layers.flatten', 'tf.contrib.layers.flatten', (['P2'], {}), '(P2)\n', (1516, 1520), True, 'import tensorflow as tf\n'), ((1744, 1803), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['F', '(6)'], {'activation_fn': 'None'}), '(F, 6, activation_fn=None)\n', (1777, 1803), True, 'import tensorflow as tf\n'), ((2709, 2770), 'tensorflow.compat.v1.train.Saver', 'tf.compat.v1.train.Saver', ([], {'max_to_keep': 'self.config.max_to_keep'}), '(max_to_keep=self.config.max_to_keep)\n', (2733, 2770), True, 'import tensorflow as tf\n'), ((1818, 1839), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (1831, 1839), True, 'import tensorflow as tf\n'), ((2013, 2075), 'tensorflow.compat.v1.get_collection', 'tf.compat.v1.get_collection', (['tf.compat.v1.GraphKeys.UPDATE_OPS'], {}), '(tf.compat.v1.GraphKeys.UPDATE_OPS)\n', (2040, 2075), True, 'import tensorflow as tf\n'), ((604, 648), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': '(0)'}), '(seed=0)\n', (640, 648), True, 'import tensorflow as tf\n'), ((721, 765), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'seed': '(0)'}), '(seed=0)\n', (757, 765), True, 'import tensorflow as tf\n'), ((1907, 1972), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'self.y', 'logits': 'Z3'}), '(labels=self.y, logits=Z3)\n', (1946, 1972), True, 'import tensorflow as tf\n'), ((2094, 2129), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (2117, 
2129), True, 'import tensorflow as tf\n'), ((2439, 2455), 'tensorflow.argmax', 'tf.argmax', (['Z3', '(1)'], {}), '(Z3, 1)\n', (2448, 2455), True, 'import tensorflow as tf\n'), ((2457, 2477), 'tensorflow.argmax', 'tf.argmax', (['self.y', '(1)'], {}), '(self.y, 1)\n', (2466, 2477), True, 'import tensorflow as tf\n'), ((2523, 2562), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2530, 2562), True, 'import tensorflow as tf\n'), ((2166, 2225), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', (['self.config.learning_rate'], {}), '(self.config.learning_rate)\n', (2198, 2225), True, 'import tensorflow as tf\n')] |
from cerebral import logger as l
import Pyro4
import logging
from cerebral.nameserver import ports
from ares.main import RFID
# Configure pyro.
Pyro4.config.SERIALIZERS_ACCEPTED = frozenset(['pickle', 'serpent'])
Pyro4.config.SERIALIZER = 'pickle'
# Logging.
logger = logging.getLogger('universe')
class SuperAres:
def __init__(self):
self.rfid = RFID()
def read(self):
return self.rfid.read()
super_ares = SuperAres()
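# Illustrative client-side access (not part of this worker script): once the
# daemon below is running, the registered object can be reached with a proxy:
#
#   uri = f"PYRO:super_ares@localhost:{ports['worker3']}"
#   tag = Pyro4.Proxy(uri).read()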
if __name__ == '__main__':
# Create a daemon.
port = ports['worker3']
daemon = Pyro4.Daemon('localhost', port)
# Register all objects.
daemon.register(super_ares, 'super_ares')
# Log server event.
logger.info('Worker 3 started!')
# Start event loop.
daemon.requestLoop() | [
"logging.getLogger",
"ares.main.RFID",
"Pyro4.Daemon"
] | [((272, 301), 'logging.getLogger', 'logging.getLogger', (['"""universe"""'], {}), "('universe')\n", (289, 301), False, 'import logging\n'), ((545, 576), 'Pyro4.Daemon', 'Pyro4.Daemon', (['"""localhost"""', 'port'], {}), "('localhost', port)\n", (557, 576), False, 'import Pyro4\n'), ((365, 371), 'ares.main.RFID', 'RFID', ([], {}), '()\n', (369, 371), False, 'from ares.main import RFID\n')] |
import os
hostname = "www.wynnlasvegas.com"
response = os.system("ping -c 1 " + hostname)
if response == 0:
print (hostname, 'is up!')
else:
print (hostname, 'is down!')
| [
"os.system"
] | [((45, 88), 'os.system', 'os.system', (['"""ping -c 1 www.wynnlasvegas.com"""'], {}), "('ping -c 1 www.wynnlasvegas.com')\n", (54, 88), False, 'import os\n')] |
"""
https://data-apis.github.io/array-api/latest/API_specification/type_promotion.html
"""
import pytest
from hypothesis import given, example
from hypothesis.strategies import from_type, data
from .hypothesis_helpers import shapes
from .pytest_helpers import nargs
from .array_helpers import assert_exactly_equal
from .function_stubs import elementwise_functions
from ._array_module import (ones, int8, int16, int32, int64, uint8,
uint16, uint32, uint64, float32, float64)
from . import _array_module
dtype_mapping = {
'i1': int8,
'i2': int16,
'i4': int32,
'i8': int64,
'u1': uint8,
'u2': uint16,
'u4': uint32,
'u8': uint64,
'f4': float32,
'f8': float64,
}
signed_integer_promotion_table = {
('i1', 'i1'): 'i1',
('i1', 'i2'): 'i2',
('i1', 'i4'): 'i4',
('i1', 'i8'): 'i8',
('i2', 'i1'): 'i2',
('i2', 'i2'): 'i2',
('i2', 'i4'): 'i4',
('i2', 'i8'): 'i8',
('i4', 'i1'): 'i4',
('i4', 'i2'): 'i4',
('i4', 'i4'): 'i4',
('i4', 'i8'): 'i8',
('i8', 'i1'): 'i8',
('i8', 'i2'): 'i8',
('i8', 'i4'): 'i8',
('i8', 'i8'): 'i8',
}
unsigned_integer_promotion_table = {
('u1', 'u1'): 'u1',
('u1', 'u2'): 'u2',
('u1', 'u4'): 'u4',
('u1', 'u8'): 'u8',
('u2', 'u1'): 'u2',
('u2', 'u2'): 'u2',
('u2', 'u4'): 'u4',
('u2', 'u8'): 'u8',
('u4', 'u1'): 'u4',
('u4', 'u2'): 'u4',
('u4', 'u4'): 'u4',
('u4', 'u8'): 'u8',
('u8', 'u1'): 'u8',
('u8', 'u2'): 'u8',
('u8', 'u4'): 'u8',
('u8', 'u8'): 'u8',
}
mixed_signed_unsigned_promotion_table = {
('i1', 'u1'): 'i2',
('i1', 'u2'): 'i4',
('i1', 'u4'): 'i8',
('i2', 'u1'): 'i2',
('i2', 'u2'): 'i4',
('i2', 'u4'): 'i8',
('i4', 'u1'): 'i4',
('i4', 'u2'): 'i4',
('i4', 'u4'): 'i8',
}
flipped_mixed_signed_unsigned_promotion_table = {(u, i): p for (i, u), p in mixed_signed_unsigned_promotion_table.items()}
float_promotion_table = {
('f4', 'f4'): 'f4',
('f4', 'f8'): 'f8',
('f8', 'f4'): 'f8',
('f8', 'f8'): 'f8',
}
promotion_table = {
**signed_integer_promotion_table,
**unsigned_integer_promotion_table,
**mixed_signed_unsigned_promotion_table,
**flipped_mixed_signed_unsigned_promotion_table,
**float_promotion_table,
}
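# For example, promotion_table[('i1', 'u1')] == 'i2': mixing int8 with uint8 is
# expected to promote to int16, per the mixed signed/unsigned table above.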
binary_operators = {
'__add__': '+',
'__and__': '&',
'__eq__': '==',
'__floordiv__': '//',
'__ge__': '>=',
'__gt__': '>',
'__le__': '<=',
'__lshift__': '<<',
'__lt__': '<',
'__matmul__': '@',
'__mod__': '%',
'__mul__': '*',
'__ne__': '!=',
'__or__': '|',
'__pow__': '**',
'__rshift__': '>>',
'__sub__': '-',
'__truediv__': '/',
'__xor__': '^',
}
unary_operators = {
'__invert__': '~',
'__neg__': '-',
'__pos__': '+',
}
dtypes_to_scalar = {
_array_module.bool: bool,
_array_module.int8: int,
_array_module.int16: int,
_array_module.int32: int,
_array_module.int64: int,
_array_module.uint8: int,
_array_module.uint16: int,
_array_module.uint32: int,
_array_module.uint64: int,
_array_module.float32: float,
_array_module.float64: float,
}
scalar_to_dtype = {s: [d for d, _s in dtypes_to_scalar.items() if _s == s] for
s in dtypes_to_scalar.values()}
# TODO: Extend this to all functions (not just elementwise), and handle
# functions that take more than 2 args
@pytest.mark.parametrize('func_name', [i for i in
elementwise_functions.__all__ if
nargs(i) > 1])
@pytest.mark.parametrize('dtypes', promotion_table.items())
# The spec explicitly requires type promotion to work for shape 0
@example(shape=(0,))
@given(shape=shapes)
def test_promotion(func_name, shape, dtypes):
assert nargs(func_name) == 2
func = getattr(_array_module, func_name)
(type1, type2), res_type = dtypes
dtype1 = dtype_mapping[type1]
dtype2 = dtype_mapping[type2]
res_dtype = dtype_mapping[res_type]
for i in [func, dtype1, dtype2, res_dtype]:
if isinstance(i, _array_module._UndefinedStub):
func._raise()
a1 = ones(shape, dtype=dtype1)
a2 = ones(shape, dtype=dtype2)
res = func(a1, a2)
assert res.dtype == res_dtype, f"{func_name}({dtype1}, {dtype2}) promoted to {res.dtype}, should have promoted to {res_dtype} (shape={shape})"
@pytest.mark.parametrize('binary_op', sorted(set(binary_operators.values()) - {'@'}))
@pytest.mark.parametrize('scalar_type,dtype', [(s, d) for s in scalar_to_dtype
for d in scalar_to_dtype[s]])
@given(shape=shapes, scalars=data())
def test_operator_scalar_promotion(binary_op, scalar_type, dtype, shape, scalars):
"""
See https://data-apis.github.io/array-api/latest/API_specification/type_promotion.html#mixing-arrays-with-python-scalars
"""
if binary_op == '@':
pytest.skip("matmul (@) is not supported for scalars")
a = ones(shape, dtype=dtype)
s = scalars.draw(from_type(scalar_type))
scalar_as_array = _array_module.full((), s, dtype=dtype)
get_locals = lambda: dict(a=a, s=s, scalar_as_array=scalar_as_array)
# As per the spec:
# The expected behavior is then equivalent to:
#
# 1. Convert the scalar to a 0-D array with the same dtype as that of the
# array used in the expression.
#
# 2. Execute the operation for `array <op> 0-D array` (or `0-D array <op>
# array` if `scalar` was the left-hand argument).
array_scalar = f'a {binary_op} s'
array_scalar_expected = f'a {binary_op} scalar_as_array'
res = eval(array_scalar, get_locals())
expected = eval(array_scalar_expected, get_locals())
assert_exactly_equal(res, expected)
scalar_array = f's {binary_op} a'
scalar_array_expected = f'scalar_as_array {binary_op} a'
res = eval(scalar_array, get_locals())
expected = eval(scalar_array_expected, get_locals())
assert_exactly_equal(res, expected)
if __name__ == '__main__':
for (i, j), p in promotion_table.items():
print(f"({i}, {j}) -> {p}")
| [
"hypothesis.example",
"hypothesis.strategies.from_type",
"hypothesis.strategies.data",
"pytest.mark.parametrize",
"hypothesis.given",
"pytest.skip"
] | [((3735, 3754), 'hypothesis.example', 'example', ([], {'shape': '(0,)'}), '(shape=(0,))\n', (3742, 3754), False, 'from hypothesis import given, example\n'), ((3756, 3775), 'hypothesis.given', 'given', ([], {'shape': 'shapes'}), '(shape=shapes)\n', (3761, 3775), False, 'from hypothesis import given, example\n'), ((4509, 4620), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scalar_type,dtype"""', '[(s, d) for s in scalar_to_dtype for d in scalar_to_dtype[s]]'], {}), "('scalar_type,dtype', [(s, d) for s in\n scalar_to_dtype for d in scalar_to_dtype[s]])\n", (4532, 4620), False, 'import pytest\n'), ((4958, 5012), 'pytest.skip', 'pytest.skip', (['"""matmul (@) is not supported for scalars"""'], {}), "('matmul (@) is not supported for scalars')\n", (4969, 5012), False, 'import pytest\n'), ((5067, 5089), 'hypothesis.strategies.from_type', 'from_type', (['scalar_type'], {}), '(scalar_type)\n', (5076, 5089), False, 'from hypothesis.strategies import from_type, data\n'), ((4693, 4699), 'hypothesis.strategies.data', 'data', ([], {}), '()\n', (4697, 4699), False, 'from hypothesis.strategies import from_type, data\n')] |
"""misc.py
Various utility functions.
"""
from typing import List, Tuple, TypeVar, Optional
def reorder_indices(lst: List, target: List) -> Tuple[int, ...]:
"""
Determine how to bring a list with unique entries to a different order.
Supports only lists of strings.
:param lst: input list
:param target: list in the desired order
:return: the indices that will reorder the input to obtain the target.
:raises: ``ValueError`` for invalid inputs.
"""
if set([type(i) for i in lst]) != {str}:
raise ValueError('Only lists of strings are supported')
if len(set(lst)) < len(lst):
raise ValueError('Input list elements are not unique.')
if set(lst) != set(target) or len(lst) != len(target):
raise ValueError('Contents of input and target do not match.')
idxs = []
for elt in target:
idxs.append(lst.index(elt))
return tuple(idxs)
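# Illustrative example (not in the original module):
#
#   reorder_indices(['a', 'b', 'c'], ['c', 'a', 'b'])   # -> (2, 0, 1)
#
# i.e. taking the input elements at indices 2, 0, 1 reproduces the target order.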
def reorder_indices_from_new_positions(lst: List[str], **pos: int) \
-> Tuple[int, ...]:
"""
Determine how to bring a list with unique entries to a different order.
:param lst: input list (of strings)
:param pos: new positions in the format ``element = new_position``.
non-specified elements will be adjusted automatically.
:return: the indices that will reorder the input to obtain the target.
:raises: ``ValueError`` for invalid inputs.
"""
if set([type(i) for i in lst]) != {str}:
raise ValueError('Only lists of strings are supported')
if len(set(lst)) < len(lst):
raise ValueError('Input list elements are not unique.')
target = lst.copy()
for item, newidx in pos.items():
oldidx = target.index(item)
del target[oldidx]
target.insert(newidx, item)
return reorder_indices(lst, target)
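# Illustrative example (not in the original module):
#
#   reorder_indices_from_new_positions(['a', 'b', 'c'], c=0)   # -> (2, 0, 1)
#
# 'c' is moved to position 0 and the other elements keep their relative order.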
T = TypeVar('T')
def unwrap_optional(val: Optional[T]) -> T:
    """Convert a variable of type Optional[T] to T.
    If the variable is None, a ValueError is raised.
"""
if val is None:
raise ValueError("Expected a not None value but got a None value.")
return val
| [
"typing.TypeVar"
] | [((1832, 1844), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1839, 1844), False, 'from typing import List, Tuple, TypeVar, Optional\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from fclib.common.utils import git_repo_path, module_exists, system_type, module_path
def test_git_repo_path():
# implicitly test for no exception
assert git_repo_path() is not None
def test_module_exists():
assert module_exists("numpy")
assert not module_exists("fakepkgxyz")
def test_system_type():
assert system_type() in ["linux", "mac", "win"]
def test_module_path():
# look for binaries we use in this repo
assert module_path("forecasting_env", "python") != ""
assert module_path("forecasting_env", "tensorboard") != ""
| [
"fclib.common.utils.module_path",
"fclib.common.utils.git_repo_path",
"fclib.common.utils.module_exists",
"fclib.common.utils.system_type"
] | [((305, 327), 'fclib.common.utils.module_exists', 'module_exists', (['"""numpy"""'], {}), "('numpy')\n", (318, 327), False, 'from fclib.common.utils import git_repo_path, module_exists, system_type, module_path\n'), ((238, 253), 'fclib.common.utils.git_repo_path', 'git_repo_path', ([], {}), '()\n', (251, 253), False, 'from fclib.common.utils import git_repo_path, module_exists, system_type, module_path\n'), ((343, 370), 'fclib.common.utils.module_exists', 'module_exists', (['"""fakepkgxyz"""'], {}), "('fakepkgxyz')\n", (356, 370), False, 'from fclib.common.utils import git_repo_path, module_exists, system_type, module_path\n'), ((408, 421), 'fclib.common.utils.system_type', 'system_type', ([], {}), '()\n', (419, 421), False, 'from fclib.common.utils import git_repo_path, module_exists, system_type, module_path\n'), ((530, 570), 'fclib.common.utils.module_path', 'module_path', (['"""forecasting_env"""', '"""python"""'], {}), "('forecasting_env', 'python')\n", (541, 570), False, 'from fclib.common.utils import git_repo_path, module_exists, system_type, module_path\n'), ((588, 633), 'fclib.common.utils.module_path', 'module_path', (['"""forecasting_env"""', '"""tensorboard"""'], {}), "('forecasting_env', 'tensorboard')\n", (599, 633), False, 'from fclib.common.utils import git_repo_path, module_exists, system_type, module_path\n')] |
import pygame
import sys
from pathlib import Path
from typing import Any
pygame.init()
pygame.mixer.init()
DEBUG = "-d" in sys.argv or "--debug" in sys.argv
DEBUG_FREEZE = False
PATH = Path(__file__).parent.parent
window = pygame.display.set_mode((620, 620), pygame.RESIZABLE)
pygame.display.set_caption("ParaPac - Loading...")
clock = pygame.time.Clock()
font = pygame.font.Font(PATH / "assets/VT323.ttf", 24)
font2 = pygame.font.Font(PATH / "assets/VT323.ttf", 20)
font64 = pygame.font.Font(PATH / "assets/VT323.ttf", 64)
fps = 0
maps = []
map_area_x, map_area_y = 0, 0
map_area_width, map_area_height = 1, 1
map_area_ratio = 1
# Uses Any to make PyCharm shut up
player: Any = None
dashboard = None
active_map: Any = None
game_loop: Any = None
active_map_id: int = 0
alpha: int = 255
score: int = 0
coins: int = 0
class Transition:
"""Enum for the transition"""
NOT_TRANSITIONING = 0
FADING = 1
REAPPEARING = 2
transitioning_mode = Transition.NOT_TRANSITIONING
transition_timer = -25
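# Illustrative usage from another module (not part of this file; the name
# "common" is an assumption about how this shared-state module is imported):
#
#   import common
#   common.transitioning_mode = common.Transition.FADING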
| [
"pygame.display.set_caption",
"pygame.init",
"pathlib.Path",
"pygame.display.set_mode",
"pygame.time.Clock",
"pygame.font.Font",
"pygame.mixer.init"
] | [((74, 87), 'pygame.init', 'pygame.init', ([], {}), '()\n', (85, 87), False, 'import pygame\n'), ((88, 107), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (105, 107), False, 'import pygame\n'), ((225, 278), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(620, 620)', 'pygame.RESIZABLE'], {}), '((620, 620), pygame.RESIZABLE)\n', (248, 278), False, 'import pygame\n'), ((279, 329), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""ParaPac - Loading..."""'], {}), "('ParaPac - Loading...')\n", (305, 329), False, 'import pygame\n'), ((338, 357), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (355, 357), False, 'import pygame\n'), ((365, 412), 'pygame.font.Font', 'pygame.font.Font', (["(PATH / 'assets/VT323.ttf')", '(24)'], {}), "(PATH / 'assets/VT323.ttf', 24)\n", (381, 412), False, 'import pygame\n'), ((421, 468), 'pygame.font.Font', 'pygame.font.Font', (["(PATH / 'assets/VT323.ttf')", '(20)'], {}), "(PATH / 'assets/VT323.ttf', 20)\n", (437, 468), False, 'import pygame\n'), ((478, 525), 'pygame.font.Font', 'pygame.font.Font', (["(PATH / 'assets/VT323.ttf')", '(64)'], {}), "(PATH / 'assets/VT323.ttf', 64)\n", (494, 525), False, 'import pygame\n'), ((187, 201), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'from pathlib import Path\n')] |
import io
import os
from typing import List
from googleapiclient import discovery
# noinspection PyPackageRequirements
from googleapiclient.http import MediaIoBaseDownload
from oauth2client.service_account import ServiceAccountCredentials
class Drive2:
SCOPES = ['https://www.googleapis.com/auth/drive.readonly.metadata',
'https://www.googleapis.com/auth/drive.readonly',
'https://www.googleapis.com/auth/drive.metadata']
DRIVE = discovery.build('drive', 'v3',
credentials=ServiceAccountCredentials.from_json_keyfile_name('google-cloud.json', SCOPES))
@classmethod
def get_folders(cls) -> List[str]:
main_folder: list = cls.DRIVE.files().list(q="name = 'celeb'").execute().get('files', list())
if not main_folder:
return list()
sub_folders = cls.DRIVE.files().list(q=f"'{main_folder[0]['id']}' in parents").execute().get('files', list())
return [folder['name'] for folder in sub_folders]
@classmethod
def folder_exists(cls, folder_name: str) -> bool:
return folder_name in cls.get_folders()
@classmethod
def get_files(cls, folder_name: str) -> List[dict]:
folder: list = cls.DRIVE.files().list(q=f"name = '{folder_name}'").execute().get('files', list())
if not folder:
return list()
files: list = cls.DRIVE.files().list(q=f"'{folder[0]['id']}' in parents",
pageSize=1000).execute().get('files', list())
return files
@classmethod
def file_exists(cls, folder_name: str, file_name: str) -> bool:
files = cls.get_files(folder_name)
if not files:
return False
return file_name in [file['name'] for file in files]
@classmethod
def download(cls, folder_name: str):
static_dir = f"flask_app/static/{folder_name}"
os.makedirs(static_dir, exist_ok=True)
files = cls.get_files(folder_name)
for file in files:
request = cls.DRIVE.files().get_media(fileId=file['id'])
file_handle = io.FileIO(f"{static_dir}/{file['name']}", 'wb')
downloader = MediaIoBaseDownload(file_handle, request)
done = False
while done is False:
_, done = downloader.next_chunk()
return
@classmethod
def rename(cls, file: dict, new_file_name) -> dict:
new_title = {'name': new_file_name}
file = cls.DRIVE.files().update(fileId=file['id'], body=new_title, fields='name').execute()
return file
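# Illustrative usage (not part of the original file; assumes a valid
# google-cloud.json service-account key and a Drive folder named 'celeb'
# shared with that service account, as the methods above expect):
#
#   if Drive2.folder_exists('alice'):   # 'alice' is a hypothetical sub-folder
#       Drive2.download('alice')        # saves into flask_app/static/alice/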
| [
"oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name",
"googleapiclient.http.MediaIoBaseDownload",
"io.FileIO",
"os.makedirs"
] | [((1904, 1942), 'os.makedirs', 'os.makedirs', (['static_dir'], {'exist_ok': '(True)'}), '(static_dir, exist_ok=True)\n', (1915, 1942), False, 'import os\n'), ((534, 611), 'oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name', 'ServiceAccountCredentials.from_json_keyfile_name', (['"""google-cloud.json"""', 'SCOPES'], {}), "('google-cloud.json', SCOPES)\n", (582, 611), False, 'from oauth2client.service_account import ServiceAccountCredentials\n'), ((2108, 2155), 'io.FileIO', 'io.FileIO', (['f"""{static_dir}/{file[\'name\']}"""', '"""wb"""'], {}), '(f"{static_dir}/{file[\'name\']}", \'wb\')\n', (2117, 2155), False, 'import io\n'), ((2181, 2222), 'googleapiclient.http.MediaIoBaseDownload', 'MediaIoBaseDownload', (['file_handle', 'request'], {}), '(file_handle, request)\n', (2200, 2222), False, 'from googleapiclient.http import MediaIoBaseDownload\n')] |
"""
Contains miscellaneous parameters.
"""
__all__ = [
"basis",
"filters",
"logger",
]
import astropy.units as u
import tables as tb
import numpy as np
import logging
import os
####################################
# SI basis
####################################
# Basis for decomposition
basis = [u.kg, u.km, u.s, u.C, u.T, u.rad]
####################################
# pytables variables
####################################
# Use blosc for good performance
filters = tb.Filters(complevel=5, complib="blosc")
####################################
# Logging
####################################
__log_formatter = logging.Formatter(
fmt="%(asctime)s | %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
def logger(name, log_file, level=logging.INFO):
    """To set up as many loggers as needed"""
    fname = os.path.join("logs", f"{log_file}.log")
    # make sure the log directory exists and start each run from a fresh file
    os.makedirs("logs", exist_ok=True)
    if os.path.exists(fname):
        os.remove(fname)
    handler = logging.FileHandler(fname)
handler.setFormatter(__log_formatter)
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(handler)
return logger
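# Illustrative usage (not part of the original module): each call returns an
# independent logger that writes to its own file under logs/, e.g.
#
#   run_log = logger("run", "run")   # -> logs/run.log
#   run_log.info("simulation started")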
| [
"logging.getLogger",
"os.path.exists",
"logging.Formatter",
"os.path.join",
"tables.Filters",
"os.remove"
] | [((493, 533), 'tables.Filters', 'tb.Filters', ([], {'complevel': '(5)', 'complib': '"""blosc"""'}), "(complevel=5, complib='blosc')\n", (503, 533), True, 'import tables as tb\n'), ((638, 725), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(asctime)s | %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""'}), "(fmt='%(asctime)s | %(message)s', datefmt=\n '%m/%d/%Y %I:%M:%S %p')\n", (655, 725), False, 'import logging\n'), ((842, 881), 'os.path.join', 'os.path.join', (['"""logs"""', 'f"""{log_file}.log"""'], {}), "('logs', f'{log_file}.log')\n", (854, 881), False, 'import os\n'), ((889, 910), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (903, 910), False, 'import os\n'), ((1068, 1091), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (1085, 1091), False, 'import logging\n'), ((920, 936), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (929, 936), False, 'import os\n'), ((972, 1011), 'os.path.join', 'os.path.join', (['"""logs"""', 'f"""{log_file}.log"""'], {}), "('logs', f'{log_file}.log')\n", (984, 1011), False, 'import os\n')] |
import inspect
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.dataset import convert
from third_party_library import projection_simplex_sort
class LRE(chainer.training.StandardUpdater):
    """Updater implementing Learning to Reweight Examples
    (Algorithm 1 in https://arxiv.org/abs/1803.09050): per-example weights are
    obtained from a meta gradient step evaluated on a separate validation batch.
    """
def __init__(self, iterator, optimizer, converter=convert.concat_examples,
device=None, loss_func=None, loss_scale=None, alpha=0.001):
super(LRE, self).__init__(
iterator, optimizer, converter, device, loss_func, loss_scale)
self.alpha = alpha
def update_core(self):
it = self._iterators['main']
batch = it.next()
batchsize = len(batch)
*x, t = self.converter(batch, self.device)
batch_val = self._iterators['val'].next()
*x_val, t_val = self.converter(batch_val, self.device)
optimizer = self._optimizers['main']
loss_func = self.loss_func or optimizer.target.lossfun
model = optimizer.target
model_tmp = model.copy()
# Line 4 (Algorithm 1 in https://arxiv.org/abs/1803.09050)
ys = model_tmp.predictor(*x)
# Line 5
weight = L.Linear(batchsize, 1, nobias=True, initialW=0)
weight.to_gpu()
        # F.sigmoid_cross_entropy has no enable_double_backprop argument, so only pass it when supported
if 'enable_double_backprop' in inspect.getargspec(loss_func).args:
loss_f = weight(loss_func(ys, t, reduce='no',
enable_double_backprop=True)[None])
else:
loss_f = weight(loss_func(ys, t, reduce='no')[None])
# Line 6
model_tmp.cleargrads()
weight.cleargrads()
loss_f.backward(retain_grad=True, enable_double_backprop=True)
# Line 7
for link in model_tmp.predictor.links(skipself=True):
for name, param in link.namedparams():
if name.count('/') == 1:
super(chainer.Link, link).__setattr__(
name[1:], param - self.alpha * param.grad_var)
# Line 8
ys_val = model_tmp.predictor(*x_val)
# Line 9
loss_g = loss_func(ys_val, t_val)
# Line 10
model_tmp.cleargrads()
weight.cleargrads()
# Line 11
w = - chainer.grad([loss_g], [weight.W])[0].data
w[w < 0] = 0
if F.sum(w).data != 0:
w /= F.sum(w).data
weight.W.data[:] = w
y = model.predictor(*x)
loss_f2 = F.sum(weight(loss_func(y, t, reduce='no')[None]))
model.cleargrads()
loss_f2.backward()
optimizer.update()
# compatibility with chainer_chemistry.models.Classifier
if isinstance(model.accfun, dict):
metrics = {key: value(y, t) for key, value in model.accfun.items()}
chainer.reporter.report(metrics, model)
chainer.reporter.report({'loss': loss_f2}, model)
else:
chainer.reporter.report(
{'loss': loss_f2, 'accuracy': model.accfun(y, t)}, model)
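# Illustrative wiring (not part of the original file; iterator keys follow the
# names used in update_core above):
#
#   updater = LRE({'main': train_iter, 'val': val_iter}, optimizer,
#                 device=0, alpha=1e-3)
#   trainer = chainer.training.Trainer(updater, (20, 'epoch'))
#   trainer.run()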
class Proposed(chainer.training.StandardUpdater):
    """Reweighting updater that needs no separate validation iterator: the
    positive examples (t == 1) inside the mini-batch act as the validation set,
    the weight gradient is projected onto the probability simplex, and the
    mini-batch is re-sampled according to the resulting weights.
    """
def __init__(self, iterator, optimizer, converter=convert.concat_examples,
device=None, loss_func=None, loss_scale=None, alpha=0.001, sampling_size=32):
super(Proposed, self).__init__(iterator, optimizer,
converter, device, loss_func, loss_scale)
self.alpha = alpha
self.sampling_size = sampling_size
def update_core(self):
it = self._iterators['main']
batch = it.next()
*x, t = self.converter(batch, self.device)
batchsize = len(batch)
optimizer = self._optimizers['main']
loss_func = self.loss_func or optimizer.target.lossfun
model = optimizer.target
model_tmp = model.copy()
# Line 4 (Algorithm 1 in https://arxiv.org/abs/1803.09050)
ys = model_tmp.predictor(*x)
# Line 5
weight = L.Linear(batchsize, 1, nobias=True, initialW=0)
weight.to_gpu()
        # F.sigmoid_cross_entropy has no enable_double_backprop argument, so only pass it when supported
if 'enable_double_backprop' in inspect.getargspec(loss_func).args:
loss_f = weight(loss_func(ys, t, reduce='no',
enable_double_backprop=True)[None])
else:
loss_f = weight(loss_func(ys, t, reduce='no')[None])
# Line 6
model_tmp.cleargrads()
weight.cleargrads()
loss_f.backward(retain_grad=True, enable_double_backprop=True)
# Line 7
for link in model_tmp.predictor.links(skipself=True):
for name, param in link.namedparams():
if name.count('/') == 1:
super(chainer.Link, link).__setattr__(
name[1:], param - self.alpha * param.grad_var)
xp = chainer.backends.cuda.get_array_module(t)
val_ind = xp.where(t == 1)[0]
# Line 8
ys_val = model_tmp.predictor(*x)[val_ind]
t_val = t[val_ind]
# Line 9
loss_g = loss_func(ys_val, t_val)
# Line 10
model_tmp.cleargrads()
weight.cleargrads()
# Line 11
w_tmp = chainer.grad([loss_g], [weight.W])[0].data
w_tmp = projection_simplex_sort(-w_tmp[0])
val_ind = xp.random.choice(batchsize, size=self.sampling_size, p=w_tmp)
y = model.predictor(*x)
loss_f2 = loss_func(y[val_ind], t[val_ind])
model.cleargrads()
loss_f2.backward()
optimizer.update()
# compatibility with chainer_chemistry.models.Classifier
if isinstance(model.accfun, dict):
metrics = {key: value(y, t) for key, value in model.accfun.items()}
chainer.reporter.report(metrics, model)
chainer.reporter.report({'loss': loss_f2}, model)
else:
chainer.reporter.report(
{'loss': loss_f2, 'accuracy': model.accfun(y, t)}, model)
class LossImportanceSampling(chainer.training.StandardUpdater):
    """Updater that re-samples the mini-batch with probability proportional to
    each example's loss and corrects the update with importance weights.
    """
def __init__(self, iterator, optimizer, converter=convert.concat_examples,
device=None, loss_func=None, loss_scale=None, sampling_size=32):
super(LossImportanceSampling, self).__init__(
iterator, optimizer, converter, device, loss_func, loss_scale)
self.sampling_size = sampling_size
def update_core(self):
optimizer = self._optimizers['main']
loss_func = self.loss_func or optimizer.target.lossfun
model = optimizer.target
it = self._iterators['main']
batch = it.next()
batchsize = len(batch)
*x, t = self.converter(batch, self.device)
y = model.predictor(*x)
loss = loss_func(y, t, reduce='no')
loss_all = F.mean(loss)
prob = loss.data[:, 0] + 1e-10
prob /= prob.sum()
index = np.random.choice(
batchsize, size=self.sampling_size, replace=True, p=prob)
weight = L.Linear(batchsize, 1, nobias=True, initialW=0)
weight.to_gpu()
weight.W.data[:, index] = 1 / (batchsize * prob[index])
loss_im = F.sum(weight(loss[None]))
model.cleargrads()
weight.cleargrads()
loss_im.backward()
optimizer.update()
# compatibility with chainer_chemistry.models.Classifier
if isinstance(model.accfun, dict):
metrics = {key: value(y, t) for key, value in model.accfun.items()}
chainer.reporter.report(metrics, model)
chainer.reporter.report({'loss': F.sum(loss_all)}, model)
else:
chainer.report(
{'loss': F.sum(loss), 'accuracy': model.accfun(y, t)}, model)
| [
"chainer.grad",
"chainer.functions.sum",
"numpy.random.choice",
"inspect.getargspec",
"chainer.links.Linear",
"third_party_library.projection_simplex_sort",
"chainer.backends.cuda.get_array_module",
"chainer.functions.mean",
"chainer.reporter.report"
] | [((1157, 1204), 'chainer.links.Linear', 'L.Linear', (['batchsize', '(1)'], {'nobias': '(True)', 'initialW': '(0)'}), '(batchsize, 1, nobias=True, initialW=0)\n', (1165, 1204), True, 'import chainer.links as L\n'), ((3970, 4017), 'chainer.links.Linear', 'L.Linear', (['batchsize', '(1)'], {'nobias': '(True)', 'initialW': '(0)'}), '(batchsize, 1, nobias=True, initialW=0)\n', (3978, 4017), True, 'import chainer.links as L\n'), ((4856, 4897), 'chainer.backends.cuda.get_array_module', 'chainer.backends.cuda.get_array_module', (['t'], {}), '(t)\n', (4894, 4897), False, 'import chainer\n'), ((5260, 5294), 'third_party_library.projection_simplex_sort', 'projection_simplex_sort', (['(-w_tmp[0])'], {}), '(-w_tmp[0])\n', (5283, 5294), False, 'from third_party_library import projection_simplex_sort\n'), ((6797, 6809), 'chainer.functions.mean', 'F.mean', (['loss'], {}), '(loss)\n', (6803, 6809), True, 'import chainer.functions as F\n'), ((6893, 6967), 'numpy.random.choice', 'np.random.choice', (['batchsize'], {'size': 'self.sampling_size', 'replace': '(True)', 'p': 'prob'}), '(batchsize, size=self.sampling_size, replace=True, p=prob)\n', (6909, 6967), True, 'import numpy as np\n'), ((6998, 7045), 'chainer.links.Linear', 'L.Linear', (['batchsize', '(1)'], {'nobias': '(True)', 'initialW': '(0)'}), '(batchsize, 1, nobias=True, initialW=0)\n', (7006, 7045), True, 'import chainer.links as L\n'), ((2801, 2840), 'chainer.reporter.report', 'chainer.reporter.report', (['metrics', 'model'], {}), '(metrics, model)\n', (2824, 2840), False, 'import chainer\n'), ((2853, 2902), 'chainer.reporter.report', 'chainer.reporter.report', (["{'loss': loss_f2}", 'model'], {}), "({'loss': loss_f2}, model)\n", (2876, 2902), False, 'import chainer\n'), ((5741, 5780), 'chainer.reporter.report', 'chainer.reporter.report', (['metrics', 'model'], {}), '(metrics, model)\n', (5764, 5780), False, 'import chainer\n'), ((5793, 5842), 'chainer.reporter.report', 'chainer.reporter.report', (["{'loss': loss_f2}", 'model'], {}), "({'loss': loss_f2}, model)\n", (5816, 5842), False, 'import chainer\n'), ((7489, 7528), 'chainer.reporter.report', 'chainer.reporter.report', (['metrics', 'model'], {}), '(metrics, model)\n', (7512, 7528), False, 'import chainer\n'), ((1332, 1361), 'inspect.getargspec', 'inspect.getargspec', (['loss_func'], {}), '(loss_func)\n', (1350, 1361), False, 'import inspect\n'), ((2338, 2346), 'chainer.functions.sum', 'F.sum', (['w'], {}), '(w)\n', (2343, 2346), True, 'import chainer.functions as F\n'), ((2375, 2383), 'chainer.functions.sum', 'F.sum', (['w'], {}), '(w)\n', (2380, 2383), True, 'import chainer.functions as F\n'), ((4145, 4174), 'inspect.getargspec', 'inspect.getargspec', (['loss_func'], {}), '(loss_func)\n', (4163, 4174), False, 'import inspect\n'), ((5201, 5235), 'chainer.grad', 'chainer.grad', (['[loss_g]', '[weight.W]'], {}), '([loss_g], [weight.W])\n', (5213, 5235), False, 'import chainer\n'), ((2263, 2297), 'chainer.grad', 'chainer.grad', (['[loss_g]', '[weight.W]'], {}), '([loss_g], [weight.W])\n', (2275, 2297), False, 'import chainer\n'), ((7574, 7589), 'chainer.functions.sum', 'F.sum', (['loss_all'], {}), '(loss_all)\n', (7579, 7589), True, 'import chainer.functions as F\n'), ((7666, 7677), 'chainer.functions.sum', 'F.sum', (['loss'], {}), '(loss)\n', (7671, 7677), True, 'import chainer.functions as F\n')] |
from rest_framework import viewsets, mixins, status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import filters
from django_filters.rest_framework import DjangoFilterBackend
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from core.models import Genre, Theme, Platform, Developer, Publisher, Game, \
User
from gig import serializers
class BaseGameAttrViewSet(viewsets.GenericViewSet, mixins.ListModelMixin):
"""Base viewset for game attributes"""
authentication_classes = (TokenAuthentication,)
def get_queryset(self):
"""Return objects"""
assigned_only = bool(self.request.query_params.get('assigned_only'))
queryset = self.queryset
if assigned_only:
queryset = queryset.filter(game__isnull=False)
return queryset.order_by('-name').distinct()
@method_decorator(cache_page(60))
def dispatch(self, *args, **kwargs):
return super(BaseGameAttrViewSet, self).dispatch(*args, **kwargs)
class GenreViewSet(BaseGameAttrViewSet):
"""View genres in the database"""
queryset = Genre.objects.all()
serializer_class = serializers.GenreSerializer
class ThemeViewSet(BaseGameAttrViewSet):
"""View themes in the database"""
queryset = Theme.objects.all()
serializer_class = serializers.ThemeSerializer
class PlatformViewSet(BaseGameAttrViewSet):
"""View platforms in the database"""
queryset = Platform.objects.all()
serializer_class = serializers.PlatformSerializer
class DeveloperViewSet(BaseGameAttrViewSet):
"""View developers in the database"""
queryset = Developer.objects.all()
serializer_class = serializers.DeveloperSerializer
class PublisherViewSet(BaseGameAttrViewSet):
"""View publishers in the database"""
queryset = Publisher.objects.all()
serializer_class = serializers.PublisherSerializer
class BaseGameViewSet(viewsets.ModelViewSet):
"""Base viewset for game"""
serializer_class = serializers.GameSerializer
authentication_classes = (TokenAuthentication,)
filter_backends = [DjangoFilterBackend, filters.SearchFilter,
filters.OrderingFilter]
filterset_fields = {
'first_release_date': ['exact', 'lte', 'gte'],
'rating': ['exact', 'lte', 'gte']
}
ordering_fields = ['rating', 'first_release_date', 'popularity']
def _params_to_ints(self, qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Retrieve the games"""
genres = self.request.query_params.get('genres')
themes = self.request.query_params.get('themes')
platforms = self.request.query_params.get('platforms')
developers = self.request.query_params.get('developers')
publishers = self.request.query_params.get('publishers')
ids = self.request.query_params.get('ids')
adult = self.request.query_params.get('is_adult')
queryset = self.queryset
if genres:
genre_ids = self._params_to_ints(genres)
queryset = queryset.filter(genres__id__in=genre_ids)
if themes:
theme_ids = self._params_to_ints(themes)
queryset = queryset.filter(themes__id__in=theme_ids)
if platforms:
platform_ids = self._params_to_ints(platforms)
queryset = queryset.filter(platforms__id__in=platform_ids)
if developers:
developer_ids = self._params_to_ints(developers)
queryset = queryset.filter(developers__id__in=developer_ids)
if publishers:
publisher_ids = self._params_to_ints(publishers)
queryset = queryset.filter(publishers__id__in=publisher_ids)
if ids:
ids_arr = self._params_to_ints(ids)
queryset = queryset.filter(id__in=ids_arr)
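        # Exclude titles tagged with the 'Erotic' theme unless the client passes is_adult=y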
if adult != 'y':
try:
queryset = queryset.exclude(themes__name='Erotic')
except Theme.DoesNotExist:
                pass
if self.saved:
return queryset.filter(user=self.request.user)
return queryset.distinct()
class GameViewSet(BaseGameViewSet):
"""View games in the database"""
saved = False
queryset = Game.objects.order_by('-rating')
search_fields = ['name', 'genres__name', 'themes__name']
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return serializers.GameDetailSerializer
return self.serializer_class
@action(methods=['GET', 'POST'], detail=True, url_path='add-to-saved')
def add_to_saved(self, request, pk=None):
"""Add a game to saved"""
game = self.get_object()
try:
user = User.objects.get(id=self.request.user.id)
except User.DoesNotExist:
return Response(
status=status.HTTP_401_UNAUTHORIZED
)
else:
user.saved.add(game)
return Response(
status=status.HTTP_200_OK
)
@action(methods=['GET', 'POST'], detail=True, url_path='remove-from-saved')
def remove_from_saved(self, request, pk=None):
"""Remove a game from saved"""
game = self.get_object()
try:
user = User.objects.get(id=self.request.user.id)
except User.DoesNotExist:
return Response(
status=status.HTTP_401_UNAUTHORIZED
)
else:
user.saved.remove(game)
return Response(
status=status.HTTP_200_OK
)
@method_decorator(cache_page(60))
def dispatch(self, *args, **kwargs):
return super(GameViewSet, self).dispatch(*args, **kwargs)
class SavedViewSet(BaseGameViewSet):
"""View saved in the database"""
saved = True
queryset = Game.objects.all()
permission_classes = (IsAuthenticated,)
| [
"core.models.Game.objects.order_by",
"core.models.Game.objects.all",
"core.models.Theme.objects.all",
"core.models.Platform.objects.all",
"rest_framework.response.Response",
"core.models.Genre.objects.all",
"django.views.decorators.cache.cache_page",
"core.models.Developer.objects.all",
"core.models.Publisher.objects.all",
"rest_framework.decorators.action",
"core.models.User.objects.get"
] | [((1374, 1393), 'core.models.Genre.objects.all', 'Genre.objects.all', ([], {}), '()\n', (1391, 1393), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((1541, 1560), 'core.models.Theme.objects.all', 'Theme.objects.all', ([], {}), '()\n', (1558, 1560), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((1714, 1736), 'core.models.Platform.objects.all', 'Platform.objects.all', ([], {}), '()\n', (1734, 1736), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((1895, 1918), 'core.models.Developer.objects.all', 'Developer.objects.all', ([], {}), '()\n', (1916, 1918), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((2078, 2101), 'core.models.Publisher.objects.all', 'Publisher.objects.all', ([], {}), '()\n', (2099, 2101), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((4572, 4604), 'core.models.Game.objects.order_by', 'Game.objects.order_by', (['"""-rating"""'], {}), "('-rating')\n", (4593, 4604), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((4887, 4956), 'rest_framework.decorators.action', 'action', ([], {'methods': "['GET', 'POST']", 'detail': '(True)', 'url_path': '"""add-to-saved"""'}), "(methods=['GET', 'POST'], detail=True, url_path='add-to-saved')\n", (4893, 4956), False, 'from rest_framework.decorators import action\n'), ((5412, 5486), 'rest_framework.decorators.action', 'action', ([], {'methods': "['GET', 'POST']", 'detail': '(True)', 'url_path': '"""remove-from-saved"""'}), "(methods=['GET', 'POST'], detail=True, url_path='remove-from-saved')\n", (5418, 5486), False, 'from rest_framework.decorators import action\n'), ((6203, 6221), 'core.models.Game.objects.all', 'Game.objects.all', ([], {}), '()\n', (6219, 6221), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((1147, 1161), 'django.views.decorators.cache.cache_page', 'cache_page', (['(60)'], {}), '(60)\n', (1157, 1161), False, 'from django.views.decorators.cache import cache_page\n'), ((5972, 5986), 'django.views.decorators.cache.cache_page', 'cache_page', (['(60)'], {}), '(60)\n', (5982, 5986), False, 'from django.views.decorators.cache import cache_page\n'), ((5103, 5144), 'core.models.User.objects.get', 'User.objects.get', ([], {'id': 'self.request.user.id'}), '(id=self.request.user.id)\n', (5119, 5144), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((5340, 5375), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (5348, 5375), False, 'from rest_framework.response import Response\n'), ((5643, 5684), 'core.models.User.objects.get', 'User.objects.get', ([], {'id': 'self.request.user.id'}), '(id=self.request.user.id)\n', (5659, 5684), False, 'from core.models import Genre, Theme, Platform, Developer, Publisher, Game, User\n'), ((5883, 5918), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (5891, 5918), False, 'from rest_framework.response import Response\n'), ((5198, 5243), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (5206, 5243), False, 'from rest_framework.response import Response\n'), ((5738, 5783), 
'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (5746, 5783), False, 'from rest_framework.response import Response\n')] |
"""
============================================
vidgear library code is placed under the MIT license
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
===============================================
"""
from vidgear.gears import WriteGear
import numpy as np
import pytest
def test_assertfailedwrite():
"""
IO Test - made to fail with Wrong Output file path
"""
np.random.seed(0)
# generate random data for 10 frames
random_data = np.random.random(size=(10, 1080, 1920, 3)) * 255
input_data = random_data.astype(np.uint8)
with pytest.raises(AssertionError):
# wrong folder path does not exist
writer = WriteGear("wrong_path/output.mp4")
writer.write(input_data)
writer.close()
def test_failedextension():
"""
    IO Test - made to fail with filename with wrong extension
"""
np.random.seed(0)
# generate random data for 10 frames
random_data = np.random.random(size=(10, 1080, 1920, 3)) * 255
input_data = random_data.astype(np.uint8)
# 'garbage' extension does not exist
with pytest.raises(ValueError):
writer = WriteGear("garbage.garbage")
writer.write(input_data)
writer.close()
| [
"numpy.random.random",
"pytest.raises",
"numpy.random.seed",
"vidgear.gears.WriteGear"
] | [((1405, 1422), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1419, 1422), True, 'import numpy as np\n'), ((1851, 1868), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1865, 1868), True, 'import numpy as np\n'), ((1478, 1520), 'numpy.random.random', 'np.random.random', ([], {'size': '(10, 1080, 1920, 3)'}), '(size=(10, 1080, 1920, 3))\n', (1494, 1520), True, 'import numpy as np\n'), ((1580, 1609), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1593, 1609), False, 'import pytest\n'), ((1661, 1695), 'vidgear.gears.WriteGear', 'WriteGear', (['"""wrong_path/output.mp4"""'], {}), "('wrong_path/output.mp4')\n", (1670, 1695), False, 'from vidgear.gears import WriteGear\n'), ((1924, 1966), 'numpy.random.random', 'np.random.random', ([], {'size': '(10, 1080, 1920, 3)'}), '(size=(10, 1080, 1920, 3))\n', (1940, 1966), True, 'import numpy as np\n'), ((2066, 2091), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2079, 2091), False, 'import pytest\n'), ((2105, 2133), 'vidgear.gears.WriteGear', 'WriteGear', (['"""garbage.garbage"""'], {}), "('garbage.garbage')\n", (2114, 2133), False, 'from vidgear.gears import WriteGear\n')] |
#!/usr/bin/env kross
import re, Kross, KSpread
T = Kross.module("kdetranslation")
func = KSpread.function("PYREGEXP")
func.minparam = 3
func.maxparam = 3
func.comment = T.i18n("The PYREGEXP() function replaces substring in the input string with a given string using regular expression.")
func.syntax = T.i18n("PYREGEXP(string)")
func.addParameter("String", T.i18n("The input string."))
func.addParameter("String", T.i18n("The regular expression."))
func.addParameter("String", T.i18n("Replace with."))
func.addExample(T.i18n("PYREGEXP(\"Some String\",\"(S|m)\",\"A\")"))
def update(args):
s = args[0]
regexp = args[1]
repl = args[2]
try:
p = re.compile(regexp)
func.result = p.sub(repl, s)
    except re.error:
func.error = T.i18n("Invalid regexp")
func.connect("called(QVariantList)", update)
func.registerFunction()
| [
"KSpread.function",
"Kross.module",
"re.compile"
] | [((53, 83), 'Kross.module', 'Kross.module', (['"""kdetranslation"""'], {}), "('kdetranslation')\n", (65, 83), False, 'import re, Kross, KSpread\n'), ((92, 120), 'KSpread.function', 'KSpread.function', (['"""PYREGEXP"""'], {}), "('PYREGEXP')\n", (108, 120), False, 'import re, Kross, KSpread\n'), ((670, 688), 're.compile', 're.compile', (['regexp'], {}), '(regexp)\n', (680, 688), False, 'import re, Kross, KSpread\n')] |
import json
import logging
import os
import re
from functools import cache
from requests.exceptions import HTTPError
from threading import Lock
from .job import Job
from .state import ChapterData, MediaData
from .util.media_type import MediaType
from .util.name_parser import (find_media_with_similar_name_in_list,
get_alt_names)
def get_extension(url):
_, ext = os.path.splitext(url.split("?")[0])
if ext and ext[0] == ".":
ext = ext[1:]
return ext
class RequestServer:
session = None
settings = None
# If true a cloudscraper object should be given instead of a normal session
need_cloud_scraper = False
def __init__(self, session, settings=None):
self.settings = settings
if self.need_cloud_scraper:
import cloudscraper
if getattr(RequestServer, "cloudscraper", None) is None:
RequestServer.cloudscraper = cloudscraper.create_scraper(browser={
'browser': 'firefox',
'platform': 'linux',
'desktop': True
})
# TODO remove on new cloudscraper release
RequestServer.cloudscraper.cookies = session.cookies
self.session = RequestServer.cloudscraper
else:
self.session = session
self._lock = Lock()
@classmethod
def get_instances(clazz, session, settings=None):
return [clazz(session, settings)]
def _request(self, get, url, **kwargs):
logging.info("Making %s request to %s ", "GET" if get else "POST", url)
logging.debug("Request args: %s ", kwargs)
kwargs["verify"] = not self.settings.get_disable_ssl_verification(self.id)
r = self.session.get(url, **kwargs) if get else self.session.post(url, **kwargs)
if r.status_code != 200:
logging.warning("HTTP Error: %d", r.status_code)
r.raise_for_status()
return r
def session_get_cookie(self, name, domain=None):
for cookie in self.session.cookies:
if cookie.name == name and (domain in cookie.domain or cookie.domain in domain):
return cookie.value
return None
@cache
def session_get_mem_cache(self, url, **kwargs):
return self.session_get(url, **kwargs)
def session_get_cache_json(self, url, skip_cache=False, **kwargs):
file = self.settings.get_web_cache(url)
if skip_cache:
return self.session_get(url, **kwargs).json()
try:
with open(file, "r") as f:
return json.load(f)
except (json.decoder.JSONDecodeError, FileNotFoundError):
pass
r = self.session_get(url, **kwargs)
data = r.json()
for i in range(2):
try:
with open(file, "w") as f:
json.dump(data, f)
break
except FileNotFoundError:
os.makedirs(self.settings.cache_dir, exist_ok=True)
return data
def session_get(self, url, **kwargs):
return self._request(True, url, **kwargs)
def session_post(self, url, **kwargs):
return self._request(False, url, **kwargs)
def soupify(self, BeautifulSoup, r):
return BeautifulSoup(r.text, self.settings.bs4_parser)
class MediaServer(RequestServer):
def create_media_data(self, id, name, season_id=None, season_title="", dir_name=None, offset=0, alt_id=None, progress_volumes=None, lang="", **kwargs):
if lang is None:
match = re.search(r"\((\w*) Dub\)", name) or re.search(r"\((\w*) Dub\)", season_title)
if match:
lang = match.group(1) if match else ""
else:
match = re.search(r"\(Dub\)", name) or re.search(r"\(Dub\)", season_title)
lang = "dub" if match else ""
return MediaData(dict(server_id=self.id, id=id, dir_name=dir_name if dir_name else re.sub(r"[\W]", "", name.replace(" ", "_")), name=name, media_type=self.media_type.value, media_type_name=self.media_type.name, progress=0, season_id=season_id, season_title=season_title, offset=offset, alt_id=alt_id, trackers={}, progress_volumes=progress_volumes if progress_volumes is not None else self.progress_volumes, tags=[], lang=lang, **kwargs))
def update_chapter_data(self, media_data, id, title, number, premium=False, alt_id=None, special=False, date=None, subtitles=None, inaccessible=False, **kwargs):
if number is None or number == "" or isinstance(number, str) and number.isalpha():
number = 0
special = True
id = str(id)
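        # Chapter numbers like "10-5" are parsed as floats (10.5); if that fails, use the first run of digits and mark the chapter as special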
if isinstance(number, str):
try:
number = float(number.replace("-", "."))
except ValueError:
special = True
                number = float(re.search(r"\d+", number).group(0))
if media_data["offset"]:
number = round(number - media_data["offset"], 4)
if number % 1 == 0:
number = int(number)
new_values = dict(id=id, title=title, number=number, premium=premium, alt_id=alt_id, special=special, date=date, subtitles=subtitles, inaccessible=inaccessible, **kwargs)
if id in media_data["chapters"]:
media_data["chapters"][id].update(new_values)
else:
media_data["chapters"][id] = ChapterData(new_values)
media_data["chapters"][id]["read"] = False
return True
def create_page_data(self, url, id=None, encryption_key=None, ext=None):
if not ext:
ext = get_extension(url)
assert ext, url
return dict(url=url, id=id, encryption_key=encryption_key, ext=ext)
class GenericServer(MediaServer):
"""
This class is intended to separate the overridable methods of Server from
the internal business logic.
Servers need not override most of the methods of this. Some have default
values that are sane in some common situations
"""
# Unique id of the server
id = None
# If set this value will be used for credential lookup instead of id
alias = None
media_type = MediaType.MANGA
# Pattern to match to determine if this server can stream a given url.
# It is also used to determine if server can add the media based on its chapter url
stream_url_regex = None
# Measures progress in volumes instead of chapter/episodes
progress_volumes = False
# True if the server only provides properly licensed media
official = True
# Download a single page from this server at a time
synchronize_chapter_downloads = False
# If the server has some free media (used for testing)
has_free_chapters = True
# Used to determine if the account can access premium content
is_premium = False
def get_media_list(self, limit=None): # pragma: no cover
"""
Returns an arbitrary selection of media.
"""
raise NotImplementedError
def search(self, term, limit=20):
"""
Searches for a media containing term
Different servers will handle search differently. Some are very literal while others do prefix matching and some would match any word
"""
items = find_media_with_similar_name_in_list(get_alt_names(term), self.get_media_list())
return sorted(items, key=lambda x: abs(len(x["name"]) - len(term)))[:limit]
def update_media_data(self, media_data): # pragma: no cover
"""
Returns media data from API
"""
raise NotImplementedError
def get_media_chapter_data(self, media_data, chapter_data, stream_index=0):
"""
Returns a list of page/episode data. For anime (specifically for video files) this may be a list of size 1
The default implementation is for anime servers and will contain the preferred stream url
"""
last_err = None
urls = self.get_stream_urls(media_data=media_data, chapter_data=chapter_data)
logging.debug("Stream urls %s", urls)
if stream_index != 0:
urls = urls[stream_index:] + urls[:stream_index]
for url in urls:
ext = get_extension(url)
try:
if ext == "m3u8":
import m3u8
m = m3u8.load(url)
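                    # No segments means this is a master playlist: fall back to its highest-bandwidth variant playlist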
if not m.segments:
playlist = sorted(m.playlists, key=lambda x: x.stream_info.bandwidth, reverse=True)
m = m3u8.load(playlist[0].uri)
assert m.segments
return [self.create_page_data(url=segment.uri, encryption_key=segment.key, ext="ts") for segment in m.segments]
else:
return [self.create_page_data(url=url, ext=ext)]
except ImportError as e:
last_err = e
raise last_err
def save_chapter_page(self, page_data, path):
""" Save the page designated by page_data to path
By default it blindly writes the specified url to disk, decrypting it
if needed.
"""
r = self.session_get(page_data["url"], stream=True)
content = r.content
key = page_data["encryption_key"]
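        # HLS pages may be AES-encrypted; fetch the key from its URI and decrypt (CBC mode) before writing to disk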
if key:
from Crypto.Cipher import AES
key_bytes = self.session_get_mem_cache(key.uri).content
iv = int(key.iv, 16).to_bytes(16, "big") if key.iv else None
content = AES.new(key_bytes, AES.MODE_CBC, iv).decrypt(content)
with open(path, 'wb') as fp:
fp.write(content)
def get_media_data_from_url(self, url): # pragma: no cover
""" Return the media data related to this url
url should be the page needed to view the episode/chapter.
The protocol, query parameters or presence of "www" should be ignored.
The media does not need to have its chapter's list populated but it is
allowed to.
"""
raise NotImplementedError
def get_chapter_id_for_url(self, url): # pragma: no cover
""" Return the chapter id related to this url
Like get_media_data_from_url but returns just the chapter id
"""
raise NotImplementedError
def can_stream_url(self, url):
return self.stream_url_regex and self.stream_url_regex.search(url)
################ ANIME ONLY #####################
def get_stream_url(self, media_data, chapter_data, stream_index=0):
""" Returns a url to stream from
Override get_stream_urls instead
"""
return list(self.get_stream_urls(media_data=media_data, chapter_data=chapter_data))[stream_index]
def get_stream_urls(self, media_data, chapter_data): # pragma: no cover
raise NotImplementedError
def download_subtitles(self, media_data, chapter_data, dir_path):
""" Only for ANIME, Download subtitles to dir_path
By default does nothing. Subtitles should generally have the same name
as the final media
"""
pass
################ Needed for servers requiring logins #####################
def needs_authentication(self):
"""
Checks if the user is logged in
If the user is not logged in (and needs to login to access all content),
this method should return true.
"""
return self.has_login() and not self.is_logged_in
def login(self, username, password): # pragma: no cover
""" Used the specified username/passowrd to authenticate
This method should return True iff login succeeded even if the account isn't premium
Set `is_premium` if the account is premium.
        It is perfectly fine to throw an HTTPError on failed authentication.
"""
raise NotImplementedError
################ OPTIONAL #####################
def post_download(self, media_data, chapter_data, dir_path, pages):
""" Runs after all pages have been downloaded
"""
pass
class Server(GenericServer):
"""
The methods contained in this class should rarely be overridden
"""
_is_logged_in = False
DOWNLOAD_MARKER = ".downloaded"
@property
def is_logged_in(self):
return self._is_logged_in
def has_login(self):
return self.login.__func__ is not GenericServer.login
def is_local_server(self):
return self.download_chapter.__func__ is not Server.download_chapter
def get_credentials(self):
return self.settings.get_credentials(self.id if not self.alias else self.alias)
def relogin(self):
username, password = self.get_credentials()
try:
self._is_logged_in = self.login(username, password)
except HTTPError:
self._is_logged_in = False
if not self._is_logged_in:
logging.warning("Could not login with username: %s", username)
else:
logging.info("Logged into %s; premium %s", self.id, self.is_premium)
return self._is_logged_in
def is_fully_downloaded(self, media_data, chapter_data):
dir_path = self.settings.get_chapter_dir(media_data, chapter_data)
return os.path.exists(os.path.join(dir_path, self.DOWNLOAD_MARKER))
def mark_download_complete(self, dir_path):
open(os.path.join(dir_path, self.DOWNLOAD_MARKER), 'w').close()
def download_if_missing(self, page_data, full_path):
if os.path.exists(full_path):
logging.debug("Page %s already download", full_path)
else:
logging.info("downloading %s", full_path)
temp_path = os.path.join(os.path.dirname(full_path), ".tmp-" + os.path.basename(full_path))
self.save_chapter_page(page_data, temp_path)
os.rename(temp_path, full_path)
def get_children(self, media_data, chapter_data):
return "{}/*".format(self.settings.get_chapter_dir(media_data, chapter_data))
def needs_to_login(self):
try:
return not self.is_logged_in and self.needs_authentication()
except HTTPError:
return True
def pre_download(self, media_data, chapter_data):
if chapter_data["inaccessible"]:
logging.info("Chapter is not accessible")
raise ValueError("Cannot access chapter")
if chapter_data["premium"] and not self.is_premium:
if self.needs_to_login():
logging.info("Server is not authenticated; relogging in")
if not self.relogin():
logging.info("Cannot access chapter %s #%s %s", media_data["name"], str(chapter_data["number"]), chapter_data["title"])
else:
self._is_logged_in = True
if not self.is_premium:
logging.info("Cannot access chapter %s #%s %s because account is not premium", media_data["name"], str(chapter_data["number"]), chapter_data["title"])
raise ValueError("Cannot access premium chapter")
if self.media_type == MediaType.ANIME:
sub_dir = os.path.join(self.settings.get_chapter_dir(media_data, chapter_data), self.settings.subtitles_dir)
os.makedirs(sub_dir, exist_ok=True)
self.download_subtitles(media_data, chapter_data, dir_path=sub_dir)
def download_chapter(self, media_data, chapter_data, page_limit=None, offset=0, stream_index=0):
if self.is_fully_downloaded(media_data, chapter_data):
logging.info("Already downloaded %s %s", media_data["name"], chapter_data["title"])
return False
try:
if self.synchronize_chapter_downloads:
self._lock.acquire()
return self._download_chapter(media_data, chapter_data, page_limit, offset, stream_index)
finally:
if self.synchronize_chapter_downloads:
self._lock.release()
def _download_chapter(self, media_data, chapter_data, page_limit=None, offset=0, stream_index=0):
logging.info("Starting download of %s %s", media_data["name"], chapter_data["title"])
dir_path = self.settings.get_chapter_dir(media_data, chapter_data)
os.makedirs(dir_path, exist_ok=True)
self.pre_download(media_data, chapter_data)
list_of_pages = []
# download pages
job = Job(self.settings.get_threads(media_data), raiseException=True)
for i, page_data in enumerate(self.get_media_chapter_data(media_data, chapter_data, stream_index=stream_index)):
if page_limit is not None and i == page_limit:
break
if i >= offset:
list_of_pages.append(page_data)
page_data["path"] = os.path.join(dir_path, self.settings.get_page_file_name(media_data, chapter_data, ext=page_data["ext"], page_number=i))
job.add(lambda page_data=page_data: self.download_if_missing(page_data, page_data["path"]))
job.run()
assert list_of_pages
self.post_download(media_data, chapter_data, dir_path, list_of_pages)
self.settings.post_process(media_data, (page_data["path"] for page_data in list_of_pages), dir_path)
self.mark_download_complete(dir_path)
logging.info("%s %d %s is downloaded; Total pages %d", media_data["name"], chapter_data["number"], chapter_data["title"], len(list_of_pages))
return True
class TorrentHelper(MediaServer):
id = None
media_type = MediaType.ANIME
official = False
progress_volumes = True
def download_torrent_file(self, media_data):
"""
Downloads the raw torrent file
"""
self.save_torrent_file(media_data, self.settings.get_external_downloads_path(media_data))
def save_torrent_file(self, media_data, path): # pragma: no cover
"""Save the torrent file to disk"""
raise NotImplementedError
class Tracker(RequestServer):
id = None
official = True
def get_media_dict(self, id, media_type, name, progress, progress_volumes=None, score=0, time_spent=0, year=0, year_end=0, season=None, genres=[], tags=[], studio=[]):
return {"id": id, "media_type": media_type, "name": name, "progress": progress, "progress_volumes": progress_volumes,
"score": score, "time_spent": time_spent, "year": year, "year_end": year_end, "season": season, "genres": genres, "tags": tags, "studio": studio
}
def get_auth_url(self): # pragma: no cover
""" Return the url the user can goto to get the auth token"""
raise NotImplementedError
def update(self, list_of_updates): # pragma: no cover
""" Updates progress to remote tracker
list_of_updates is a list of tuples -- tracker_id, progress, progress_volumes
where progress is the numerical value to update to and progress_volumes is
whether to treat this a chapter/episode progress or volume progress
"""
raise NotImplementedError
def get_full_list_data(self, user_name=None, id=None):
return self.get_tracker_list(user_name, id, status=None)
def get_tracker_list(self, user_name=None, id=None, status="CURRENT"): # pragma: no cover
""" Returns a list of media dicts
See get_media_dict
"""
raise NotImplementedError
| [
"os.path.exists",
"logging.debug",
"os.makedirs",
"threading.Lock",
"os.rename",
"m3u8.load",
"logging.warning",
"os.path.join",
"cloudscraper.create_scraper",
"os.path.dirname",
"Crypto.Cipher.AES.new",
"os.path.basename",
"json.load",
"logging.info",
"json.dump",
"re.search"
] | [((1371, 1377), 'threading.Lock', 'Lock', ([], {}), '()\n', (1375, 1377), False, 'from threading import Lock\n'), ((1545, 1616), 'logging.info', 'logging.info', (['"""Making %s request to %s """', "('GET' if get else 'POST')", 'url'], {}), "('Making %s request to %s ', 'GET' if get else 'POST', url)\n", (1557, 1616), False, 'import logging\n'), ((1625, 1667), 'logging.debug', 'logging.debug', (['"""Request args: %s """', 'kwargs'], {}), "('Request args: %s ', kwargs)\n", (1638, 1667), False, 'import logging\n'), ((8034, 8071), 'logging.debug', 'logging.debug', (['"""Stream urls %s"""', 'urls'], {}), "('Stream urls %s', urls)\n", (8047, 8071), False, 'import logging\n'), ((13441, 13466), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (13455, 13466), False, 'import os\n'), ((16000, 16090), 'logging.info', 'logging.info', (['"""Starting download of %s %s"""', "media_data['name']", "chapter_data['title']"], {}), "('Starting download of %s %s', media_data['name'], chapter_data\n ['title'])\n", (16012, 16090), False, 'import logging\n'), ((16169, 16205), 'os.makedirs', 'os.makedirs', (['dir_path'], {'exist_ok': '(True)'}), '(dir_path, exist_ok=True)\n', (16180, 16205), False, 'import os\n'), ((1885, 1933), 'logging.warning', 'logging.warning', (['"""HTTP Error: %d"""', 'r.status_code'], {}), "('HTTP Error: %d', r.status_code)\n", (1900, 1933), False, 'import logging\n'), ((12846, 12908), 'logging.warning', 'logging.warning', (['"""Could not login with username: %s"""', 'username'], {}), "('Could not login with username: %s', username)\n", (12861, 12908), False, 'import logging\n'), ((12935, 13003), 'logging.info', 'logging.info', (['"""Logged into %s; premium %s"""', 'self.id', 'self.is_premium'], {}), "('Logged into %s; premium %s', self.id, self.is_premium)\n", (12947, 13003), False, 'import logging\n'), ((13205, 13249), 'os.path.join', 'os.path.join', (['dir_path', 'self.DOWNLOAD_MARKER'], {}), '(dir_path, self.DOWNLOAD_MARKER)\n', (13217, 13249), False, 'import os\n'), ((13480, 13532), 'logging.debug', 'logging.debug', (['"""Page %s already download"""', 'full_path'], {}), "('Page %s already download', full_path)\n", (13493, 13532), False, 'import logging\n'), ((13559, 13600), 'logging.info', 'logging.info', (['"""downloading %s"""', 'full_path'], {}), "('downloading %s', full_path)\n", (13571, 13600), False, 'import logging\n'), ((13774, 13805), 'os.rename', 'os.rename', (['temp_path', 'full_path'], {}), '(temp_path, full_path)\n', (13783, 13805), False, 'import os\n'), ((14222, 14263), 'logging.info', 'logging.info', (['"""Chapter is not accessible"""'], {}), "('Chapter is not accessible')\n", (14234, 14263), False, 'import logging\n'), ((15179, 15214), 'os.makedirs', 'os.makedirs', (['sub_dir'], {'exist_ok': '(True)'}), '(sub_dir, exist_ok=True)\n', (15190, 15214), False, 'import os\n'), ((15472, 15560), 'logging.info', 'logging.info', (['"""Already downloaded %s %s"""', "media_data['name']", "chapter_data['title']"], {}), "('Already downloaded %s %s', media_data['name'], chapter_data[\n 'title'])\n", (15484, 15560), False, 'import logging\n'), ((944, 1045), 'cloudscraper.create_scraper', 'cloudscraper.create_scraper', ([], {'browser': "{'browser': 'firefox', 'platform': 'linux', 'desktop': True}"}), "(browser={'browser': 'firefox', 'platform':\n 'linux', 'desktop': True})\n", (971, 1045), False, 'import cloudscraper\n'), ((2614, 2626), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2623, 2626), False, 'import json\n'), ((3587, 3622), 're.search', 
're.search', (['"""\\\\((\\\\w*) Dub\\\\)"""', 'name'], {}), "('\\\\((\\\\w*) Dub\\\\)', name)\n", (3596, 3622), False, 'import re\n'), ((3624, 3667), 're.search', 're.search', (['"""\\\\((\\\\w*) Dub\\\\)"""', 'season_title'], {}), "('\\\\((\\\\w*) Dub\\\\)', season_title)\n", (3633, 3667), False, 'import re\n'), ((13638, 13664), 'os.path.dirname', 'os.path.dirname', (['full_path'], {}), '(full_path)\n', (13653, 13664), False, 'import os\n'), ((14432, 14489), 'logging.info', 'logging.info', (['"""Server is not authenticated; relogging in"""'], {}), "('Server is not authenticated; relogging in')\n", (14444, 14489), False, 'import logging\n'), ((2886, 2904), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (2895, 2904), False, 'import json\n'), ((2985, 3036), 'os.makedirs', 'os.makedirs', (['self.settings.cache_dir'], {'exist_ok': '(True)'}), '(self.settings.cache_dir, exist_ok=True)\n', (2996, 3036), False, 'import os\n'), ((3785, 3813), 're.search', 're.search', (['"""\\\\(Dub\\\\)"""', 'name'], {}), "('\\\\(Dub\\\\)', name)\n", (3794, 3813), False, 'import re\n'), ((3816, 3852), 're.search', 're.search', (['"""\\\\(Dub\\\\)"""', 'season_title'], {}), "('\\\\(Dub\\\\)', season_title)\n", (3825, 3852), False, 'import re\n'), ((8333, 8347), 'm3u8.load', 'm3u8.load', (['url'], {}), '(url)\n', (8342, 8347), False, 'import m3u8\n'), ((9469, 9505), 'Crypto.Cipher.AES.new', 'AES.new', (['key_bytes', 'AES.MODE_CBC', 'iv'], {}), '(key_bytes, AES.MODE_CBC, iv)\n', (9476, 9505), False, 'from Crypto.Cipher import AES\n'), ((13313, 13357), 'os.path.join', 'os.path.join', (['dir_path', 'self.DOWNLOAD_MARKER'], {}), '(dir_path, self.DOWNLOAD_MARKER)\n', (13325, 13357), False, 'import os\n'), ((13676, 13703), 'os.path.basename', 'os.path.basename', (['full_path'], {}), '(full_path)\n', (13692, 13703), False, 'import os\n'), ((8523, 8549), 'm3u8.load', 'm3u8.load', (['playlist[0].uri'], {}), '(playlist[0].uri)\n', (8532, 8549), False, 'import m3u8\n'), ((4878, 4903), 're.search', 're.search', (['"""\\\\d+"""', 'number'], {}), "('\\\\d+', number)\n", (4887, 4903), False, 'import re\n')] |
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""Do not use. Deprecated. Backwards compatibility module for transition from
TaurusDatabase to TaurusAuthority"""
from logging import warn
warn('taurusdatabase module is deprecated. Use taurusauthority instead')
import traceback
traceback.print_stack()
from .taurusauthority import *
TaurusDatabase = TaurusAuthority
# The following block is commented out because it produces a circular import
# try:
# from taurus.core.tango.tangodatabase import TangoInfo as TaurusInfo
# from taurus.core.tango.tangodatabase import TangoAttrInfo as TaurusAttrInfo
# from taurus.core.tango.tangodatabase import TangoDevInfo as TaurusDevInfo
# from taurus.core.tango.tangodatabase import TangoDevClassInfo as \
# TaurusDevClassInfo
# from taurus.core.tango.tangodatabase import TangoServInfo as TaurusServInfo
# except ImportError, e:
# warn('taurusdatabase: Cannot import tango info objects: %s', repr(e))
#
| [
"logging.warn",
"traceback.print_stack"
] | [((1093, 1165), 'logging.warn', 'warn', (['"""taurusdatabase module is deprecated. Use taurusauthority instead"""'], {}), "('taurusdatabase module is deprecated. Use taurusauthority instead')\n", (1097, 1165), False, 'from logging import warn\n'), ((1184, 1207), 'traceback.print_stack', 'traceback.print_stack', ([], {}), '()\n', (1205, 1207), False, 'import traceback\n')] |
from collections import Counter
class AuxiliarMethods:
#Empty constructor
def __init__(self):
pass
    #Method that returns the base hit rate
@classmethod
def base_accuracy(self, markings):
base_hit = max(Counter(markings).values())
base_hit_rate = 100.0 * base_hit / len(markings)
return base_hit_rate
    #Method that fits the model and returns its hit rate on the test data
@classmethod
def model_fit_and_predict(self, model, training_data, training_markups, test_data, test_markups):
model.fit(training_data, training_markups)
result = model.predict(test_data)
#Where the results have the same value in the test_markups
hits = result == test_markups
total_hits = sum(hits)
total_elements = len(test_data)
#Turning into percentage
hit_rate = 100.0 * total_hits / total_elements
return hit_rate
    #Method that returns the model's predictions
@classmethod
def model_predict(self, model, validation_data):
result = model.predict(validation_data)
return result
    #Method to transform raw values into the binary columns the model expects
@classmethod
def transform_values(self, origem, prouni, tpo_ingresso, sexo, idade, nota):
        # Encode origem (origin) as a one-hot binary vector
if origem.upper() == "AJU":
origem_bin = [1, 0, 0]
elif origem.upper() == "OUTROS":
origem_bin = [0, 1, 0]
else:
origem_bin = [0, 0, 1]
        # Encode prouni as a binary pair
if prouni.upper() == "S":
prouni_bin = [1, 0]
else:
prouni_bin = [0, 1]
        # Encode tpo_ingresso (admission type) as a one-hot binary vector
if tpo_ingresso.upper() == "PORTADOR_DIPLOMA":
tpo_ingresso_bin = [1, 0, 0, 0, 0]
elif tpo_ingresso.upper() == "PROUNI":
tpo_ingresso_bin = [0, 1, 0, 0, 0]
elif tpo_ingresso.upper() == "TRANSF_EXTERNA":
tpo_ingresso_bin = [0, 0, 1, 0, 0]
elif tpo_ingresso.upper() == "TRANSF_INTERNA":
tpo_ingresso_bin = [0, 0, 0, 1, 0]
else:
tpo_ingresso_bin = [0, 0, 0, 0, 1]
        # Encode sexo (sex) as a binary pair
if sexo.upper() == "F":
sexo_bin = [1, 0]
else:
sexo_bin = [0, 1]
        # Convert idade (age) to an integer
idade_bin = [int(idade)]
        # Convert nota (grade) to an integer and bucket it into nota_bin
nota = int(nota)
if nota > 9.6:
nota_bin = [10]
elif nota > 8.6:
nota_bin = [9]
elif nota > 7.6:
nota_bin = [8]
elif nota > 6.6:
nota_bin = [7]
elif nota > 5.6:
nota_bin = [6]
elif nota > 4.6:
nota_bin = [5]
elif nota > 3.6:
nota_bin = [4]
elif nota > 2.6:
nota_bin = [3]
elif nota > 1.6:
nota_bin = [2]
elif nota > 0.6:
nota_bin = [1]
else:
nota_bin = [0]
        # Concatenate all binary values in the expected order and return the result
result = idade_bin + origem_bin + prouni_bin + tpo_ingresso_bin + sexo_bin + nota_bin
return result | [
"collections.Counter"
] | [((246, 263), 'collections.Counter', 'Counter', (['markings'], {}), '(markings)\n', (253, 263), False, 'from collections import Counter\n')] |
# TODO: Replace this with a proper colab notebook
import torch
from src.transformers import AutoModelForSequenceClassification, AutoTokenizer
if __name__ == "__main__":
"""A temporary example to highlight changes implemented for AdapterDrop at inference"""
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
model.load_adapter("sentiment/sst-2@ukp")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
tokens = tokenizer.tokenize("AdapterHub is awesome!")
input_tensor = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])
model.set_active_adapters("sst-2")
outputs_nodrop = model(input_tensor)
model.set_active_adapters("sst-2", skip_layers=[0, 1])
outputs_adapterdrop = model(input_tensor)
# different probs
assert not torch.equal(outputs_nodrop[0], outputs_adapterdrop[0])
# but they should still result in the same prediction
assert torch.equal(torch.argmax(outputs_nodrop[0]), torch.argmax(outputs_adapterdrop[0]))
| [
"src.transformers.AutoTokenizer.from_pretrained",
"torch.equal",
"src.transformers.AutoModelForSequenceClassification.from_pretrained",
"torch.argmax"
] | [((276, 347), 'src.transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (326, 347), False, 'from src.transformers import AutoModelForSequenceClassification, AutoTokenizer\n'), ((411, 461), 'src.transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (440, 461), False, 'from src.transformers import AutoModelForSequenceClassification, AutoTokenizer\n'), ((820, 874), 'torch.equal', 'torch.equal', (['outputs_nodrop[0]', 'outputs_adapterdrop[0]'], {}), '(outputs_nodrop[0], outputs_adapterdrop[0])\n', (831, 874), False, 'import torch\n'), ((956, 987), 'torch.argmax', 'torch.argmax', (['outputs_nodrop[0]'], {}), '(outputs_nodrop[0])\n', (968, 987), False, 'import torch\n'), ((989, 1025), 'torch.argmax', 'torch.argmax', (['outputs_adapterdrop[0]'], {}), '(outputs_adapterdrop[0])\n', (1001, 1025), False, 'import torch\n')] |
import argparse
import brutemultiway.multiwayexsort as multiwayexsort
class Student:
def __init__(self, rawline):
input = rawline.split(' ')
# print('splitted: %s'%input)
name, score = input
self.name, self.score = name, int(score)
def __eq__(self, that):
return self.score == that.score
def __lt__(self, that):
return self.score < that.score
def __str__(self):
return self.name + ' ' + str(self.score)
    def __repr__(self): return "'" + str(self) + "'"
class StudentReader:
cache = None
def __init__(self, filename):
self.f = open(filename, mode='r', encoding='utf8')
# print('self.f.tell() -> %d; name: %s'%(self.f.tell(), filename))
def peek(self):
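        # Return the next Student without consuming it; the parsed value is cached until read() clears it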
if self.cache: return self.cache
s = self.f.readline()
if s:
self.cache = Student(s)
return self.cache
def read(self):
s, self.cache = self.peek(), None
if s: return s
def eof(self):
return not bool(self.peek())
def close(self): self.f.close()
    def __enter__(self): return self
    def __exit__(self, *exc): self.close()
class StudentWriter:
def __init__(self, filename):
self.f = open(filename, mode='w', encoding='utf8')
def write(self, val):
# print('val: %s'%val)
self.f.write(str(val))
self.f.write('\n')
self.f.flush()
def close(self):
self.f.flush()
self.f.close()
    def __enter__(self): return self
    def __exit__(self, *exc): self.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--infile', dest='infile')
parser.add_argument('-o', '--outfile', dest='outfile')
args = parser.parse_args()
multiwayexsort.sort(args.infile, args.outfile, 64, StudentReader, StudentWriter)
| [
"brutemultiway.multiwayexsort.sort",
"argparse.ArgumentParser"
] | [((1578, 1603), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1601, 1603), False, 'import argparse\n'), ((1757, 1842), 'brutemultiway.multiwayexsort.sort', 'multiwayexsort.sort', (['args.infile', 'args.outfile', '(64)', 'StudentReader', 'StudentWriter'], {}), '(args.infile, args.outfile, 64, StudentReader, StudentWriter\n )\n', (1776, 1842), True, 'import brutemultiway.multiwayexsort as multiwayexsort\n')] |
import argparse
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
from beveridge.models import ModelStorage
import pickle
parser = argparse.ArgumentParser(description="Create model from CSV stats data.")
parser.add_argument('file')
parser.add_argument('outfile')
args = parser.parse_args()
#Create DataFrame in Pandas
data = pd.read_csv(args.file)
#Drop team
del data['team']
#Cleanse to numeric data
data = data.apply(lambda x: pd.to_numeric(x, errors='coerce'))
#Delete any completely empty columns
data = data.dropna(axis=1, how='all')
#Delete any rows with empty values
data = data.dropna(axis=0, how='any')
#Set up some columns
data['home'] = data['home'].astype('bool')
data['win'] = data['win'].astype('bool')
#Build relative columns
data['relRebounds'] = data['rebounds'] / data['oppRebounds']
data['relDisposals'] = data['disposals'] / data['oppDisposals']
data['relKicks'] = data['kicks'] / data['oppKicks']
data['relHandballs'] = data['handballs'] / data['oppHandballs']
data['relClearances'] = data['clearances'] / data['oppClearances']
data['relHitouts'] = data['hitouts'] / data['oppHitouts']
data['relMarks'] = data['marks'] / data['oppMarks']
data['relInside50s'] = data['inside50s'] / data['oppInside50s']
data['relTackles'] = data['tackles'] / data['oppTackles']
data['relClangers'] = data['clangers'] / data['oppClangers']
data['relFrees'] = data['frees'] / data['oppFrees']
data['relContested'] = data['contested'] / data['oppContested']
data['relUncontested'] = data['uncontested'] / data['oppUncontested']
data['relContestedMarks'] = data['contestedMarks'] / data['oppContestedMarks']
data['relMarksIn50'] = data['marksIn50'] / data['oppMarksIn50']
data['relOnePercenters'] = data['onePercenters'] / data['oppOnePercenters']
data['relBounces'] = data['bounces'] / data['oppBounces']
#Try building a logistic regression model
print("Building initial logistic regression model.")
model = LogisticRegression()
#Only use the relative columns. I've tested with the absolute values and they are much less useful than relative.
trainColumns = pd.Series(['relRebounds', 'relDisposals', 'relKicks', 'relHandballs', 'relClearances', 'relHitouts', 'relMarks', 'relInside50s', 'relTackles', 'relClangers', 'relFrees', 'relContested', 'relUncontested', 'relContestedMarks', 'relMarksIn50', 'relOnePercenters', 'relBounces', 'home'])
model.fit(data[trainColumns], data['win'])
print("Training data accuracy: {:%}".format(model.score(data[trainColumns], data['win'])))
#Recursive feature selection with cross-validation
print("Running feature selection.")
fs = RFECV(model)
fs.fit(data[trainColumns], data['win'])
print("Accuracy after feature selection: {:%}".format(fs.score(data[trainColumns], data['win'])))
filteredColumns = trainColumns[fs.support_]
#Ignoring filtered columns for the random forest. Seems to produce better results
#Create a random forest model
print("Building random forest")
rf = RandomForestClassifier(n_estimators=100, min_samples_split=0.02, class_weight='balanced')
rf.fit(data[trainColumns], data['win'])
print("Random forest accuracy: {:%}".format(rf.score(data[trainColumns], data['win'])))
#Save random forest model to given filename
with open(args.outfile, 'wb') as file:
storage = ModelStorage(trainColumns, rf)
pickle.dump(storage, file) | [
"pandas.Series",
"pickle.dump",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.ensemble.RandomForestClassifier",
"beveridge.models.ModelStorage",
"sklearn.linear_model.LogisticRegression",
"sklearn.feature_selection.RFECV",
"pandas.to_numeric"
] | [((250, 322), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create model from CSV stats data."""'}), "(description='Create model from CSV stats data.')\n", (273, 322), False, 'import argparse\n'), ((445, 467), 'pandas.read_csv', 'pd.read_csv', (['args.file'], {}), '(args.file)\n', (456, 467), True, 'import pandas as pd\n'), ((2028, 2048), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2046, 2048), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2178, 2477), 'pandas.Series', 'pd.Series', (["['relRebounds', 'relDisposals', 'relKicks', 'relHandballs', 'relClearances',\n 'relHitouts', 'relMarks', 'relInside50s', 'relTackles', 'relClangers',\n 'relFrees', 'relContested', 'relUncontested', 'relContestedMarks',\n 'relMarksIn50', 'relOnePercenters', 'relBounces', 'home']"], {}), "(['relRebounds', 'relDisposals', 'relKicks', 'relHandballs',\n 'relClearances', 'relHitouts', 'relMarks', 'relInside50s', 'relTackles',\n 'relClangers', 'relFrees', 'relContested', 'relUncontested',\n 'relContestedMarks', 'relMarksIn50', 'relOnePercenters', 'relBounces',\n 'home'])\n", (2187, 2477), True, 'import pandas as pd\n'), ((2688, 2700), 'sklearn.feature_selection.RFECV', 'RFECV', (['model'], {}), '(model)\n', (2693, 2700), False, 'from sklearn.feature_selection import RFECV\n'), ((3032, 3125), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'min_samples_split': '(0.02)', 'class_weight': '"""balanced"""'}), "(n_estimators=100, min_samples_split=0.02,\n class_weight='balanced')\n", (3054, 3125), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3347, 3377), 'beveridge.models.ModelStorage', 'ModelStorage', (['trainColumns', 'rf'], {}), '(trainColumns, rf)\n', (3359, 3377), False, 'from beveridge.models import ModelStorage\n'), ((3382, 3408), 'pickle.dump', 'pickle.dump', (['storage', 'file'], {}), '(storage, file)\n', (3393, 3408), False, 'import pickle\n'), ((549, 582), 'pandas.to_numeric', 'pd.to_numeric', (['x'], {'errors': '"""coerce"""'}), "(x, errors='coerce')\n", (562, 582), True, 'import pandas as pd\n')] |
import numpy as np
import pkg_resources
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
import pickle
class IRIS():
def __init__(self):
# Initialise emotion classes and index
self.classes = ['joy', 'fear', 'anger', 'sadness', 'love', 'surprise']
self.class_to_index = dict((c,i) for i, c in enumerate(self.classes))
self.index_to_class = dict((v, k) for k, v in self.class_to_index.items())
# Initialize SA-Model-Final-v8
self.model_path = pkg_resources.resource_filename('iris_emotion', 'SA_Model_Final_v8/')
self.model= load_model(self.model_path)
# Initialize Tokenizer
self.tk_path = pkg_resources.resource_filename('iris_emotion', 'tokenizer.pickle')
with open(self.tk_path, 'rb') as handle:
self.tokenizer = pickle.load(handle)
def get_sequences(self, msg):
# Preprocess message
maxlen = 50
sequences = self.tokenizer.texts_to_sequences([msg])
padded = pad_sequences(sequences, truncating='post' , padding='post', maxlen = maxlen)
# Return tokenized message
return padded
def get_emotion(self, msg):
# Tokenize message
msg_seq = self.get_sequences(msg)
# Predict emotion
p = self.model.predict(msg_seq)[0]
pred_class = self.index_to_class[np.argmax(p).astype('uint8')]
# Return predicted emotion
return pred_class
| [
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"pickle.load",
"numpy.argmax",
"pkg_resources.resource_filename",
"tensorflow.keras.models.load_model"
] | [((559, 628), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""iris_emotion"""', '"""SA_Model_Final_v8/"""'], {}), "('iris_emotion', 'SA_Model_Final_v8/')\n", (590, 628), False, 'import pkg_resources\n'), ((649, 676), 'tensorflow.keras.models.load_model', 'load_model', (['self.model_path'], {}), '(self.model_path)\n', (659, 676), False, 'from tensorflow.keras.models import load_model\n'), ((732, 799), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""iris_emotion"""', '"""tokenizer.pickle"""'], {}), "('iris_emotion', 'tokenizer.pickle')\n", (763, 799), False, 'import pkg_resources\n'), ((1060, 1134), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['sequences'], {'truncating': '"""post"""', 'padding': '"""post"""', 'maxlen': 'maxlen'}), "(sequences, truncating='post', padding='post', maxlen=maxlen)\n", (1073, 1134), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((878, 897), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (889, 897), False, 'import pickle\n'), ((1409, 1421), 'numpy.argmax', 'np.argmax', (['p'], {}), '(p)\n', (1418, 1421), True, 'import numpy as np\n')] |
#! python3
import imapclient
import imaplib
import bs4
import pyzmail
import webbrowser
def unsubscribe(imap_address, email_address, password):
"""Checks unsubscribe links within emails and opens link
Args:
imap_address (str): email providers imap address
email_address (str): email address
password (str): password for email
Returns:
None
"""
imaplib._MAXLINE = 10000000
imapObj = imapclient.IMAPClient(imap_address, ssl=True)
# See https://support.google.com/accounts/answer/6010255 if (Login Error)
imapObj.login(email_address, password)
imapObj.select_folder('INBOX', readonly=True)
UIDs = imapObj.search(['ALL'])
for u in UIDs:
rawMessages = imapObj.fetch([u], ['BODY[]', 'FLAGS'])
message = pyzmail.PyzMessage.factory(rawMessages[u][b'BODY[]'])
if message.html_part:
html = message.html_part.get_payload().decode(message.html_part.charset)
soup = bs4.BeautifulSoup(html, 'html.parser')
linkElems = soup.select('a')
for link in linkElems:
if 'unsubscribe' in link.text.lower():
url = link.get('href')
print('opening {}: '.format(url))
webbrowser.open(url)
imapObj.logout()
if __name__ == "__main__":
email = input('Enter your email: ')
password = input('Enter your email password: ')
unsubscribe('imap.gmail.com', email, password) | [
"bs4.BeautifulSoup",
"webbrowser.open",
"imapclient.IMAPClient",
"pyzmail.PyzMessage.factory"
] | [((439, 484), 'imapclient.IMAPClient', 'imapclient.IMAPClient', (['imap_address'], {'ssl': '(True)'}), '(imap_address, ssl=True)\n', (460, 484), False, 'import imapclient\n'), ((791, 844), 'pyzmail.PyzMessage.factory', 'pyzmail.PyzMessage.factory', (["rawMessages[u][b'BODY[]']"], {}), "(rawMessages[u][b'BODY[]'])\n", (817, 844), False, 'import pyzmail\n'), ((980, 1018), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (997, 1018), False, 'import bs4\n'), ((1273, 1293), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (1288, 1293), False, 'import webbrowser\n')] |
import json
from pathlib import Path
from unittest.case import TestCase
from support.helpers import nuke
from tinydb import Query, TinyDB
from lib.ride_enhancer import RideEnhancer, polyline_to_geojson
class TestRideEnhancer(TestCase):
"""Test the RideEnhancer."""
def test_processing(self):
"""Test it updates the DB records."""
nuke("tmp/test-data.json")
database = TinyDB("tmp/test-data.json")
database.insert(
{
"name": "With GeoJSON",
"id": 1,
"map": {"polyline": "abc123xyz"},
"GeoJSON": {"type": "LineString", "coordinates": [[0, 0]]},
}
)
database.insert(
{
"name": "Without GeoJSON",
"id": 2,
"map": {
"polyline": Path("tests/fixtures/polylines/vicky-park")
.read_text()
.strip()
},
}
)
fixture = json.loads(Path("tests/fixtures/GeoJSON/vicky-park.json").read_text())
processor = RideEnhancer(database)
processor.process()
self.assertEqual(database.search(Query().id == 2)[0]["GeoJSON"], fixture)
self.assertEqual(
database.search(Query().id == 2)[0]["centre"],
[51.53752, -0.039459999999999995],
)
def test_polyline_to_geojson():
"""Test it converts a polyline to GeoJSON."""
polyline = Path("tests/fixtures/polylines/vicky-park").read_text().strip()
fixture = json.loads(Path("tests/fixtures/GeoJSON/vicky-park.json").read_text())
assert polyline_to_geojson(polyline) == fixture
| [
"tinydb.TinyDB",
"pathlib.Path",
"lib.ride_enhancer.polyline_to_geojson",
"tinydb.Query",
"lib.ride_enhancer.RideEnhancer",
"support.helpers.nuke"
] | [((359, 385), 'support.helpers.nuke', 'nuke', (['"""tmp/test-data.json"""'], {}), "('tmp/test-data.json')\n", (363, 385), False, 'from support.helpers import nuke\n'), ((405, 433), 'tinydb.TinyDB', 'TinyDB', (['"""tmp/test-data.json"""'], {}), "('tmp/test-data.json')\n", (411, 433), False, 'from tinydb import Query, TinyDB\n'), ((1111, 1133), 'lib.ride_enhancer.RideEnhancer', 'RideEnhancer', (['database'], {}), '(database)\n', (1123, 1133), False, 'from lib.ride_enhancer import RideEnhancer, polyline_to_geojson\n'), ((1646, 1675), 'lib.ride_enhancer.polyline_to_geojson', 'polyline_to_geojson', (['polyline'], {}), '(polyline)\n', (1665, 1675), False, 'from lib.ride_enhancer import RideEnhancer, polyline_to_geojson\n'), ((1575, 1621), 'pathlib.Path', 'Path', (['"""tests/fixtures/GeoJSON/vicky-park.json"""'], {}), "('tests/fixtures/GeoJSON/vicky-park.json')\n", (1579, 1621), False, 'from pathlib import Path\n'), ((1030, 1076), 'pathlib.Path', 'Path', (['"""tests/fixtures/GeoJSON/vicky-park.json"""'], {}), "('tests/fixtures/GeoJSON/vicky-park.json')\n", (1034, 1076), False, 'from pathlib import Path\n'), ((1486, 1529), 'pathlib.Path', 'Path', (['"""tests/fixtures/polylines/vicky-park"""'], {}), "('tests/fixtures/polylines/vicky-park')\n", (1490, 1529), False, 'from pathlib import Path\n'), ((1204, 1211), 'tinydb.Query', 'Query', ([], {}), '()\n', (1209, 1211), False, 'from tinydb import Query, TinyDB\n'), ((1299, 1306), 'tinydb.Query', 'Query', ([], {}), '()\n', (1304, 1306), False, 'from tinydb import Query, TinyDB\n'), ((852, 895), 'pathlib.Path', 'Path', (['"""tests/fixtures/polylines/vicky-park"""'], {}), "('tests/fixtures/polylines/vicky-park')\n", (856, 895), False, 'from pathlib import Path\n')] |
__all__ = ["S3BucketPolicyPrincipalRule"]
import logging
from typing import Dict, Optional
from pycfmodel.model.resources.s3_bucket_policy import S3BucketPolicy
from cfripper.model.enums import RuleGranularity, RuleRisk
from cfripper.model.result import Result
from cfripper.model.utils import get_account_id_from_principal
from cfripper.rules.base_rules import PrincipalCheckingRule, ResourceSpecificRule
logger = logging.getLogger(__file__)
class S3BucketPolicyPrincipalRule(PrincipalCheckingRule, ResourceSpecificRule):
"""
Checks for non-allowed principals in S3 bucket policies.
Risk:
This is designed to block unintended access from third party accounts to your buckets.
Fix:
All principals connected to S3 Bucket Policies should be known. CFRipper checks that **all** principals meet
the requirements expected. The list of valid accounts is defined in `valid_principals`, which is set in the config.
Filters context:
| Parameter | Type | Description |
|:-----------:|:------------------:|:--------------------------------------------------------------:|
|`config` | str | `config` variable available inside the rule |
|`extras` | str | `extras` variable available inside the rule |
|`logical_id` | str | ID used in Cloudformation to refer the resource being analysed |
|`resource` | `S3BucketPolicy` | Resource that is being addressed |
|`statement` | `Statement` | Statement being checked found in the Resource |
|`principal` | str | AWS Principal being checked found in the statement |
|`account_id` | str | Account ID found in the principal |
"""
GRANULARITY = RuleGranularity.RESOURCE
REASON = "S3 Bucket {} policy has non-allowed principals {}"
RISK_VALUE = RuleRisk.HIGH
RESOURCE_TYPES = (S3BucketPolicy,)
def resource_invoke(self, resource: S3BucketPolicy, logical_id: str, extras: Optional[Dict] = None) -> Result:
result = Result()
for statement in resource.Properties.PolicyDocument._statement_as_list():
for principal in statement.get_principal_list():
account_id = get_account_id_from_principal(principal)
if not account_id:
continue
if account_id not in self.valid_principals:
if statement.Condition and statement.Condition.dict():
logger.warning(
f"Not adding {type(self).__name__} failure in {logical_id} "
f"because there are conditions: {statement.Condition}"
)
else:
self.add_failure_to_result(
result,
self.REASON.format(logical_id, account_id),
resource_ids={logical_id},
context={
"config": self._config,
"extras": extras,
"logical_id": logical_id,
"resource": resource,
"statement": statement,
"principal": principal,
"account_id": account_id,
},
)
return result
| [
"logging.getLogger",
"cfripper.model.utils.get_account_id_from_principal",
"cfripper.model.result.Result"
] | [((418, 445), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (435, 445), False, 'import logging\n'), ((2287, 2295), 'cfripper.model.result.Result', 'Result', ([], {}), '()\n', (2293, 2295), False, 'from cfripper.model.result import Result\n'), ((2469, 2509), 'cfripper.model.utils.get_account_id_from_principal', 'get_account_id_from_principal', (['principal'], {}), '(principal)\n', (2498, 2509), False, 'from cfripper.model.utils import get_account_id_from_principal\n')] |
"""views for profile app"""
from rest_framework import status
from rest_framework.generics import RetrieveUpdateAPIView, RetrieveAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.exceptions import NotFound
from .models import Profile, Following
from .renderers import ProfileJSONRenderer
from .serializers import ProfileSerializer, FollowingSerializer
from .exceptions import ProfileDoesNotExist
from authors.apps.authentication.models import User
class UserProfile(RetrieveUpdateAPIView):
"""Profile views class"""
permission_classes = (IsAuthenticated, )
renderer_classes = (ProfileJSONRenderer, )
serializer_class = ProfileSerializer
def retrieve(self, request, username, *args, **kwargs):
""" function to retrieve user profile information """
try:
profile = Profile.objects.select_related('user').get(
user__username=username)
except Exception as e:
raise ProfileDoesNotExist
serializer = self.serializer_class(profile)
return Response(serializer.data, status=status.HTTP_200_OK)
def update(self, request, *args, **kwargs):
"""User profile update information """
param_name = self.kwargs["username"]
active_user = request.user.username
if param_name == active_user:
user_data = request.data.get('profile', {})
serializer_data = {
'username':
user_data.get('username', request.user.username),
'bio':
user_data.get('bio', request.user.profile.bio),
'location':
user_data.get('location', request.user.profile.location),
'fun_fact':
user_data.get('fun_fact', request.user.profile.fun_fact),
'image':
user_data.get('image', request.user.profile.image)
}
serializer = self.serializer_class(
request.user.profile,
data=serializer_data,
context={'request': request},
partial=True)
serializer.is_valid(raise_exception=True)
serializer.update(request.user.profile, serializer_data)
try:
serializer.update(request.user, serializer_data)
except Exception as e:
return Response({
"error":
"Username or email already exist, " + "create a unique one"
}, status.HTTP_400_BAD_REQUEST)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response({
"error": "You can only update your own profile"
}, status.HTTP_400_BAD_REQUEST)
class ListProfiles(RetrieveAPIView):
permission_classes = (IsAuthenticated, )
renderer_classes = (ProfileJSONRenderer, )
serializer_class = ProfileSerializer
def retrieve(self, request, *args, **kwargs):
""" function to retrieve user profile information """
users = Profile.objects.all()
serializer = self.serializer_class(users, many=True)
return Response({
"Authors": serializer.data
},
status=status.HTTP_200_OK)
class UserFollow(APIView):
"""
View class for user to follow other users
"""
permission_classes = (IsAuthenticated, )
serializer_class = FollowingSerializer
def post(self, request, username):
"""
This method allows users to follow others
"""
following_id = Following.check_exists(username)
user_id = request.user.id
if user_id == following_id:
message = "As awesome as you may be, you cannot follow yourself!"
status_code = status.HTTP_400_BAD_REQUEST
elif Following.already_following(user_id, following_id):
message = "You're already following %s!" % (username, )
status_code = status.HTTP_200_OK
else:
serializer_data = {"user": user_id, "following_id": following_id}
serializer = self.serializer_class(data=serializer_data)
serializer.is_valid(raise_exception=True)
serializer.save()
string = ("You're now following %s! " +
"You will receive notifications about their posts")
message = string % (username, )
status_code = status.HTTP_201_CREATED
return Response({"message": message}, status=status_code)
class UserUnfollow(APIView):
"""
View class which allows users to unfollow other users
"""
permission_classes = (IsAuthenticated, )
def delete(self, request, username):
"""
This method allows users to unfollow others
"""
following_id = Following.check_exists(username)
user_id = request.user.id
if Following.already_following(user_id, following_id):
message = Following.unfollow(user_id, following_id, username)
status_code = status.HTTP_200_OK
else:
message = "You are currently not following %s" % (username, )
status_code = status.HTTP_400_BAD_REQUEST
return Response({"message": message}, status=status_code)
class UserFollowers(APIView):
"""
View class for users to see who follows them
"""
permission_classes = (IsAuthenticated, )
serializer_class = ProfileSerializer
query = "followers"
def get(self, request, username):
follower_list = Following.get_list(request, username, self.query,
self.serializer_class)
return Response({self.query: follower_list}, status=status.HTTP_200_OK)
class UserFollowing(UserFollowers):
"""
View class for users to see everyone they are following
"""
query = "following"
| [
"rest_framework.response.Response"
] | [((1156, 1208), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_200_OK'}), '(serializer.data, status=status.HTTP_200_OK)\n', (1164, 1208), False, 'from rest_framework.response import Response\n'), ((3270, 3335), 'rest_framework.response.Response', 'Response', (["{'Authors': serializer.data}"], {'status': 'status.HTTP_200_OK'}), "({'Authors': serializer.data}, status=status.HTTP_200_OK)\n", (3278, 3335), False, 'from rest_framework.response import Response\n'), ((4592, 4642), 'rest_framework.response.Response', 'Response', (["{'message': message}"], {'status': 'status_code'}), "({'message': message}, status=status_code)\n", (4600, 4642), False, 'from rest_framework.response import Response\n'), ((5341, 5391), 'rest_framework.response.Response', 'Response', (["{'message': message}"], {'status': 'status_code'}), "({'message': message}, status=status_code)\n", (5349, 5391), False, 'from rest_framework.response import Response\n'), ((5794, 5858), 'rest_framework.response.Response', 'Response', (['{self.query: follower_list}'], {'status': 'status.HTTP_200_OK'}), '({self.query: follower_list}, status=status.HTTP_200_OK)\n', (5802, 5858), False, 'from rest_framework.response import Response\n'), ((2665, 2717), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_200_OK'}), '(serializer.data, status=status.HTTP_200_OK)\n', (2673, 2717), False, 'from rest_framework.response import Response\n'), ((2751, 2844), 'rest_framework.response.Response', 'Response', (["{'error': 'You can only update your own profile'}", 'status.HTTP_400_BAD_REQUEST'], {}), "({'error': 'You can only update your own profile'}, status.\n HTTP_400_BAD_REQUEST)\n", (2759, 2844), False, 'from rest_framework.response import Response\n'), ((2477, 2590), 'rest_framework.response.Response', 'Response', (["{'error': 'Username or email already exist, ' + 'create a unique one'}", 'status.HTTP_400_BAD_REQUEST'], {}), "({'error': 'Username or email already exist, ' +\n 'create a unique one'}, status.HTTP_400_BAD_REQUEST)\n", (2485, 2590), False, 'from rest_framework.response import Response\n')] |
"""
Sophos Api Client.
Author: <NAME>
Email: <EMAIL>
Version: 1.0.0
"""
from typing import Dict
import requests
import sophosApi.partnerApi as partnerApi
import sophosApi.whoamiApi as whoamiApi
from sophosApi.auth import Auth
from sophosApi.helpers import backoff_handler
__all__ = [
'ApiClient'
]
class ApiClient(object):
_request: requests.request
_whoami: whoamiApi.IAm
tenants: Dict[str, partnerApi.Tenant]
def __init__(self, c_id: str, c_token: str) -> None:
"""loads initial state"""
# All requests to be wrapped with oauth and backoff handler
auth = Auth(c_id, c_token)
self._session = requests.Session()
self._request = auth.oauth_handler(backoff_handler(self._session.request))
@property
def _whoami(self):
return whoamiApi.WhoamiApi(self._request).whoami
@property
def tenants(self) -> Dict[str, partnerApi.Tenant]:
return partnerApi.PartnerApi(self._request, self._whoami.id).tenants
def __getitem__(self, item) -> partnerApi.Tenant:
return partnerApi.PartnerApi(self._request, self._whoami.id)[item]
def close(self):
self._session.close()
| [
"requests.Session",
"sophosApi.whoamiApi.WhoamiApi",
"sophosApi.partnerApi.PartnerApi",
"sophosApi.auth.Auth",
"sophosApi.helpers.backoff_handler"
] | [((609, 628), 'sophosApi.auth.Auth', 'Auth', (['c_id', 'c_token'], {}), '(c_id, c_token)\n', (613, 628), False, 'from sophosApi.auth import Auth\n'), ((653, 671), 'requests.Session', 'requests.Session', ([], {}), '()\n', (669, 671), False, 'import requests\n'), ((715, 753), 'sophosApi.helpers.backoff_handler', 'backoff_handler', (['self._session.request'], {}), '(self._session.request)\n', (730, 753), False, 'from sophosApi.helpers import backoff_handler\n'), ((808, 842), 'sophosApi.whoamiApi.WhoamiApi', 'whoamiApi.WhoamiApi', (['self._request'], {}), '(self._request)\n', (827, 842), True, 'import sophosApi.whoamiApi as whoamiApi\n'), ((935, 988), 'sophosApi.partnerApi.PartnerApi', 'partnerApi.PartnerApi', (['self._request', 'self._whoami.id'], {}), '(self._request, self._whoami.id)\n', (956, 988), True, 'import sophosApi.partnerApi as partnerApi\n'), ((1067, 1120), 'sophosApi.partnerApi.PartnerApi', 'partnerApi.PartnerApi', (['self._request', 'self._whoami.id'], {}), '(self._request, self._whoami.id)\n', (1088, 1120), True, 'import sophosApi.partnerApi as partnerApi\n')] |
import requests
import json
# import related models here
from requests.auth import HTTPBasicAuth
from .models import CarDealer, DealerReview
# Create a `get_request` to make HTTP GET requests
# e.g., response = requests.get(url, params=params, headers={'Content-Type': 'application/json'},
# auth=HTTPBasicAuth('apikey', api_key))
def get_request(url, **kwargs):
print(kwargs)
print(f"GET from url: {url}")
# try:
# Call to NLU service includes API key
if "apikey" in kwargs:
params = dict()
params["text"] = kwargs["text"]
params["version"] = kwargs["version"]
params["features"] = kwargs["features"]
params["return_analyzed_text"] = kwargs["return_analyzed_text"]
response = requests.get(url, headers={"Content-Type": "application/json"},
params=kwargs, auth=HTTPBasicAuth("apikey", kwargs["apikey"]))
# Call to Cloudant DB
else:
print(">>> try to read database")
# Call get method of requests library with URL and parameters
response = requests.get(url, headers={"Content-Type": "application/json"},
params=kwargs)
# except:
# # If any error occurs
# print("Network exception occurred")
status_code = response.status_code
print(f"With status: {status_code}")
json_data = json.loads(response.text)
return json_data
# Create a `post_request` to make HTTP POST requests
# e.g., response = requests.post(url, params=kwargs, json=payload)
def post_request(url, payload, **kwargs):
print(kwargs)
print(f"POST from url: {url}")
try:
response = requests.post(url, params=kwargs, json=payload)
except:
#If any error occurs
print("Network Exception Occurred, POST request did not succeed.")
status_code = response.status_code
print(f"Status: {status_code}")
json_data = json.loads(response.text)
return json_data
# Create a get_dealers_from_cf method to get dealers from a cloud function
# def get_dealers_from_cf(url, **kwargs):
# - Call get_request() with specified arguments
# - Parse JSON results into a CarDealer object list
def get_dealers_from_cf(url, **kwargs):
results = []
# Call get_request with a URL parameter
json_result = get_request(url)
print(f">>>> json_result: '{json_result}'")
if json_result:
#entries list of the json_result is the list of dealerships
dealers = json_result["entries"]
#create a dealer object for each entry
for dealer in dealers:
dealer_obj = CarDealer(CDid=dealer["id"], city=dealer["city"], state=dealer["state"], st=dealer["st"],
address=dealer["address"], zipAd=dealer["zip"], lat=dealer["lat"], longit=dealer["long"],
short_name=dealer["short_name"], full_name=dealer["full_name"])
results.append(dealer_obj)
return results
# def get_dealers_by_state_from_cf(url, **kwargs):
# return get_dealers_from_cf(url, **kwargs)
# Create a get_dealer_reviews_from_cf method to get reviews by dealer id from a cloud function
# def get_dealer_by_id_from_cf(url, dealerId):
# - Call get_request() with specified arguments
# - Parse JSON results into a DealerView object list
def get_dealer_reviews_from_cf(url, **kwargs):
results = []
json_result = get_request(url, **kwargs)
if json_result:
# url will return entries for reviews for a particular dealer
reviews = json_result["entries"]
# For each dealer object
for review in reviews:
print("********* Inside of review: ", review)
# Create a CarDealer object with values in `doc` object
review_obj = DealerReview( Rid=review["id"], name=review["name"], dealership=review["dealership"],
review=review["review"], purchase=review["purchase"]
)
#Check if optional attributes were returned for this specific review
if "purchase_date" in review:
review_obj.purchase_date=review["purchase_date"]
if "car_make" in review:
review_obj.car_make=review["car_make"],
if "car_model" in review:
review_obj.car_model=review["car_model"]
if "car_year" in review:
review_obj.car_year=review["car_year"]
# Assign Watson NLU review sentiment result
review_obj.sentiment = analyze_review_sentiments(review_obj.review)
results.append(review_obj)
return results
# Create an `analyze_review_sentiments` method to call Watson NLU and analyze text
# def analyze_review_sentiments(text):
# - Call get_request() with specified arguments
# - Get the returned sentiment label such as Positive or Negative
def analyze_review_sentiments(reviewText):
url = "https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/77f304ca-db74-4c86-9768-18346001104e/v1/analyze"
params = {}
params["apikey"] = "<KEY>"
params["text"] = reviewText
params["version"] = "2021-03-25"
params["features"] = ["sentiment"]
params["return_analyzed_text"] = False
params["language"] = "en"
response = get_request(url, **params)
if "sentiment" in response:
return response["sentiment"]["document"]["label"]
else:
return ""
| [
"json.loads",
"requests.post",
"requests.auth.HTTPBasicAuth",
"requests.get"
] | [((1451, 1476), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1461, 1476), False, 'import json\n'), ((2000, 2025), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (2010, 2025), False, 'import json\n'), ((1144, 1222), 'requests.get', 'requests.get', (['url'], {'headers': "{'Content-Type': 'application/json'}", 'params': 'kwargs'}), "(url, headers={'Content-Type': 'application/json'}, params=kwargs)\n", (1156, 1222), False, 'import requests\n'), ((1743, 1790), 'requests.post', 'requests.post', (['url'], {'params': 'kwargs', 'json': 'payload'}), '(url, params=kwargs, json=payload)\n', (1756, 1790), False, 'import requests\n'), ((904, 945), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""apikey"""', "kwargs['apikey']"], {}), "('apikey', kwargs['apikey'])\n", (917, 945), False, 'from requests.auth import HTTPBasicAuth\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch, helpers
from exchange_harness import ExchangeHarness
import logging
import functools
# import schedule
import asyncio
import settings
import utils
import random
import time
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from time import sleep
def main():
logging.basicConfig(format='%(levelname)s:%(asctime)s %(message)s',level=settings.LOGLEVEL)
es = Elasticsearch(settings.ELASTICSEARCH_CONNECT_STRING)
logging.info('Market Refresh Rate: ' + str(settings.MARKET_REFRESH_RATE) + ' seconds.')
logging.info('Initial Sleep: ' + str(5) + ' seconds.')
logging.info('Application Started.')
RESTful_exchanges = ['bittrex', 'kraken', 'poloniex', 'kucoin', 'cryptopia']
exchanges = [ExchangeHarness(x) for x in RESTful_exchanges]
# print active exchanges and create indexes in kibana based on products listed in each market
for exchange in exchanges:
logging.info(exchange.exchange.id + ': activated and indexed.')
for product, kibana_index in exchange.products.items():
utils.create_index(es, kibana_index['ticker'])
utils.create_index(es, kibana_index['orderbook'])
logging.warning('Initiating Market Tracking.')
#Record Ticks
while True:
loop = asyncio.get_event_loop()
try:
for exchange in exchanges:
asyncio.ensure_future(exchange.record_data(es))
loop.run_forever()
except Exception as e:
logging.warning(e)
loop.close()
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"exchange_harness.ExchangeHarness",
"utils.create_index",
"elasticsearch.Elasticsearch",
"logging.warning",
"asyncio.get_event_loop",
"logging.info"
] | [((402, 499), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(asctime)s %(message)s"""', 'level': 'settings.LOGLEVEL'}), "(format='%(levelname)s:%(asctime)s %(message)s', level=\n settings.LOGLEVEL)\n", (421, 499), False, 'import logging\n'), ((503, 555), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['settings.ELASTICSEARCH_CONNECT_STRING'], {}), '(settings.ELASTICSEARCH_CONNECT_STRING)\n', (516, 555), False, 'from elasticsearch import Elasticsearch, helpers\n'), ((713, 749), 'logging.info', 'logging.info', (['"""Application Started."""'], {}), "('Application Started.')\n", (725, 749), False, 'import logging\n'), ((1287, 1333), 'logging.warning', 'logging.warning', (['"""Initiating Market Tracking."""'], {}), "('Initiating Market Tracking.')\n", (1302, 1333), False, 'import logging\n'), ((848, 866), 'exchange_harness.ExchangeHarness', 'ExchangeHarness', (['x'], {}), '(x)\n', (863, 866), False, 'from exchange_harness import ExchangeHarness\n'), ((1033, 1096), 'logging.info', 'logging.info', (["(exchange.exchange.id + ': activated and indexed.')"], {}), "(exchange.exchange.id + ': activated and indexed.')\n", (1045, 1096), False, 'import logging\n'), ((1384, 1408), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1406, 1408), False, 'import asyncio\n'), ((1173, 1219), 'utils.create_index', 'utils.create_index', (['es', "kibana_index['ticker']"], {}), "(es, kibana_index['ticker'])\n", (1191, 1219), False, 'import utils\n'), ((1232, 1281), 'utils.create_index', 'utils.create_index', (['es', "kibana_index['orderbook']"], {}), "(es, kibana_index['orderbook'])\n", (1250, 1281), False, 'import utils\n'), ((1599, 1617), 'logging.warning', 'logging.warning', (['e'], {}), '(e)\n', (1614, 1617), False, 'import logging\n')] |
"""Train a model."""
import numpy as np
import torch as th
import time
import pyexr
import skimage.io as skio
from torch.utils.data import DataLoader
import os
from tensorboardX import SummaryWriter
from ttools.modules.image_operators import crop_like
from torch.optim import lr_scheduler
from sbmc import modules
import ttools
import sbmc
from sbmc import losses
LOG = ttools.get_logger(__name__)
def main(args):
#Fix seed
np.random.seed(0)
th.manual_seed(0)
data_args = dict(spp=args.spp, mode=sbmc.TilesDataset.KPCN_MODE if
args.kpcn_mode else sbmc.TilesDataset.SBMC_MODE,
load_coords=args.load_coords,
load_gbuffer=args.load_gbuffer, load_p=args.load_p,
load_ld=args.load_ld, load_bt=args.load_bt)
# Make the checkpoint dir
os.makedirs(args.checkpoint_dir, exist_ok=True)
# Train with fixed spp
data = sbmc.TilesDataset(args.data, **data_args)
LOG.info("Training with a single sample count: %dspp" % args.spp)
if args.emil_mode:
LOG.info("Model: Temporal Sample-Based Denoising [Peters2021]")
model = sbmc.RecurrentMultisteps(data.num_features, data.num_global_features,
ksize=args.ksize, splat=not args.gather,
pixel=args.pixel)
model_params = dict(ksize=args.ksize, gather=args.gather,
pixel=args.pixel)
else:
LOG.info("Model: sample-based [Gharbi2019]")
model = sbmc.Multisteps(data.num_features, data.num_global_features,
ksize=args.ksize, splat=not args.gather,
pixel=args.pixel)
model_params = dict(ksize=args.ksize, gather=args.gather,
pixel=args.pixel)
dataloader = DataLoader(
data, batch_size=args.bs, num_workers=args.num_worker_threads,
shuffle=False)
meta = dict(model_params=model_params, kpcn_mode=args.kpcn_mode,
data_params=data_args)
LOG.info("Model configuration: {}".format(model_params))
# Loss functions
loss_fn = losses.TonemappedRelativeMSE()
rmse_fn = losses.RelativeMSE()
optimizer = th.optim.Adam(model.parameters(), lr=1e-4)
scheduler = lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
# Load the model if needed
checkpoint = os.path.join(args.checkpoint_dir, "training_end.pth")
if os.path.exists(checkpoint):
model, optimizer, start_epoch = load_checkpoint(model, optimizer, checkpoint)
LOG.info(f"Continuing training at epoch {start_epoch}")
else:
LOG.info(f"Training new model from scratch")
# Tensorboard writer
writer = SummaryWriter()
# Enable CUDA//CPU
device = 'cuda' if th.cuda.is_available() else 'cpu'
if device == 'cuda':
LOG.info('Using CUDA')
loss_fn.cuda()
rmse_fn.cuda()
model.cuda()
model.train()
# Training params
num_epochs = 2
# Save randomly initialized model to compare with later epochs
save_checkpoint(model, optimizer, os.path.join(args.checkpoint_dir, "start.pth"), -1)
total_loss = 0
total_rmse = 0
LOG.info("[Start of training]")
for epoch in range(num_epochs):
# Start of an epoch
for batch_idx, batch in enumerate(dataloader):
optimizer.zero_grad()
# Start of a batch
# Forward pass
for k in batch:
if not batch[k].__class__ == th.Tensor:
continue
batch[k] = batch[k].to(device)
output = model(batch)["radiance"]
# Backward pass
target = crop_like(batch["target_image"], output)
loss = loss_fn(output, target)
total_loss += loss.item()
loss.backward()
for i in range(len(list(model.parameters()))):
t = list(model.parameters())[i].grad
print(t)
# if 'cpu' in str(t):
# print(list(model.parameters())[i].grad)
# Clip the gradiants
clip = 1000
actual = th.nn.utils.clip_grad_norm_(model.parameters(), clip)
if actual > clip:
LOG.info("Clipped gradients {} -> {}".format(clip, actual))
optimizer.step()
if batch_idx == 0:
rad = output.detach()
save_img(rad, args.checkpoint_dir, str(epoch))
with th.no_grad():
total_rmse += rmse_fn(output, target)
printProgressBar(batch_idx+1, len(dataloader), prefix=f'Epoch {epoch}', suffix=f'{batch_idx+1}/{len(dataloader)} loss: {round(total_loss / (batch_idx+1), 15)}') # Print out progress after batch is finished
# End of an epoch
scheduler.step()
# print(optimizer.state_dict()["state"])
# Write data to tensorboard for visualization
writer.add_scalar('Learning_Rate', scheduler.get_lr(), epoch)
writer.add_scalar('Loss/train', total_loss / len(dataloader), epoch)
writer.add_scalar('RMSE/train', total_rmse / len(dataloader), epoch)
#
total_loss = 0
total_rmse = 0
# Close writer when done with training
writer.close()
# Save final model
save_checkpoint(model, optimizer, os.path.join(args.checkpoint_dir, "training_end.pth"), num_epochs)
# Check if training succeeded
# tmp_model = sbmc.Multisteps(data.num_features, data.num_global_features,
# ksize=args.ksize, splat=not args.gather,
# pixel=args.pixel)
tmp_model = sbmc.RecurrentMultisteps(data.num_features, data.num_global_features,
ksize=args.ksize, splat=not args.gather,
pixel=args.pixel)
tmp_opt = th.optim.Adam(tmp_model.parameters(), lr=args.lr)
load_checkpoint(tmp_model, tmp_opt, os.path.join(args.checkpoint_dir, "start.pth"))
tmp_model.cuda()
compare_models(model, tmp_model)
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 50, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
def save_img(radiance, checkpoint_dir, name):
data = th.clamp(radiance, 0)
data /= 1 + data
data = th.pow(data, 1.0/2.2)
data = th.clamp(data, 0, 1)
data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
os.makedirs(checkpoint_dir, exist_ok=True)
outputfile = os.path.join(checkpoint_dir, f'{name}.png')
pyexr.write(outputfile, data)
png = outputfile.replace(".exr", ".png")
skio.imsave(png, (np.clip(data, 0, 1)*255).astype(np.uint8))
def save_checkpoint(model, optimizer, save_path, epoch):
th.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'epoch': epoch
}, save_path)
def load_checkpoint(model, optimizer, load_path):
checkpoint = th.load(load_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
return model, optimizer, epoch
"""
Check whether two models differ in parameter values
Used to verify that the model is in fact training
"""
def compare_models(model_1, model_2):
models_differ = 0
for key_item_1, key_item_2 in zip(model_1.state_dict().items(), model_2.state_dict().items()):
if th.equal(key_item_1[1], key_item_2[1]):
pass
else:
models_differ += 1
if (key_item_1[0] == key_item_2[0]):
                print('Mismatch found at', key_item_1[0])
else:
raise Exception
if models_differ == 0:
print('Models match perfectly! :)')
if __name__ == "__main__":
parser = ttools.BasicArgumentParser()
parser.add_argument(
'--spp', type=int, default=8, help="Max number of samples per pixel.")
# Model parameters
parser.add_argument(
'--kpcn_mode', dest="kpcn_mode", action="store_true", default=False,
help="if True, use the model from [Bako2017]: useful for comparison.")
parser.add_argument(
'--emil_mode', dest="emil_mode", action="store_true", default=False,
help="if True, use the model from [Peters2021]: temporal extension to [Gharbi2019].")
parser.add_argument(
'--gather', dest="gather", action="store_true", default=False,
help="if True, use gather kernels instead of splat.")
parser.add_argument(
'--pixel', dest="pixel", action="store_true", default=False,
help="if True, use per-pixel model instead of samples.")
parser.add_argument(
'--ksize', type=int, default=21, help="Size of the kernels")
# Data configuration
parser.add_argument('--constant_spp', dest="randomize_spp",
action="store_false", default=True)
parser.add_argument('--dont_use_coords', dest="load_coords",
action="store_false", default=True)
parser.add_argument('--dont_use_gbuffer', dest="load_gbuffer",
action="store_false", default=True)
parser.add_argument('--dont_use_p', dest="load_p",
action="store_false", default=True)
parser.add_argument('--dont_use_ld', dest="load_ld",
action="store_false", default=True)
parser.add_argument('--dont_use_bt', dest="load_bt",
action="store_false", default=True)
args = parser.parse_args()
ttools.set_logger(args.debug)
main(args) | [
"numpy.clip",
"sbmc.losses.RelativeMSE",
"sbmc.losses.TonemappedRelativeMSE",
"torch.pow",
"ttools.set_logger",
"sbmc.Multisteps",
"torch.cuda.is_available",
"ttools.BasicArgumentParser",
"ttools.modules.image_operators.crop_like",
"os.path.exists",
"tensorboardX.SummaryWriter",
"sbmc.TilesDataset",
"numpy.random.seed",
"pyexr.write",
"torch.equal",
"sbmc.RecurrentMultisteps",
"ttools.get_logger",
"torch.clamp",
"torch.manual_seed",
"os.makedirs",
"torch.load",
"os.path.join",
"torch.optim.lr_scheduler.StepLR",
"torch.utils.data.DataLoader",
"torch.no_grad"
] | [((376, 403), 'ttools.get_logger', 'ttools.get_logger', (['__name__'], {}), '(__name__)\n', (393, 403), False, 'import ttools\n'), ((439, 456), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (453, 456), True, 'import numpy as np\n'), ((461, 478), 'torch.manual_seed', 'th.manual_seed', (['(0)'], {}), '(0)\n', (475, 478), True, 'import torch as th\n'), ((845, 892), 'os.makedirs', 'os.makedirs', (['args.checkpoint_dir'], {'exist_ok': '(True)'}), '(args.checkpoint_dir, exist_ok=True)\n', (856, 892), False, 'import os\n'), ((932, 973), 'sbmc.TilesDataset', 'sbmc.TilesDataset', (['args.data'], {}), '(args.data, **data_args)\n', (949, 973), False, 'import sbmc\n'), ((1861, 1953), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'args.bs', 'num_workers': 'args.num_worker_threads', 'shuffle': '(False)'}), '(data, batch_size=args.bs, num_workers=args.num_worker_threads,\n shuffle=False)\n', (1871, 1953), False, 'from torch.utils.data import DataLoader\n'), ((2179, 2209), 'sbmc.losses.TonemappedRelativeMSE', 'losses.TonemappedRelativeMSE', ([], {}), '()\n', (2207, 2209), False, 'from sbmc import losses\n'), ((2224, 2244), 'sbmc.losses.RelativeMSE', 'losses.RelativeMSE', ([], {}), '()\n', (2242, 2244), False, 'from sbmc import losses\n'), ((2326, 2382), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': '(100)', 'gamma': '(0.1)'}), '(optimizer, step_size=100, gamma=0.1)\n', (2345, 2382), False, 'from torch.optim import lr_scheduler\n'), ((2432, 2485), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""training_end.pth"""'], {}), "(args.checkpoint_dir, 'training_end.pth')\n", (2444, 2485), False, 'import os\n'), ((2494, 2520), 'os.path.exists', 'os.path.exists', (['checkpoint'], {}), '(checkpoint)\n', (2508, 2520), False, 'import os\n'), ((2774, 2789), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (2787, 2789), False, 'from tensorboardX import SummaryWriter\n'), ((5776, 5909), 'sbmc.RecurrentMultisteps', 'sbmc.RecurrentMultisteps', (['data.num_features', 'data.num_global_features'], {'ksize': 'args.ksize', 'splat': '(not args.gather)', 'pixel': 'args.pixel'}), '(data.num_features, data.num_global_features, ksize\n =args.ksize, splat=not args.gather, pixel=args.pixel)\n', (5800, 5909), False, 'import sbmc\n'), ((7324, 7345), 'torch.clamp', 'th.clamp', (['radiance', '(0)'], {}), '(radiance, 0)\n', (7332, 7345), True, 'import torch as th\n'), ((7378, 7401), 'torch.pow', 'th.pow', (['data', '(1.0 / 2.2)'], {}), '(data, 1.0 / 2.2)\n', (7384, 7401), True, 'import torch as th\n'), ((7411, 7431), 'torch.clamp', 'th.clamp', (['data', '(0)', '(1)'], {}), '(data, 0, 1)\n', (7419, 7431), True, 'import torch as th\n'), ((7506, 7548), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {'exist_ok': '(True)'}), '(checkpoint_dir, exist_ok=True)\n', (7517, 7548), False, 'import os\n'), ((7566, 7609), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'f"""{name}.png"""'], {}), "(checkpoint_dir, f'{name}.png')\n", (7578, 7609), False, 'import os\n'), ((7614, 7643), 'pyexr.write', 'pyexr.write', (['outputfile', 'data'], {}), '(outputfile, data)\n', (7625, 7643), False, 'import pyexr\n'), ((8044, 8062), 'torch.load', 'th.load', (['load_path'], {}), '(load_path)\n', (8051, 8062), True, 'import torch as th\n'), ((8919, 8947), 'ttools.BasicArgumentParser', 'ttools.BasicArgumentParser', ([], {}), '()\n', (8945, 8947), False, 'import ttools\n'), ((10652, 10681), 'ttools.set_logger', 'ttools.set_logger', (['args.debug'], 
{}), '(args.debug)\n', (10669, 10681), False, 'import ttools\n'), ((1163, 1296), 'sbmc.RecurrentMultisteps', 'sbmc.RecurrentMultisteps', (['data.num_features', 'data.num_global_features'], {'ksize': 'args.ksize', 'splat': '(not args.gather)', 'pixel': 'args.pixel'}), '(data.num_features, data.num_global_features, ksize\n =args.ksize, splat=not args.gather, pixel=args.pixel)\n', (1187, 1296), False, 'import sbmc\n'), ((1547, 1671), 'sbmc.Multisteps', 'sbmc.Multisteps', (['data.num_features', 'data.num_global_features'], {'ksize': 'args.ksize', 'splat': '(not args.gather)', 'pixel': 'args.pixel'}), '(data.num_features, data.num_global_features, ksize=args.\n ksize, splat=not args.gather, pixel=args.pixel)\n', (1562, 1671), False, 'import sbmc\n'), ((2837, 2859), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (2857, 2859), True, 'import torch as th\n'), ((3160, 3206), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""start.pth"""'], {}), "(args.checkpoint_dir, 'start.pth')\n", (3172, 3206), False, 'import os\n'), ((5452, 5505), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""training_end.pth"""'], {}), "(args.checkpoint_dir, 'training_end.pth')\n", (5464, 5505), False, 'import os\n'), ((6073, 6119), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', '"""start.pth"""'], {}), "(args.checkpoint_dir, 'start.pth')\n", (6085, 6119), False, 'import os\n'), ((8548, 8586), 'torch.equal', 'th.equal', (['key_item_1[1]', 'key_item_2[1]'], {}), '(key_item_1[1], key_item_2[1])\n', (8556, 8586), True, 'import torch as th\n'), ((3768, 3808), 'ttools.modules.image_operators.crop_like', 'crop_like', (["batch['target_image']", 'output'], {}), "(batch['target_image'], output)\n", (3777, 3808), False, 'from ttools.modules.image_operators import crop_like\n'), ((4589, 4601), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (4599, 4601), True, 'import torch as th\n'), ((7716, 7735), 'numpy.clip', 'np.clip', (['data', '(0)', '(1)'], {}), '(data, 0, 1)\n', (7723, 7735), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Code to create a dataset of positions for a mega-constellation of satellites in orbit.
This code uses the community-developed AstroPy and Poliastro packages:
https://www.poliastro.space/
https://www.astropy.org/
INPUT: Constellation design, timestep, duration
OUTPUT: Pandas dataframe with orbital elements and Cartesian positions and velocities for a constellation of satellites over the specified timeframe
Notes:
>Code originally inspired by the STK simulation in <NAME> and <NAME>, ‘Laser Intersatellite Links in a Starlink Constellation: A Classification and Analysis’, IEEE Vehicular Technology Magazine, vol. 16, no. 2, pp. 48–56, Jun. 2021, doi: 10.1109/MVT.2021.3063706.
>The Starlink constellation parameters were updated based on an FCC filing dated April 7th 2020
LICENSE INFORMATION
Copyright 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#----------------------------------------------#
#--------------------SETUP---------------------#
#----------------------------------------------#
#Import necessary packages
#Basics
import numpy as np
import math
import pandas as pd
import itertools as it
#Astrodynamics
from astropy import units as u
from poliastro.bodies import Earth
from poliastro.twobody import Orbit
from poliastro.twobody import propagation
#----------------------------------------------#
#----------------SET PARAMETERS----------------#
#----------------------------------------------#
"""
The parameters of the simulation are set up, including timestep,
total time to run, and the parameters of the constellation
"""
#Megaconstellation parameters — for a single shell. Currently produces Starlink Phase 1.
altitude = 550 #km
i = 53 #inclination in degrees
number_planes = 72
satellites_per_plane = 22
e = 0.0 #eccentricity
number_agents = int(number_planes * satellites_per_plane) #Number of satellites
plane_spacing = 360/number_planes #spacing between orbital planes in degrees
satellite_spacing = 360/satellites_per_plane #spacing between satellites in a plane in degrees
omega = 0 #argument of periapsis in degrees (irrelevant in case of circular orbit)
#Simple calculations assuming circular orbits, used to determine simulation duration
r_E = 6378 #Earth radius in km
mu = 3.986e5 #Gravitational parameter for the Earth (km^3 per s^2)
a = altitude + r_E #Semimajor axis = altitude + r_E for simple circular orbit (km)
#n = math.degrees((math.sqrt(mu/a**3))) #mean motion (degrees per second)
T = (2*math.pi/math.sqrt(mu))*a**(3/2) #orbital period in seconds
#Set up simulation parameters
time = 1.0*T #Choose how many orbital periods to run simulation for.
dt = 10 #timestep in seconds
number_Timesteps = int(time/dt)+1 #number of timesteps
print("The simulation will run for " + str(number_Timesteps) + " timesteps.")
#----------------------------------------------#
#-----------------CREATE DATA------------------#
#----------------------------------------------#
"""
The orbits are created and propagated with Poliastro,
and stored in a multidimensional Numpy array
"""
#Create Numpy data cube with format: plane_number, satellite_number, time, a, e, i, RAAN, argP, nu
satellite_Data_cube = np.zeros((number_Timesteps,number_agents,15),dtype=object) #Set up data cube for swarm positions and data
#Declare arrays of plane and satellite numbers
PlaneNos = np.array(list(it.chain.from_iterable(it.repeat(x, satellites_per_plane) for x in range(1,number_planes+1))))
SatNos = np.array(list(it.chain.from_iterable(it.repeat(range(1,satellites_per_plane+1),number_planes))))
#add fixed values — plane numbers, satellite numbers, constant orbital parameters, and timesteps— to the data cube
for j in range(0,number_Timesteps):
satellite_Data_cube[j,:,0] = PlaneNos #Add plane numbers to all rows in the data cube
satellite_Data_cube[j,:,1] = SatNos #Add satellite numbers to all rows in the data cube
satellite_Data_cube[j,:,2] = j*dt #Add timestamp to all rows in the data cube
satellite_Data_cube[j,:,3] = a #Add semi-major axis
satellite_Data_cube[j,:,4] = e #Add eccentricity
satellite_Data_cube[j,:,5] = i #Add inclination
#Add initial orbital parameters
for j in range(0,number_agents):
satellite_Data_cube[0,j,6] = (satellite_Data_cube[:,j,0]-1)*plane_spacing #Right ascension of ascending node
    satellite_Data_cube[0,j,8] = ((satellite_Data_cube[0,j,1]-1)*satellite_spacing) #Initial true anomalies
#Quick internal function to return a Poliastro two body orbit from a slice of values with format a,e,i,raan,argp,nu
def OrbitCreator(dataslice):
semimajor = dataslice[0]* u.km
ecc = dataslice[1] * u.one
inc = dataslice[2] * u.deg
raan = dataslice[3] * u.deg
argp = dataslice[4] * u.deg
nu = dataslice[5] * u.deg
orb = Orbit.from_classical(Earth, semimajor, ecc, inc, raan, argp, nu)
return orb
#Create a list of orbit objects for each satellite
Orbits = [OrbitCreator(satellite_Data_cube[0,j,3:9]) for j in range(0,number_agents)]
#Create array of times with units
Timestamps = np.arange(0,time,dt) * u.second
#Propagate each Orbit using Poliastro and write data to the array
for i in range(0,number_agents):
[Position,Velocity] = propagation.vallado(Earth[1], Orbits[i].r, Orbits[i].v, Timestamps)
satellite_Data_cube[:,i,9:12] = Position.value
satellite_Data_cube[:,i,12:15] = Velocity.value
satellite_Data_cube[:,i,8] = [math.degrees(Orbits[i].n.value)*satellite_Data_cube[j,i,2]+satellite_Data_cube[0,i,8] for j in range(0,number_Timesteps)]
print("Propagated orbit for satellite " + str(i) + " of " + str(number_agents))
#----------------------------------------------#
#------------------WRAP DATA-------------------#
#----------------------------------------------#
"""
With the positional data created and placed in a multidimensional
Numpy array, it is wrapped in a nice human-readable pandas dataframe.
The dataframe contains extra information to label the satellites
and make later analysis easier.
"""
#Reshape date cube to a 2d array in order to add to a pandas dataframe
satellite_Data_array = satellite_Data_cube.reshape((number_Timesteps*number_agents,15),order='F')
#Set up unique identifiers for each satellite, in the format sXXYYY where XX is plane number and YYY is satellite number.
planes = ['s' + str(x).zfill(2) for x in list(range(1,number_planes+1))]
satellites = [str(x).zfill(3) for x in list(range(1,satellites_per_plane+1))]
SatelliteIDs = [''.join(pair) for pair in list(it.product(planes, satellites))]
#Declare a list of timesteps
Timesteps = np.arange(0,number_Timesteps,1)
#Declare a list of timestamps
Timestamps_array = np.arange(0,time,dt)
#Declare a multilevel index for the dataframe
SatelliteMultiIndex = pd.MultiIndex.from_product([SatelliteIDs,Timestamps_array],names=['Satellite ID','Time'])
#Declare a list of labels for the columns of the dataframe
labels = ['Plane No.', 'Satellite No.', 'Time', 'Semimajor axis', 'Eccentricity', 'Inclination', 'Right Ascension of Ascending Node','Argument of Perigee','True Anomaly','X-position','Y-position','Z-position','X-velocity','Y-velocity','Z-velocity']
#Declare a Dataframe
ConstellationEphemerides = pd.DataFrame(satellite_Data_array, columns = labels,index = SatelliteMultiIndex)
#Export Dataframe to a CSV file in a .zip archive
print("Final Dataframe created. Exporting.")
compression_opts = dict(method='zip', archive_name='ConstellationEphemerides.csv')
ConstellationEphemerides.to_csv('ConstellationEphemerides.zip', index=True, header=True, compression=compression_opts)
| [
"pandas.MultiIndex.from_product",
"itertools.product",
"math.sqrt",
"math.degrees",
"poliastro.twobody.propagation.vallado",
"numpy.zeros",
"pandas.DataFrame",
"poliastro.twobody.Orbit.from_classical",
"numpy.arange",
"itertools.repeat"
] | [((4205, 4266), 'numpy.zeros', 'np.zeros', (['(number_Timesteps, number_agents, 15)'], {'dtype': 'object'}), '((number_Timesteps, number_agents, 15), dtype=object)\n', (4213, 4266), True, 'import numpy as np\n'), ((7609, 7642), 'numpy.arange', 'np.arange', (['(0)', 'number_Timesteps', '(1)'], {}), '(0, number_Timesteps, 1)\n', (7618, 7642), True, 'import numpy as np\n'), ((7691, 7713), 'numpy.arange', 'np.arange', (['(0)', 'time', 'dt'], {}), '(0, time, dt)\n', (7700, 7713), True, 'import numpy as np\n'), ((7782, 7879), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[SatelliteIDs, Timestamps_array]'], {'names': "['Satellite ID', 'Time']"}), "([SatelliteIDs, Timestamps_array], names=[\n 'Satellite ID', 'Time'])\n", (7808, 7879), True, 'import pandas as pd\n'), ((8230, 8307), 'pandas.DataFrame', 'pd.DataFrame', (['satellite_Data_array'], {'columns': 'labels', 'index': 'SatelliteMultiIndex'}), '(satellite_Data_array, columns=labels, index=SatelliteMultiIndex)\n', (8242, 8307), True, 'import pandas as pd\n'), ((5805, 5869), 'poliastro.twobody.Orbit.from_classical', 'Orbit.from_classical', (['Earth', 'semimajor', 'ecc', 'inc', 'raan', 'argp', 'nu'], {}), '(Earth, semimajor, ecc, inc, raan, argp, nu)\n', (5825, 5869), False, 'from poliastro.twobody import Orbit\n'), ((6072, 6094), 'numpy.arange', 'np.arange', (['(0)', 'time', 'dt'], {}), '(0, time, dt)\n', (6081, 6094), True, 'import numpy as np\n'), ((6230, 6297), 'poliastro.twobody.propagation.vallado', 'propagation.vallado', (['Earth[1]', 'Orbits[i].r', 'Orbits[i].v', 'Timestamps'], {}), '(Earth[1], Orbits[i].r, Orbits[i].v, Timestamps)\n', (6249, 6297), False, 'from poliastro.twobody import propagation\n'), ((3512, 3525), 'math.sqrt', 'math.sqrt', (['mu'], {}), '(mu)\n', (3521, 3525), False, 'import math\n'), ((7533, 7563), 'itertools.product', 'it.product', (['planes', 'satellites'], {}), '(planes, satellites)\n', (7543, 7563), True, 'import itertools as it\n'), ((4407, 4441), 'itertools.repeat', 'it.repeat', (['x', 'satellites_per_plane'], {}), '(x, satellites_per_plane)\n', (4416, 4441), True, 'import itertools as it\n'), ((6435, 6466), 'math.degrees', 'math.degrees', (['Orbits[i].n.value'], {}), '(Orbits[i].n.value)\n', (6447, 6466), False, 'import math\n')] |
#! /usr/bin/env python
""" Top-level script for evaluating models """
from pathlib import Path
import numpy as np
import logging
import sys
import torch
from torch.utils.data import DataLoader
import configargparse
import copy
import tempfile
import os
import pandas as pd
from sklearn.metrics import roc_auc_score, average_precision_score
sys.path.append("../")
from datasets import load_simulator
from utils import create_filename, create_modelname, sum_except_batch, array_to_image_folder
from architectures import create_model
from architectures.create_model import ALGORITHMS
logger = logging.getLogger(__name__)
try:
from fid_score import calculate_fid_given_paths
except:
logger.warning("Could not import fid_score, make sure that pytorch-fid is in the Python path")
calculate_fid_given_paths = None
def parse_args():
""" Parses command line arguments for the evaluation """
parser = configargparse.ArgumentParser(ignore_unknown_config_file_keys=True)
# What what what
parser.add_argument("--truth", action="store_true", help="Evaluate ground truth rather than learned model")
parser.add_argument("--modelname", type=str, default=None, help="Model name. Algorithm, latent dimension, dataset, and run are prefixed automatically.")
parser.add_argument("--algorithm", type=str, default="flow", choices=ALGORITHMS, help="Model: flow (AF), mf (FOM, M-flow), emf (Me-flow), pie (PIE), gamf (M-flow-OT)...")
parser.add_argument("--dataset", type=str, default="spherical_gaussian", help="Dataset: spherical_gaussian, power, lhc, lhc40d, lhc2d, and some others")
parser.add_argument("-i", type=int, default=0, help="Run number")
parser.add_argument("--model_path", type=str, default=None)
parser.add_argument("--ood_dataset", action="append", default=["celeb-a", "svhn", "lsun", "cifar100", "uniform_noise", "textures", "gaussian_noise"])
parser.add_argument("--dataset_dir", type=str, default="./downloaded_datasets")
# Dataset details
parser.add_argument("--truelatentdim", type=int, default=2, help="True manifold dimensionality (for datasets where that is variable)")
parser.add_argument("--datadim", type=int, default=3, help="True data dimensionality (for datasets where that is variable)")
parser.add_argument("--epsilon", type=float, default=0.01, help="Noise term (for datasets where that is variable)")
# Model details
parser.add_argument("--modellatentdim", type=int, default=2, help="Model manifold dimensionality")
parser.add_argument("--specified", action="store_true", help="Prescribe manifold chart: FOM instead of M-flow")
parser.add_argument("--outertransform", type=str, default="rq-coupling", help="Scalar base trf. for f: {affine | quadratic | rq}-{coupling | autoregressive}")
parser.add_argument("--innertransform", type=str, default="rq-coupling", help="Scalar base trf. for h: {affine | quadratic | rq}-{coupling | autoregressive}")
parser.add_argument("--lineartransform", type=str, default="permutation", help="Scalar linear trf: linear | permutation")
parser.add_argument("--outerlayers", type=int, default=5, help="Number of transformations in f (not counting linear transformations)")
parser.add_argument("--innerlayers", type=int, default=5, help="Number of transformations in h (not counting linear transformations)")
parser.add_argument("--conditionalouter", action="store_true", help="If dataset is conditional, use this to make f conditional (otherwise only h is conditional)")
parser.add_argument("--dropout", type=float, default=0.0, help="Use dropout")
parser.add_argument("--pieepsilon", type=float, default=0.01, help="PIE epsilon term")
parser.add_argument("--pieclip", type=float, default=None, help="Clip v in p(v), in multiples of epsilon")
parser.add_argument("--encoderblocks", type=int, default=5, help="Number of blocks in Me-flow / PAE encoder")
parser.add_argument("--encoderhidden", type=int, default=100, help="Number of hidden units in Me-flow / PAE encoder")
parser.add_argument("--splinerange", default=3.0, type=float, help="Spline boundaries")
parser.add_argument("--splinebins", default=8, type=int, help="Number of spline bins")
parser.add_argument("--levels", type=int, default=3, help="Number of levels in multi-scale architectures for image data (for outer transformation f)")
parser.add_argument("--actnorm", action="store_true", help="Use actnorm in convolutional architecture")
parser.add_argument("--batchnorm", action="store_true", help="Use batchnorm in ResNets")
parser.add_argument("--linlayers", type=int, default=2, help="Number of linear layers before the projection for M-flow and PIE on image data")
parser.add_argument("--linchannelfactor", type=int, default=2, help="Determines number of channels in linear trfs before the projection for M-flow and PIE on image data")
parser.add_argument("--intermediatensf", action="store_true", help="Use NSF rather than linear layers before projecting (for M-flows and PIE on image data)")
parser.add_argument("--decoderblocks", type=int, default=5, help="Number of blocks in PAE encoder")
parser.add_argument("--decoderhidden", type=int, default=100, help="Number of hidden units in PAE encoder")
# Other settings
parser.add_argument("-c", is_config_file=True, type=str, help="Config file path")
parser.add_argument("--debug", default=True)
parser.add_argument("--num_classes", default=0, type=int)
return parser.parse_args()
def evaluate_test_samples(args, simulator, model=None, eval_classifier=False):
""" Likelihood evaluation """
# Prepare
dataset = simulator.load_dataset(train=False, dataset_dir=Path(args.dataset_dir))
dataloader = DataLoader(
dataset,
batch_size=32,
pin_memory=False,
num_workers=0,
shuffle=False,
)
# Evaluate
log_prob_ = []
reco_error_ = []
logits = []
ys = []
for batch in dataloader:
x_, y = batch
ys.append(y.cpu())
x_ = x_.cuda()
if args.algorithm == "flow":
out = model(x_, context=params_)
elif args.algorithm in ["pie", "slice"]:
out = model(x_, context=params_, mode=args.algorithm if not args.skiplikelihood else "projection", )
else:
out = model(x_, context=None, mode="mf-fixed-manifold", return_classification=True)
x_reco, log_prob, u, hidden, clf_out = (
out["x_reco"], out["log_prob"], out["u"], out["hidden"], out["clf_out"])
logits.append(clf_out.detach().cpu())
log_prob_.append(log_prob.detach().cpu().numpy())
reco_error_.append((sum_except_batch((x_ - x_reco) ** 2) ** 0.5).detach().cpu().numpy())
if eval_classifier:
ys = torch.cat(ys)
logits = torch.cat(logits)
acc = (ys == logits.argmax(-1)).float().mean()
print(f"Accuracy: {acc.item() * 100:.02f}")
log_prob = np.concatenate(log_prob_, axis=0)
reco_error = np.concatenate(reco_error_, axis=0)
return {"p(x)": log_prob, "reco_error": -reco_error}
if __name__ == "__main__":
# Parse args
args = parse_args()
logging.basicConfig(format="%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s", datefmt="%H:%M", level=logging.DEBUG if args.debug else logging.INFO)
# Silence PIL
for key in logging.Logger.manager.loggerDict:
if "PIL" in key:
logging.getLogger(key).setLevel(logging.WARNING)
logger.info("Hi!")
logger.debug("Starting evaluate.py with arguments %s", args)
create_modelname(args)
logger.info("Evaluating model %s", args.modelname)
# Bug fix related to some num_workers > 1 and CUDA. Bad things happen otherwise!
torch.multiprocessing.set_start_method("spawn", force=True)
simulator = load_simulator(args)
torch.set_default_tensor_type(torch.cuda.FloatTensor)
model = create_model(args, simulator=simulator)
if args.model_path is None:
args.model_path = create_filename("model", None, args)
model.load_state_dict(torch.load(args.model_path, map_location=torch.device("cuda")))
model.cuda()
model.eval()
# Compute ID OOD scores
id_scores_dict = evaluate_test_samples(args, simulator, model=model, eval_classifier=True)
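    # "ID" scores come from the in-distribution test set; scores for each OOD dataset are computed in the loop below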
# Compute OOD detection metrics
rows = []
for ood_ds in args.ood_dataset:
logger.info(f"\n\n{ood_ds}")
args.dataset = ood_ds
simulator = load_simulator(args)
ood_scores_dict = evaluate_test_samples(args, simulator, model=model)
for score_name, id_scores in id_scores_dict.items():
ood_scores = ood_scores_dict[score_name]
labels = np.concatenate(
[np.zeros_like(ood_scores), np.ones_like(id_scores)]
)
preds = np.concatenate([ood_scores, id_scores])
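            # In-distribution samples (label 1) are the positive class for AUROC/AUPR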
auroc = roc_auc_score(labels, preds)
aupr = average_precision_score(labels, preds)
logger.info(score_name)
logger.info(f"AUROC: {auroc * 100:.02f}")
logger.info(f"AUPR: {aupr * 100:.02f}")
rows.append((score_name, ood_ds, auroc * 100, aupr * 100))
model_name = Path(args.model_path).stem
df = pd.DataFrame(rows, columns=["Score", "OOD Dataset", "AUROC", "AUPR"])
df.to_csv(f"{model_name}.csv", index=False)
logger.info("All done! Have a nice day!")
| [
"logging.getLogger",
"sklearn.metrics.roc_auc_score",
"configargparse.ArgumentParser",
"architectures.create_model",
"sys.path.append",
"pathlib.Path",
"torch.set_default_tensor_type",
"numpy.concatenate",
"pandas.DataFrame",
"torch.multiprocessing.set_start_method",
"utils.sum_except_batch",
"sklearn.metrics.average_precision_score",
"torch.cat",
"torch.device",
"logging.basicConfig",
"utils.create_filename",
"numpy.ones_like",
"utils.create_modelname",
"datasets.load_simulator",
"torch.utils.data.DataLoader",
"numpy.zeros_like"
] | [((343, 365), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (358, 365), False, 'import sys\n'), ((595, 622), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (612, 622), False, 'import logging\n'), ((920, 987), 'configargparse.ArgumentParser', 'configargparse.ArgumentParser', ([], {'ignore_unknown_config_file_keys': '(True)'}), '(ignore_unknown_config_file_keys=True)\n', (949, 987), False, 'import configargparse\n'), ((5772, 5859), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(32)', 'pin_memory': '(False)', 'num_workers': '(0)', 'shuffle': '(False)'}), '(dataset, batch_size=32, pin_memory=False, num_workers=0, shuffle\n =False)\n', (5782, 5859), False, 'from torch.utils.data import DataLoader\n'), ((7001, 7034), 'numpy.concatenate', 'np.concatenate', (['log_prob_'], {'axis': '(0)'}), '(log_prob_, axis=0)\n', (7015, 7034), True, 'import numpy as np\n'), ((7052, 7087), 'numpy.concatenate', 'np.concatenate', (['reco_error_'], {'axis': '(0)'}), '(reco_error_, axis=0)\n', (7066, 7087), True, 'import numpy as np\n'), ((7219, 7389), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s"""', 'datefmt': '"""%H:%M"""', 'level': '(logging.DEBUG if args.debug else logging.INFO)'}), "(format=\n '%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s', datefmt\n ='%H:%M', level=logging.DEBUG if args.debug else logging.INFO)\n", (7238, 7389), False, 'import logging\n'), ((7628, 7650), 'utils.create_modelname', 'create_modelname', (['args'], {}), '(args)\n', (7644, 7650), False, 'from utils import create_filename, create_modelname, sum_except_batch, array_to_image_folder\n'), ((7796, 7855), 'torch.multiprocessing.set_start_method', 'torch.multiprocessing.set_start_method', (['"""spawn"""'], {'force': '(True)'}), "('spawn', force=True)\n", (7834, 7855), False, 'import torch\n'), ((7873, 7893), 'datasets.load_simulator', 'load_simulator', (['args'], {}), '(args)\n', (7887, 7893), False, 'from datasets import load_simulator\n'), ((7898, 7951), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (7927, 7951), False, 'import torch\n'), ((7964, 8003), 'architectures.create_model', 'create_model', (['args'], {'simulator': 'simulator'}), '(args, simulator=simulator)\n', (7976, 8003), False, 'from architectures import create_model\n'), ((9293, 9362), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': "['Score', 'OOD Dataset', 'AUROC', 'AUPR']"}), "(rows, columns=['Score', 'OOD Dataset', 'AUROC', 'AUPR'])\n", (9305, 9362), True, 'import pandas as pd\n'), ((6829, 6842), 'torch.cat', 'torch.cat', (['ys'], {}), '(ys)\n', (6838, 6842), False, 'import torch\n'), ((6860, 6877), 'torch.cat', 'torch.cat', (['logits'], {}), '(logits)\n', (6869, 6877), False, 'import torch\n'), ((8062, 8098), 'utils.create_filename', 'create_filename', (['"""model"""', 'None', 'args'], {}), "('model', None, args)\n", (8077, 8098), False, 'from utils import create_filename, create_modelname, sum_except_batch, array_to_image_folder\n'), ((8522, 8542), 'datasets.load_simulator', 'load_simulator', (['args'], {}), '(args)\n', (8536, 8542), False, 'from datasets import load_simulator\n'), ((9257, 9278), 'pathlib.Path', 'Path', (['args.model_path'], {}), '(args.model_path)\n', (9261, 9278), False, 'from pathlib import Path\n'), ((5731, 5753), 'pathlib.Path', 'Path', (['args.dataset_dir'], {}), 
'(args.dataset_dir)\n', (5735, 5753), False, 'from pathlib import Path\n'), ((8877, 8916), 'numpy.concatenate', 'np.concatenate', (['[ood_scores, id_scores]'], {}), '([ood_scores, id_scores])\n', (8891, 8916), True, 'import numpy as np\n'), ((8938, 8966), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'preds'], {}), '(labels, preds)\n', (8951, 8966), False, 'from sklearn.metrics import roc_auc_score, average_precision_score\n'), ((8986, 9024), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['labels', 'preds'], {}), '(labels, preds)\n', (9009, 9024), False, 'from sklearn.metrics import roc_auc_score, average_precision_score\n'), ((8166, 8186), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (8178, 8186), False, 'import torch\n'), ((7485, 7507), 'logging.getLogger', 'logging.getLogger', (['key'], {}), '(key)\n', (7502, 7507), False, 'import logging\n'), ((8791, 8816), 'numpy.zeros_like', 'np.zeros_like', (['ood_scores'], {}), '(ood_scores)\n', (8804, 8816), True, 'import numpy as np\n'), ((8818, 8841), 'numpy.ones_like', 'np.ones_like', (['id_scores'], {}), '(id_scores)\n', (8830, 8841), True, 'import numpy as np\n'), ((6718, 6754), 'utils.sum_except_batch', 'sum_except_batch', (['((x_ - x_reco) ** 2)'], {}), '((x_ - x_reco) ** 2)\n', (6734, 6754), False, 'from utils import create_filename, create_modelname, sum_except_batch, array_to_image_folder\n')] |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
UpdateView,
)
from .models import Champion
class ChampionListView(ListView):
model = Champion
class ChampionDetailView(DetailView):
model = Champion
class ChampionCreateView(LoginRequiredMixin, CreateView):
model = Champion
fields = [
"name",
"dps",
"attackspeed",
"dmg",
"range",
"hp",
"mana",
"armor",
"mr",
"origin_prim",
"origin_sec",
"class_prim",
"class_sec",
"cost",
"tier",
]
def form_valid(self, form):
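        # Record the logged-in user as the champion's creator before saving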
form.instance.creator = self.request.user
return super().form_valid(form)
class ChampionUpdateView(LoginRequiredMixin, UpdateView):
model = Champion
fields = [
"name",
"dps",
"attackspeed",
"dmg",
"range",
"hp",
"mana",
"armor",
"mr",
"origin_prim",
"origin_sec",
"class_prim",
"class_sec",
"cost",
"tier",
]
action = "Update"
class ChampionDeleteView(LoginRequiredMixin, DeleteView):
model = Champion
success_url = reverse_lazy("tftchampions:list")
| [
"django.urls.reverse_lazy"
] | [((1340, 1373), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""tftchampions:list"""'], {}), "('tftchampions:list')\n", (1352, 1373), False, 'from django.urls import reverse_lazy\n')] |
#!/usr/bin/env python3
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'data'))
import data_tools
import pandas
#import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from sklearn.linear_model import LinearRegression, Lasso, Ridge
data_dir = '../../HuGaDB/HuGaDB/Data.parsed/'
def plot_zplane(zeros, k, ax):
if len(zeros) == 0:
return
uc = patches.Circle((0,0), radius=1, fill=False, color='black', ls='dashed')
ax.add_patch(uc)
p = plt.plot(zeros.real, zeros.imag, 'bo', ms=10)
plt.setp( p, markersize=12.0, markeredgewidth=3.0,
markeredgecolor='b', markerfacecolor='b')
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('center')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
r = max(1.5, 1.3*max(abs(zeros))); plt.axis('scaled'); plt.axis([-r, r, -r, r])
ticks = [-1, -.5, .5, 1]; plt.xticks(ticks); plt.yticks(ticks)
ax.title.set_text("z={}, K={}".format(sum(zeros != 0.0), k))
def compute_zeros(b_vec):
"""Compute the zeros of the FIR filter given the coefficents b_vec
    y[n] = \sum_k b_k * x[n-k]
"""
if (b_vec==0.0).all():
return np.array([]), 0
zeros = np.roots(b_vec)
poly = np.poly(zeros)
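    # np.poly rebuilds the monic polynomial from the zeros, so b_vec[0]/poly[0] recovers the overall gain K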
print("computed zeros:")
print(zeros)
return zeros, b_vec[0]/poly[0]
HISTORY = 3
def build_features(file_name, axis, is_cross=False):
#file_name='HuGaDB_v1_walking_14_02.txt'
#ACCEL_FIX = 2.0/32765
#GYRO_FIX = 2000/32768
#start = 12
#cols = range(start, start+6)
dataset = pandas.read_csv(file_name, sep=',')
data_length = np.shape(dataset)[0]
    data_mat = dataset.values  # .as_matrix() was removed in newer pandas versions
features, labels = [], []
#for start in range(0, 36, 6):
col_set = [axis]
#if axis < 3:
# col_set = [0,1,2]
#else:
# col_set = [3,4,5]
for start in col_set:
if is_cross:
cols = range(6)
else:
cols = [start]
for time in range(HISTORY, data_length):
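            # Each example is the previous HISTORY samples of the selected column; the label is the current sample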
example = []
for col in cols:
example.extend(data_mat[:,col][time-HISTORY:time])
features.append(example)
labels.append(data_mat[:,start][time]) # only train x_accel for now
return np.asarray(features), np.asarray(labels)
def test():
fil_coef = np.zeros((6,6,1))
for thing in range(6):
fil_coef[thing][thing][0] = 1
fil_coef[0][0][0]=.9182
dir_name = 'cross_fir'
data_tools.save_array(fil_coef, os.path.join(dir_name, 'test_coefs'))
print(fil_coef)
def data_train():
#X, y = None, None
#iteration = 0
#for data_file in os.listdir(data_dir):
# data_path = os.path.join(data_dir, data_file)
# iteration +=1
# if iteration < 1:
# continue
# if iteration > 21:
# break
# print("loading: {}".format(data_file))
# if X is not None:
# Xnew, ynew = build_features(data_path)
# X = np.concatenate((X, Xnew))
# y = np.concatenate((y, ynew))
# else:
# X, y = build_features(data_path)
data_file = '/home/chiasson/Documents/David/research/HuGaDB/HuGaDB/Data.parsed/processed/training/all.csv'
all_coeffs = []
#for stream in [0, 3]:
for stream in range(6):
X, y = build_features(data_file, stream, is_cross=False)
print("features built for stream {}".format(stream))
#reg = LinearRegression().fit(X,y)
reg = Lasso(alpha=.1, fit_intercept=False, max_iter=200000).fit(X,y)
reg.coef_[np.abs(reg.coef_) < (1.0/16)] = 0
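        # Coefficients with magnitude below 1/16 are zeroed out to keep the FIR filter sparse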
print(reg.coef_)
#all_coeffs.append(reg.coef_)
#all_coeffs.append(reg.coef_)
all_coeffs.append(reg.coef_)
all_coeffs = np.asarray(all_coeffs)
#print(reg.score(X,y))
#print(np.linalg.norm(reg.coef_, ord=1))
#print(np.linalg.norm(reg.coef_, ord=2))
dir_name = 'cross_fir'
try:
os.makedirs(dir_name)
except OSError:
pass
print("FINISHED")
print(all_coeffs)
data_tools.save_array(all_coeffs, os.path.join(dir_name, 'test_coefs'))
return
# plot the poles and zeros!
for var in range(6):
b_vec = np.flip(reg.coef_[var*HISTORY:var*HISTORY+HISTORY])
zeros, k = compute_zeros(b_vec)
ax = plt.subplot(2,3,var+1)
plot_zplane(zeros, k, ax)
#plt.plot(x,y,'o')
plt.show()
#how to test this?
# training accuracy should exceed any of my models
# testing accuracy should exceed or converge to my model
# Do I understand how to compute poles and zeros?
# Are there any bugs in my code?
# How do I interpret the results?
# would this algorithm learn different coefficients for each the different body parts?
if __name__ == '__main__':
data_train()
#test()
| [
"numpy.poly",
"pandas.read_csv",
"sklearn.linear_model.Lasso",
"numpy.roots",
"numpy.array",
"numpy.flip",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.axis",
"matplotlib.patches.Circle",
"numpy.abs",
"matplotlib.pyplot.xticks",
"numpy.shape",
"matplotlib.pyplot.show",
"matplotlib.pyplot.setp",
"os.makedirs",
"os.path.join",
"numpy.zeros",
"os.path.abspath",
"matplotlib.pyplot.subplot"
] | [((469, 541), 'matplotlib.patches.Circle', 'patches.Circle', (['(0, 0)'], {'radius': '(1)', 'fill': '(False)', 'color': '"""black"""', 'ls': '"""dashed"""'}), "((0, 0), radius=1, fill=False, color='black', ls='dashed')\n", (483, 541), False, 'from matplotlib import patches\n'), ((570, 615), 'matplotlib.pyplot.plot', 'plt.plot', (['zeros.real', 'zeros.imag', '"""bo"""'], {'ms': '(10)'}), "(zeros.real, zeros.imag, 'bo', ms=10)\n", (578, 615), True, 'import matplotlib.pyplot as plt\n'), ((620, 715), 'matplotlib.pyplot.setp', 'plt.setp', (['p'], {'markersize': '(12.0)', 'markeredgewidth': '(3.0)', 'markeredgecolor': '"""b"""', 'markerfacecolor': '"""b"""'}), "(p, markersize=12.0, markeredgewidth=3.0, markeredgecolor='b',\n markerfacecolor='b')\n", (628, 715), True, 'import matplotlib.pyplot as plt\n'), ((940, 958), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (948, 958), True, 'import matplotlib.pyplot as plt\n'), ((960, 984), 'matplotlib.pyplot.axis', 'plt.axis', (['[-r, r, -r, r]'], {}), '([-r, r, -r, r])\n', (968, 984), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1032), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks'], {}), '(ticks)\n', (1025, 1032), True, 'import matplotlib.pyplot as plt\n'), ((1034, 1051), 'matplotlib.pyplot.yticks', 'plt.yticks', (['ticks'], {}), '(ticks)\n', (1044, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1340, 1355), 'numpy.roots', 'np.roots', (['b_vec'], {}), '(b_vec)\n', (1348, 1355), True, 'import numpy as np\n'), ((1367, 1381), 'numpy.poly', 'np.poly', (['zeros'], {}), '(zeros)\n', (1374, 1381), True, 'import numpy as np\n'), ((1694, 1729), 'pandas.read_csv', 'pandas.read_csv', (['file_name'], {'sep': '""","""'}), "(file_name, sep=',')\n", (1709, 1729), False, 'import pandas\n'), ((2458, 2477), 'numpy.zeros', 'np.zeros', (['(6, 6, 1)'], {}), '((6, 6, 1))\n', (2466, 2477), True, 'import numpy as np\n'), ((3883, 3905), 'numpy.asarray', 'np.asarray', (['all_coeffs'], {}), '(all_coeffs)\n', (3893, 3905), True, 'import numpy as np\n'), ((4518, 4528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4526, 4528), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1765), 'numpy.shape', 'np.shape', (['dataset'], {}), '(dataset)\n', (1756, 1765), True, 'import numpy as np\n'), ((2389, 2409), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (2399, 2409), True, 'import numpy as np\n'), ((2411, 2429), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (2421, 2429), True, 'import numpy as np\n'), ((2632, 2668), 'os.path.join', 'os.path.join', (['dir_name', '"""test_coefs"""'], {}), "(dir_name, 'test_coefs')\n", (2644, 2668), False, 'import os, sys\n'), ((4068, 4089), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (4079, 4089), False, 'import os, sys\n'), ((4205, 4241), 'os.path.join', 'os.path.join', (['dir_name', '"""test_coefs"""'], {}), "(dir_name, 'test_coefs')\n", (4217, 4241), False, 'import os, sys\n'), ((4328, 4385), 'numpy.flip', 'np.flip', (['reg.coef_[var * HISTORY:var * HISTORY + HISTORY]'], {}), '(reg.coef_[var * HISTORY:var * HISTORY + HISTORY])\n', (4335, 4385), True, 'import numpy as np\n'), ((4433, 4459), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(var + 1)'], {}), '(2, 3, var + 1)\n', (4444, 4459), True, 'import matplotlib.pyplot as plt\n'), ((1312, 1324), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1320, 1324), True, 'import numpy as np\n'), ((100, 125), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), 
'(__file__)\n', (115, 125), False, 'import os, sys\n'), ((3613, 3667), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': '(0.1)', 'fit_intercept': '(False)', 'max_iter': '(200000)'}), '(alpha=0.1, fit_intercept=False, max_iter=200000)\n', (3618, 3667), False, 'from sklearn.linear_model import LinearRegression, Lasso, Ridge\n'), ((3694, 3711), 'numpy.abs', 'np.abs', (['reg.coef_'], {}), '(reg.coef_)\n', (3700, 3711), True, 'import numpy as np\n')] |
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from sunshine_conversations_client.configuration import Configuration
from sunshine_conversations_client.undefined import Undefined
class ConversationCreateBody(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'ConversationType',
'participants': 'list[ParticipantSubSchema]',
'display_name': 'str',
'description': 'str',
'icon_url': 'str',
'metadata': 'object'
}
attribute_map = {
'type': 'type',
'participants': 'participants',
'display_name': 'displayName',
'description': 'description',
'icon_url': 'iconUrl',
'metadata': 'metadata'
}
nulls = set()
def __init__(self, type=None, participants=None, display_name=None, description=Undefined(), icon_url=Undefined(), metadata=Undefined(), local_vars_configuration=None): # noqa: E501
"""ConversationCreateBody - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._participants = None
self._display_name = None
self._description = None
self._icon_url = None
self._metadata = None
self.discriminator = None
self.type = type
if participants is not None:
self.participants = participants
if display_name is not None:
self.display_name = display_name
self.description = description
self.icon_url = icon_url
self.metadata = metadata
@property
def type(self):
"""Gets the type of this ConversationCreateBody. # noqa: E501
:return: The type of this ConversationCreateBody. # noqa: E501
:rtype: ConversationType
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ConversationCreateBody.
:param type: The type of this ConversationCreateBody. # noqa: E501
:type: ConversationType
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def participants(self):
"""Gets the participants of this ConversationCreateBody. # noqa: E501
The users participating in the conversation. For `personal` conversations, this field is required with a length of exactly 1. For `sdkGroup` conversations, must have a length less than or equal to 10. Can be omitted to have a conversation with no participants if the type is `sdkGroup`. # noqa: E501
:return: The participants of this ConversationCreateBody. # noqa: E501
:rtype: list[ParticipantSubSchema]
"""
return self._participants
@participants.setter
def participants(self, participants):
"""Sets the participants of this ConversationCreateBody.
The users participating in the conversation. For `personal` conversations, this field is required with a length of exactly 1. For `sdkGroup` conversations, must have a length less than or equal to 10. Can be omitted to have a conversation with no participants if the type is `sdkGroup`. # noqa: E501
:param participants: The participants of this ConversationCreateBody. # noqa: E501
:type: list[ParticipantSubSchema]
"""
self._participants = participants
@property
def display_name(self):
"""Gets the display_name of this ConversationCreateBody. # noqa: E501
A friendly name for the conversation, may be displayed to the business or the user. # noqa: E501
:return: The display_name of this ConversationCreateBody. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this ConversationCreateBody.
A friendly name for the conversation, may be displayed to the business or the user. # noqa: E501
:param display_name: The display_name of this ConversationCreateBody. # noqa: E501
:type: str
"""
self._display_name = display_name
@property
def description(self):
"""Gets the description of this ConversationCreateBody. # noqa: E501
A short text describing the conversation. # noqa: E501
:return: The description of this ConversationCreateBody. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ConversationCreateBody.
A short text describing the conversation. # noqa: E501
:param description: The description of this ConversationCreateBody. # noqa: E501
:type: str
"""
if type(description) is Undefined:
description = None
self.nulls.discard("description")
elif description is None:
self.nulls.add("description")
else:
self.nulls.discard("description")
if (self.local_vars_configuration.client_side_validation and
description is not None and len(description) > 100):
raise ValueError("Invalid value for `description`, length must be less than or equal to `100`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
description is not None and len(description) < 1):
raise ValueError("Invalid value for `description`, length must be greater than or equal to `1`") # noqa: E501
self._description = description
@property
def icon_url(self):
"""Gets the icon_url of this ConversationCreateBody. # noqa: E501
A custom conversation icon url. The image must be in either JPG, PNG, or GIF format # noqa: E501
:return: The icon_url of this ConversationCreateBody. # noqa: E501
:rtype: str
"""
return self._icon_url
@icon_url.setter
def icon_url(self, icon_url):
"""Sets the icon_url of this ConversationCreateBody.
A custom conversation icon url. The image must be in either JPG, PNG, or GIF format # noqa: E501
:param icon_url: The icon_url of this ConversationCreateBody. # noqa: E501
:type: str
"""
if type(icon_url) is Undefined:
icon_url = None
self.nulls.discard("icon_url")
elif icon_url is None:
self.nulls.add("icon_url")
else:
self.nulls.discard("icon_url")
if (self.local_vars_configuration.client_side_validation and
icon_url is not None and len(icon_url) > 2048):
raise ValueError("Invalid value for `icon_url`, length must be less than or equal to `2048`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
icon_url is not None and len(icon_url) < 1):
raise ValueError("Invalid value for `icon_url`, length must be greater than or equal to `1`") # noqa: E501
self._icon_url = icon_url
@property
def metadata(self):
"""Gets the metadata of this ConversationCreateBody. # noqa: E501
Flat object containing custom properties. Strings, numbers and booleans are the only supported format that can be passed to metadata. The metadata is limited to 4KB in size. # noqa: E501
:return: The metadata of this ConversationCreateBody. # noqa: E501
:rtype: object
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this ConversationCreateBody.
Flat object containing custom properties. Strings, numbers and booleans are the only supported format that can be passed to metadata. The metadata is limited to 4KB in size. # noqa: E501
:param metadata: The metadata of this ConversationCreateBody. # noqa: E501
:type: object
"""
if type(metadata) is Undefined:
metadata = None
self.nulls.discard("metadata")
elif metadata is None:
self.nulls.add("metadata")
else:
self.nulls.discard("metadata")
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConversationCreateBody):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConversationCreateBody):
return True
return self.to_dict() != other.to_dict()
| [
"sunshine_conversations_client.undefined.Undefined",
"sunshine_conversations_client.configuration.Configuration",
"six.iteritems"
] | [((1370, 1381), 'sunshine_conversations_client.undefined.Undefined', 'Undefined', ([], {}), '()\n', (1379, 1381), False, 'from sunshine_conversations_client.undefined import Undefined\n'), ((1392, 1403), 'sunshine_conversations_client.undefined.Undefined', 'Undefined', ([], {}), '()\n', (1401, 1403), False, 'from sunshine_conversations_client.undefined import Undefined\n'), ((1414, 1425), 'sunshine_conversations_client.undefined.Undefined', 'Undefined', ([], {}), '()\n', (1423, 1425), False, 'from sunshine_conversations_client.undefined import Undefined\n'), ((9176, 9209), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (9189, 9209), False, 'import six\n'), ((1646, 1661), 'sunshine_conversations_client.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1659, 1661), False, 'from sunshine_conversations_client.configuration import Configuration\n')] |
#!/usr/bin/python
import sys
import textwrap
import netbyte
def compile():
nbe = netbyte.Netbyte()
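    # argv layout when dispatched from the commands table below: [prog, "compile", <source> or "-", [target]]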
    if len(sys.argv) < 3:
print("Please give as argument a filename (or '-' for the standard input)!")
return 1
if sys.argv[2] == "-":
        if len(sys.argv) < 4:
print("Stdin compilation MUST specify a target bytecode filename.")
return 1
open(sys.argv[3], "wb").write(nbe.compile(*nbe.parse(sys.stdin.read(), "STDIN"), debug=True))
else:
open((sys.argv[3] if len(sys.argv) > 3 else sys.argv[2][:-1] + 'e'), "wb").write(nbe.compile(*nbe.parse_file(sys.argv[2]), debug=True))
def run():
res = netbyte.Netbyte().execute((open(sys.argv[2], 'rb').read() if len(sys.argv) > 2 else sys.stdin.read()), (sys.argv[2] if len(sys.argv) > 2 else "<stdin>"))
if res is not None:
print("[File return value: '{}']".format(res))
commands = {
'compile': compile,
'run': run
}
descriptions = {
'compile': ["Compile your Netbyte source code.", "Usage: compile (<source> [target] | - [target])"],
'run': ["Run your newly-compiled Netbyte bytecode program files.", "Usage: run <target>"]
}
cmd = (sys.argv[1] if len(sys.argv) > 1 else None)
if cmd not in commands:
if cmd is not None:
print("Error: No such command '{}'!".format(cmd))
print()
print("Available subcommands:")
for k, vl in descriptions.items():
print(" - {}".format(k))
for v in vl:
for line in textwrap.wrap(v, 64):
print(" " * 6 + line)
print()
else:
commands[cmd]() | [
"sys.stdin.read",
"netbyte.Netbyte",
"textwrap.wrap"
] | [((87, 104), 'netbyte.Netbyte', 'netbyte.Netbyte', ([], {}), '()\n', (102, 104), False, 'import netbyte\n'), ((685, 702), 'netbyte.Netbyte', 'netbyte.Netbyte', ([], {}), '()\n', (700, 702), False, 'import netbyte\n'), ((769, 785), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (783, 785), False, 'import sys\n'), ((1553, 1573), 'textwrap.wrap', 'textwrap.wrap', (['v', '(64)'], {}), '(v, 64)\n', (1566, 1573), False, 'import textwrap\n'), ((459, 475), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (473, 475), False, 'import sys\n')] |
from unittest.mock import MagicMock
from src.masonite.middleware import MiddlewareCapsule
from tests import TestCase
class MockMiddleware:
def before(self, request, response, arg1):
return request
def after(self, request, response):
return request
class TestMiddleware(TestCase):
def test_can_create_capsule(self):
capsule = MiddlewareCapsule()
self.assertTrue(capsule)
def test_can_add_middleware(self):
capsule = MiddlewareCapsule()
capsule.add({"mock": MockMiddleware})
capsule.add([MockMiddleware])
self.assertTrue(len(capsule.route_middleware) == 1)
self.assertTrue(len(capsule.http_middleware) == 1)
def test_can_add_and_remove_middleware(self):
capsule = MiddlewareCapsule()
capsule.add({"mock": MockMiddleware})
capsule.add([MockMiddleware])
capsule.remove(MockMiddleware)
self.assertTrue(len(capsule.route_middleware) == 1)
self.assertTrue(len(capsule.http_middleware) == 0)
def test_can_get_multiple_middleware(self):
capsule = MiddlewareCapsule()
capsule.add(
{
"mock": MockMiddleware,
"mock1": MockMiddleware,
"mock2": MockMiddleware,
"mock3": [MockMiddleware, MockMiddleware],
}
)
capsule.add([MockMiddleware])
capsule.remove(MockMiddleware)
self.assertTrue(
len(capsule.get_route_middleware(["mock", "mock1", "mock2"])) == 3
)
self.assertTrue(
len(capsule.get_route_middleware(["mock", "mock1", "mock2", "mock3"])) == 5
)
def test_can_run_middleware_with_args(self):
request = self.make_request()
response = self.make_response()
capsule = MiddlewareCapsule()
MockMiddleware.before = MagicMock(return_value=request)
capsule.add(
{
"mock": MockMiddleware,
}
)
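        # Everything after ":" in the middleware string is passed as extra positional arguments to before()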
capsule.run_route_middleware(["mock:arg1,arg2"], request, response)
MockMiddleware.before.assert_called_with(request, response, "arg1", "arg2")
def test_can_use_request_inputs_as_args(self):
# this create a request with @user_id and @id as in input
request = self.make_request(query_string="user_id=3&id=1")
response = self.make_response()
capsule = MiddlewareCapsule()
MockMiddleware.before = MagicMock(return_value=request)
capsule.add(
{
"mock": MockMiddleware,
}
)
capsule.run_route_middleware(["mock:@user_id,@id"], request, response)
MockMiddleware.before.assert_called_with(request, response, "3", "1")
def test_can_mix_args_and_request_inputs(self):
# this create a request with @user_id as in input
request = self.make_request(query_string="user_id=3")
response = self.make_response()
capsule = MiddlewareCapsule()
MockMiddleware.before = MagicMock(return_value=request)
capsule.add(
{
"mock": MockMiddleware,
}
)
capsule.run_route_middleware(["mock:@user_id,value"], request, response)
MockMiddleware.before.assert_called_with(request, response, "3", "value")
| [
"unittest.mock.MagicMock",
"src.masonite.middleware.MiddlewareCapsule"
] | [((368, 387), 'src.masonite.middleware.MiddlewareCapsule', 'MiddlewareCapsule', ([], {}), '()\n', (385, 387), False, 'from src.masonite.middleware import MiddlewareCapsule\n'), ((479, 498), 'src.masonite.middleware.MiddlewareCapsule', 'MiddlewareCapsule', ([], {}), '()\n', (496, 498), False, 'from src.masonite.middleware import MiddlewareCapsule\n'), ((772, 791), 'src.masonite.middleware.MiddlewareCapsule', 'MiddlewareCapsule', ([], {}), '()\n', (789, 791), False, 'from src.masonite.middleware import MiddlewareCapsule\n'), ((1102, 1121), 'src.masonite.middleware.MiddlewareCapsule', 'MiddlewareCapsule', ([], {}), '()\n', (1119, 1121), False, 'from src.masonite.middleware import MiddlewareCapsule\n'), ((1823, 1842), 'src.masonite.middleware.MiddlewareCapsule', 'MiddlewareCapsule', ([], {}), '()\n', (1840, 1842), False, 'from src.masonite.middleware import MiddlewareCapsule\n'), ((1875, 1906), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'request'}), '(return_value=request)\n', (1884, 1906), False, 'from unittest.mock import MagicMock\n'), ((2410, 2429), 'src.masonite.middleware.MiddlewareCapsule', 'MiddlewareCapsule', ([], {}), '()\n', (2427, 2429), False, 'from src.masonite.middleware import MiddlewareCapsule\n'), ((2462, 2493), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'request'}), '(return_value=request)\n', (2471, 2493), False, 'from unittest.mock import MagicMock\n'), ((2982, 3001), 'src.masonite.middleware.MiddlewareCapsule', 'MiddlewareCapsule', ([], {}), '()\n', (2999, 3001), False, 'from src.masonite.middleware import MiddlewareCapsule\n'), ((3034, 3065), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'request'}), '(return_value=request)\n', (3043, 3065), False, 'from unittest.mock import MagicMock\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 10:36:42 2018
@author: javiergaleano
"""
# Sommers's stability
import numpy as np
import matplotlib.pyplot as plt
S = 1000 # number of species
### Sommers et al. Stability
vari = 1/S
sigma = vari
### matrix with -10.0 on the diagonal
diago = np.full_like(np.arange(S), -10.0, dtype=np.float32)
diagonal1 = np.diagflat(diago)
diagonal2 = np.diagflat(diago)
diagonal3 = np.diagflat(diago)
diagonal4 = np.diagflat(diago)
### Array with normal distribution, mean = 0.0 and scale (std dev) = sigma
matriz1 = np.random.normal(loc = 0.0, scale =sigma, size=(S*S-S))
matriz2 = np.random.triangular(-20, -10, 3, size=(S*S-S))
matriz3 = np.random.wald(0.2, sigma, size=(S*S-S))
matriz4 = np.random.exponential(sigma, size=(S*S-S))
### Random matrix complete
k=0
for i in range(S):
for j in range(S):
if i != j:
diagonal1[i][j]= matriz1[k]
diagonal2[i][j]= matriz2[k]
diagonal3[i][j]= matriz3[k]
diagonal4[i][j]= matriz4[k]
k +=1
### mean and var of the matrix
media1 = np.mean(diagonal1, dtype = np.float64)
varianza1 = np.var(diagonal1,dtype=np.float64)
print(media1, varianza1)
media2 = np.mean(diagonal2, dtype = np.float64)
varianza2 = np.var(diagonal2,dtype=np.float64)
print(media2, varianza2)
media3 = np.mean(diagonal3, dtype = np.float64)
varianza3 = np.var(diagonal3,dtype=np.float64)
print(media3, varianza3)
media4 = np.mean(diagonal4, dtype = np.float64)
varianza4 = np.var(diagonal4,dtype=np.float64)
print(media4, varianza4)
### calculating eigenvalues and plotting
autovalor1 = np.linalg.eigvals(diagonal1)
autovalor2 = np.linalg.eigvals(diagonal2)
autovalor3 = np.linalg.eigvals(diagonal3)
autovalor4 = np.linalg.eigvals(diagonal4)
fig, ((ax1,ax2),(ax3,ax4)) = plt.subplots(2, 2)
X1=[]
Y1=[]
for i in autovalor1:
X1.append(i.real)
Y1.append(i.imag)
X2=[]
Y2=[]
for j in autovalor2:
X2.append(j.real)
Y2.append(j.imag)
X3=[]
Y3=[]
for k in autovalor3:
X3.append(k.real)
Y3.append(k.imag)
X4=[]
Y4=[]
for l in autovalor4:
X4.append(l.real)
Y4.append(l.imag)
ax1.scatter(X1,Y1,alpha=0.3)
ax1.set(xlabel="Real", ylabel="Im")
ax2.scatter(X2,Y2,alpha=0.3)
ax2.set(xlabel="Real", ylabel="Im")
ax3.scatter(X3,Y3,alpha=0.3)
ax3.set(xlabel="Real", ylabel="Im")
ax4.scatter(X4,Y4,alpha=0.3)
ax4.set(xlabel="Real", ylabel="Im")
#mean, var, skew, kurt = halfnorm.stats(moments='mvsk')
#x = np.linspace(halfnorm.ppf(0.01), halfnorm.ppf(0.99), 100)
#ax.plot(x, halfnorm.pdf(x), 'ro', lw=5, alpha=0.6, label='halfnorm pdf')
#r = halfnorm.rvs(size=10000)
#ax.hist(r, normed=True, histtype='stepfilled', bins = 20, alpha=0.2)
#ax.legend(loc='best', frameon=False)
#plt.show()
| [
"numpy.random.normal",
"numpy.mean",
"numpy.arange",
"numpy.random.triangular",
"numpy.random.exponential",
"numpy.linalg.eigvals",
"numpy.diagflat",
"matplotlib.pyplot.subplots",
"numpy.var",
"numpy.random.wald"
] | [((386, 404), 'numpy.diagflat', 'np.diagflat', (['diago'], {}), '(diago)\n', (397, 404), True, 'import numpy as np\n'), ((417, 435), 'numpy.diagflat', 'np.diagflat', (['diago'], {}), '(diago)\n', (428, 435), True, 'import numpy as np\n'), ((448, 466), 'numpy.diagflat', 'np.diagflat', (['diago'], {}), '(diago)\n', (459, 466), True, 'import numpy as np\n'), ((479, 497), 'numpy.diagflat', 'np.diagflat', (['diago'], {}), '(diago)\n', (490, 497), True, 'import numpy as np\n'), ((577, 631), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'sigma', 'size': '(S * S - S)'}), '(loc=0.0, scale=sigma, size=S * S - S)\n', (593, 631), True, 'import numpy as np\n'), ((643, 692), 'numpy.random.triangular', 'np.random.triangular', (['(-20)', '(-10)', '(3)'], {'size': '(S * S - S)'}), '(-20, -10, 3, size=S * S - S)\n', (663, 692), True, 'import numpy as np\n'), ((701, 743), 'numpy.random.wald', 'np.random.wald', (['(0.2)', 'sigma'], {'size': '(S * S - S)'}), '(0.2, sigma, size=S * S - S)\n', (715, 743), True, 'import numpy as np\n'), ((752, 796), 'numpy.random.exponential', 'np.random.exponential', (['sigma'], {'size': '(S * S - S)'}), '(sigma, size=S * S - S)\n', (773, 796), True, 'import numpy as np\n'), ((1121, 1157), 'numpy.mean', 'np.mean', (['diagonal1'], {'dtype': 'np.float64'}), '(diagonal1, dtype=np.float64)\n', (1128, 1157), True, 'import numpy as np\n'), ((1172, 1207), 'numpy.var', 'np.var', (['diagonal1'], {'dtype': 'np.float64'}), '(diagonal1, dtype=np.float64)\n', (1178, 1207), True, 'import numpy as np\n'), ((1243, 1279), 'numpy.mean', 'np.mean', (['diagonal2'], {'dtype': 'np.float64'}), '(diagonal2, dtype=np.float64)\n', (1250, 1279), True, 'import numpy as np\n'), ((1294, 1329), 'numpy.var', 'np.var', (['diagonal2'], {'dtype': 'np.float64'}), '(diagonal2, dtype=np.float64)\n', (1300, 1329), True, 'import numpy as np\n'), ((1365, 1401), 'numpy.mean', 'np.mean', (['diagonal3'], {'dtype': 'np.float64'}), '(diagonal3, dtype=np.float64)\n', (1372, 1401), True, 'import numpy as np\n'), ((1416, 1451), 'numpy.var', 'np.var', (['diagonal3'], {'dtype': 'np.float64'}), '(diagonal3, dtype=np.float64)\n', (1422, 1451), True, 'import numpy as np\n'), ((1487, 1523), 'numpy.mean', 'np.mean', (['diagonal4'], {'dtype': 'np.float64'}), '(diagonal4, dtype=np.float64)\n', (1494, 1523), True, 'import numpy as np\n'), ((1538, 1573), 'numpy.var', 'np.var', (['diagonal4'], {'dtype': 'np.float64'}), '(diagonal4, dtype=np.float64)\n', (1544, 1573), True, 'import numpy as np\n'), ((1655, 1683), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['diagonal1'], {}), '(diagonal1)\n', (1672, 1683), True, 'import numpy as np\n'), ((1697, 1725), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['diagonal2'], {}), '(diagonal2)\n', (1714, 1725), True, 'import numpy as np\n'), ((1739, 1767), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['diagonal3'], {}), '(diagonal3)\n', (1756, 1767), True, 'import numpy as np\n'), ((1781, 1809), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['diagonal4'], {}), '(diagonal4)\n', (1798, 1809), True, 'import numpy as np\n'), ((1840, 1858), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (1852, 1858), True, 'import matplotlib.pyplot as plt\n'), ((335, 347), 'numpy.arange', 'np.arange', (['S'], {}), '(S)\n', (344, 347), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
# a = np.array([[1, 8, 50],
# [8, 64, 400],
# [50, 400, 2500]])
# print(np.linalg.det(a))
# print(inv(a))
X = np.arange(12)
plt.plot(X, [-27.80190177509302, -18.278008698672398, -14.155975751698152, -9.355509112700402, -6.928485221001136,
-7.252783769260551, -9.075566102641499, -12.217659850459722, -15.762571764008012, -19.10983337936544,
-21.26188672609279, -19.165376585714384])
plt.ylabel('maximum of the log marginal likelihood')
plt.xlabel('orders')
plt.show()
print("done")
#
# The local maximum for order 0 is -27.80190177509302
#
# The local maximum for order 1 is -18.278008698672398
#
#
# The local maximum for order 1 is -18.278008698672398
#
#
# The local maximum occurs at [0.1713283 0.09402557]
# The local maximum for order 2 is -14.155975751698152
#
#
# The local maximum occurs at [0.13241786 0.0432884 ]
# The local maximum for order 3 is -9.355509112700402
#
#
# The local maximum occurs at [0.10693263 0.0230254 ]
# The local maximum for order 4 is -6.928485221001136
#
# The local maximum for order 5 is -7.252783769260551 X
#
# The local maximum for order 6 is -9.075566102641499 X
#
# The local maximum for order 7 -12.217659850459722
#
# The local maximum for order 8 is -15.762571764008012
#
# The local maximum occurs at [0.05257424 0.01684657]
# The local maximum for order 9 is -19.10983337936544
#
#
#
# The local maximum occurs at [0.05077136 0.01131081]
# The local maximum for order 10 is -21.26188672609279
#
#
# The local maximum occurs at [0.04474178 0.00024963]
# The local maximum for order 11 is -19.165376585714384
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((203, 216), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (212, 216), True, 'import numpy as np\n'), ((217, 490), 'matplotlib.pyplot.plot', 'plt.plot', (['X', '[-27.80190177509302, -18.278008698672398, -14.155975751698152, -\n 9.355509112700402, -6.928485221001136, -7.252783769260551, -\n 9.075566102641499, -12.217659850459722, -15.762571764008012, -\n 19.10983337936544, -21.26188672609279, -19.165376585714384]'], {}), '(X, [-27.80190177509302, -18.278008698672398, -14.155975751698152, \n -9.355509112700402, -6.928485221001136, -7.252783769260551, -\n 9.075566102641499, -12.217659850459722, -15.762571764008012, -\n 19.10983337936544, -21.26188672609279, -19.165376585714384])\n', (225, 490), True, 'import matplotlib.pyplot as plt\n'), ((503, 555), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""maximum of the log marginal likelihood"""'], {}), "('maximum of the log marginal likelihood')\n", (513, 555), True, 'import matplotlib.pyplot as plt\n'), ((556, 576), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""orders"""'], {}), "('orders')\n", (566, 576), True, 'import matplotlib.pyplot as plt\n'), ((577, 587), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (585, 587), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
def get_trans(v1, v2, trans):
return - np.log(trans[v1[1], v2[1]])
def get_probs(v, probs):
return - np.log(probs[v[1]])
def simple_foward_backward_gap_dist(model, before_sym, after_sym):
# TODO: gap size only one for now
syms = model.syms
trans = model.trans
if before_sym not in syms and after_sym not in syms:
return None, None
    probs = np.zeros(len(syms))
if before_sym is not None:
before_ind = syms.index(before_sym)
if after_sym is not None:
after_ind = syms.index(after_sym)
for i, sym in enumerate(syms):
if after_sym is None:
probs[i] = np.log(trans[before_ind, i])
elif before_sym is None:
probs[i] = np.log(trans[i, after_ind])
else:
probs[i] = np.log(trans[before_ind, i]) + np.log(trans[i, after_ind])
sorted_inds = np.argsort(-probs)
sorted_probs = [probs[ind] for ind in sorted_inds]
sorted_syms = [syms[ind] for ind in sorted_inds]
return sorted_probs, sorted_syms
def shortest_path(model, fixed, seq_ind, original_seq):
syms = model.syms
    lb = min(fixed.keys())
    ub = max(fixed.keys())
if seq_ind - 1 in fixed.keys():
fixed[seq_ind-1] = []
if seq_ind + 1 in fixed.keys():
fixed[seq_ind+1] = []
changed_start_ind = 0
changed_end_ind = None
sym_inds = range(lb, ub+1)
# setting up the vertices
states = {} # for retriving the vertices according to time step
vertices = [] # flat list of all vertices
    for i in range(lb, ub+1):
if i in fixed.keys() and len(fixed[i]) > 0:
local_states = [(i, syms.index(fixed[i]))]
else:
local_states = [(i, j) for j in range(len(syms))]
states[i] = local_states
vertices.extend(local_states)
dists = []
first_states = states[lb]
probs = model.priors
if lb == 0:
probs = model.starts
for v in first_states:
dists.append(get_probs(v, probs))
for i in range(len(first_states), len(vertices)):
dists.append(np.inf)
assert len(vertices) == len(dists)
previous_dict = {}
for v in vertices:
previous_dict[v] = None
open_vertices = vertices[:]
open_dists = dists[:]
while len(open_vertices) > 0:
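        # Dijkstra-style relaxation over the trellis: settle the open vertex with the smallest tentative distance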
ui = np.argmin(open_dists)
u = open_vertices[ui]
open_vertices.remove(u)
dist = open_dists[ui]
del open_dists[ui]
state_ind = u[0]
n_state_ind = state_ind + 1
if n_state_ind > ub:
break
else:
n_vertices = states[n_state_ind]
for nv in n_vertices:
alt = dist + get_trans(u, nv, model.trans)
ni = vertices.index(nv)
if alt < dists[ni]:
dists[ni] = alt
# update open dists
if nv in open_vertices:
open_pi = open_vertices.index(nv)
open_dists[open_pi] = alt
previous_dict[nv] = u
# find target
targets = states[ub]
last_step_dists = []
for v in targets:
ind = vertices.index(v)
last_step_dists.append(dists[ind])
min_ind = np.argmin(last_step_dists)
u = targets[min_ind]
# construct seq
seq = []
while previous_dict[u] is not None:
seq.insert(0, u)
u = previous_dict[u]
seq.insert(0, u)
sym_seq = []
for s in seq:
sym = syms[s[1]]
sym_seq.append(sym)
sym_subseq = sym_seq[changed_start_ind:changed_end_ind]
assert len(sym_inds) == len(sym_subseq)
# make complete seq
if ub + 1 > len(original_seq) - 1:
new_seq = original_seq[:lb] + sym_subseq
else:
new_seq = original_seq[:lb] + sym_subseq + original_seq[ub+1:]
return new_seq, sym_inds
| [
"numpy.argsort",
"numpy.log",
"numpy.argmin"
] | [((888, 906), 'numpy.argsort', 'np.argsort', (['(-probs)'], {}), '(-probs)\n', (898, 906), True, 'import numpy as np\n'), ((3279, 3305), 'numpy.argmin', 'np.argmin', (['last_step_dists'], {}), '(last_step_dists)\n', (3288, 3305), True, 'import numpy as np\n'), ((66, 93), 'numpy.log', 'np.log', (['trans[v1[1], v2[1]]'], {}), '(trans[v1[1], v2[1]])\n', (72, 93), True, 'import numpy as np\n'), ((134, 153), 'numpy.log', 'np.log', (['probs[v[1]]'], {}), '(probs[v[1]])\n', (140, 153), True, 'import numpy as np\n'), ((2351, 2372), 'numpy.argmin', 'np.argmin', (['open_dists'], {}), '(open_dists)\n', (2360, 2372), True, 'import numpy as np\n'), ((661, 689), 'numpy.log', 'np.log', (['trans[before_ind, i]'], {}), '(trans[before_ind, i])\n', (667, 689), True, 'import numpy as np\n'), ((746, 773), 'numpy.log', 'np.log', (['trans[i, after_ind]'], {}), '(trans[i, after_ind])\n', (752, 773), True, 'import numpy as np\n'), ((811, 839), 'numpy.log', 'np.log', (['trans[before_ind, i]'], {}), '(trans[before_ind, i])\n', (817, 839), True, 'import numpy as np\n'), ((842, 869), 'numpy.log', 'np.log', (['trans[i, after_ind]'], {}), '(trans[i, after_ind])\n', (848, 869), True, 'import numpy as np\n')] |
import subprocess
from firebase import firebase
taiwanstat_firebase = firebase.FirebaseApplication('https://realtaiwanstat.firebaseio.com', None)
with open('./data.csv', 'r') as outfile:
uv_data = outfile.read()
result = taiwanstat_firebase.post('/dengue-fever', uv_data)
print(result)
| [
"firebase.firebase.FirebaseApplication"
] | [((71, 146), 'firebase.firebase.FirebaseApplication', 'firebase.FirebaseApplication', (['"""https://realtaiwanstat.firebaseio.com"""', 'None'], {}), "('https://realtaiwanstat.firebaseio.com', None)\n", (99, 146), False, 'from firebase import firebase\n')] |
from __future__ import division, print_function
import cv2
import numpy as np
import algorithm
import collate
import lib
def split_lines(lines, all_lines=None):
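    # Split a two-page scan into left/right line groups at the widest horizontal gap between lines.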
# Maximize horizontal separation
    # Lines are sorted by starting x value, ascending.
lines = sorted(lines[:], key=lambda line: line.left())
# Greedy algorithm. Maximize L bound of R minus R bound of L.
current_r = 0
quantity = -100000
argmax = -1
for idx, line in enumerate(lines[2:-3], 2):
if line.right() - line.left() > 2000:
print('line:', line.crop())
for l in line:
print(l)
current_r = max(current_r, line.right())
x2 = lines[idx + 1].left()
if lib.debug:
pass
# print('x2:', x2, 'r:', current_r, 'quantity:', x2 - current_r)
if x2 - current_r > quantity:
quantity = x2 - current_r
argmax = idx
if lib.debug: print('split:', argmax, 'out of', len(lines), '@', current_r)
line_groups = [l for l in (lines[:argmax + 1], lines[argmax + 1:]) if l]
if all_lines is None:
return line_groups
else:
if len(line_groups) == 1:
return line_groups, [all_lines]
else:
boundary = (lines[argmax].left() + lines[argmax + 1].left()) / 2
all_lines = sorted(all_lines, key=lambda line: line.left())
lefts = [line.left() for line in all_lines]
middle_idx = np.searchsorted(lefts, boundary)
print('boundary:', boundary)
print('left:', lefts[:middle_idx])
print('right:', lefts[middle_idx:])
return line_groups, [all_lines[:middle_idx], all_lines[middle_idx:]]
def filter_position(AH, im, lines, split):
new_lines = []
line_lefts = np.array([line.left() for line in lines])
line_rights = np.array([line.right() for line in lines])
line_start_thresh = np.percentile(line_lefts, 15 if split else 30) - 15 * AH
line_end_thresh = np.percentile(line_rights, 85 if split else 70) + 15 * AH
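    # Drop lines that end before the typical left margin or start after the typical right margin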
debug = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
for line in lines:
if line.right() < line_start_thresh or line.left() > line_end_thresh:
line.crop().draw(debug, color=lib.RED)
else:
line.crop().draw(debug, color=lib.GREEN)
new_lines.append(line)
lib.debug_imwrite("position_filter.png", debug)
return new_lines
def crop(im, bw, split=True):
im_h, im_w = im.shape[:2]
all_letters = algorithm.all_letters(bw)
AH = algorithm.dominant_char_height(bw, letters=all_letters)
letters = algorithm.filter_size(AH, bw, letters=all_letters)
all_lines = collate.collate_lines(AH, letters)
combined = algorithm.combine_underlined(AH, bw, all_lines, all_letters)
lines = algorithm.remove_stroke_outliers(bw, combined)
if not lines:
print('WARNING: no lines in image.')
return AH, []
lines = filter_position(AH, bw, lines, split)
lines = [line for line in lines if not np.all(line.crop().apply(bw) == 255)]
if not lines:
print('WARNING: eliminated all lines.')
return AH, []
if split and im_w > im_h: # two pages
line_sets = split_lines(lines)
else:
line_sets = [lines]
return AH, line_sets
| [
"algorithm.all_letters",
"algorithm.remove_stroke_outliers",
"algorithm.filter_size",
"numpy.searchsorted",
"algorithm.dominant_char_height",
"cv2.cvtColor",
"numpy.percentile",
"collate.collate_lines",
"lib.debug_imwrite",
"algorithm.combine_underlined"
] | [((2070, 2106), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_GRAY2RGB'], {}), '(im, cv2.COLOR_GRAY2RGB)\n', (2082, 2106), False, 'import cv2\n'), ((2366, 2413), 'lib.debug_imwrite', 'lib.debug_imwrite', (['"""position_filter.png"""', 'debug'], {}), "('position_filter.png', debug)\n", (2383, 2413), False, 'import lib\n'), ((2516, 2541), 'algorithm.all_letters', 'algorithm.all_letters', (['bw'], {}), '(bw)\n', (2537, 2541), False, 'import algorithm\n'), ((2551, 2606), 'algorithm.dominant_char_height', 'algorithm.dominant_char_height', (['bw'], {'letters': 'all_letters'}), '(bw, letters=all_letters)\n', (2581, 2606), False, 'import algorithm\n'), ((2621, 2671), 'algorithm.filter_size', 'algorithm.filter_size', (['AH', 'bw'], {'letters': 'all_letters'}), '(AH, bw, letters=all_letters)\n', (2642, 2671), False, 'import algorithm\n'), ((2688, 2722), 'collate.collate_lines', 'collate.collate_lines', (['AH', 'letters'], {}), '(AH, letters)\n', (2709, 2722), False, 'import collate\n'), ((2738, 2798), 'algorithm.combine_underlined', 'algorithm.combine_underlined', (['AH', 'bw', 'all_lines', 'all_letters'], {}), '(AH, bw, all_lines, all_letters)\n', (2766, 2798), False, 'import algorithm\n'), ((2811, 2857), 'algorithm.remove_stroke_outliers', 'algorithm.remove_stroke_outliers', (['bw', 'combined'], {}), '(bw, combined)\n', (2843, 2857), False, 'import algorithm\n'), ((1920, 1966), 'numpy.percentile', 'np.percentile', (['line_lefts', '(15 if split else 30)'], {}), '(line_lefts, 15 if split else 30)\n', (1933, 1966), True, 'import numpy as np\n'), ((1999, 2046), 'numpy.percentile', 'np.percentile', (['line_rights', '(85 if split else 70)'], {}), '(line_rights, 85 if split else 70)\n', (2012, 2046), True, 'import numpy as np\n'), ((1462, 1494), 'numpy.searchsorted', 'np.searchsorted', (['lefts', 'boundary'], {}), '(lefts, boundary)\n', (1477, 1494), True, 'import numpy as np\n')] |
import os
from urllib.parse import urlencode
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
MAP_MATCH = r'\[map:(?P<location_name>[^\]]+)\]'
class GoogleMapExtension(Extension):
def __init__(self, config_getter=os.environ.get, **kwargs):
"""
Initialise Google Maps Markdown Extension with overridable config
Args:
config_getter: A callable object that conforms to the interface
get('key', 'default') e.g. os.environ.get or dict.get
"""
google_api_key = config_getter('GOOGLE_API_KEY', '')
self.config = {
'google_api_key': [
google_api_key, 'Google API Key required to load maps',
],
'width': ['500', 'Width of iframe for map'],
'height': ['300', 'Height of iframe for map'],
'fluid': [
False, 'Should iframe be wrapped in div allowing fluid sizing?'
]
}
super().__init__(**kwargs)
def extendMarkdown(self, md, md_globals):
"""
Extends markdown with inline patterns providing Google map function.
See base class for further info.
"""
google_map = GoogleMapPattern(MAP_MATCH)
google_map.extension_conf = self.config
md.inlinePatterns['google_map'] = google_map
class GoogleMapPattern(Pattern):
extension_conf = None
def handleMatch(self, match):
"""
Handles a regex match object, rendering the matching map tag as a map.
Args:
match: A regex match object that will match map markdown and group
the location.
Returns:
An etree.Element of the generated map html.
"""
location = match.group('location_name')
return self._render_iframe(location)
def _render_iframe(self, location):
"""
Creates an iframe that will be a google map centered on the given
location.
Additional configuration is taken from the calling class
GoogleMapsExtension.config dict.
Args:
location: A plain text string detailing the location to be passed
to google maps. e.g. 'City of London'.
Returns:
an etree.Element of either an iframe or, in the case that
extension_conf['fluid'] is Truthy, a div wrapping an iframe.
"""
gmaps_url = "//www.google.com/maps/embed/v1/place?"
query = {
'key': self.extension_conf['google_api_key'][0],
'q': location
}
src = gmaps_url + urlencode(query)
width = str(self.extension_conf['width'][0])
height = str(self.extension_conf['height'][0])
iframe = etree.Element('iframe')
iframe.set('width', width)
iframe.set('height', height)
iframe.set('src', src)
iframe.set('allowfullscreen', 'true')
iframe.set('frameborder', '0')
wrapper = self._fluid_wrapper(iframe)
return wrapper or iframe
def _fluid_wrapper(self, iframe):
"""
Wraps the given iframe in a div and adds styling to both in order to
create a fluid sized, full-width iframe.
Only wraps the element if the config 'fluid' is set to True.
Args:
iframe: An etree.Element object of the iframe.
Returns:
An etree element with the iframe as a child, the case that
extension_conf['fluid'] is Falsey, returns None.
"""
if self.extension_conf['fluid']:
wrapper_style = (
'position:relative;padding-bottom:56.25%;'
'padding-top:25px;height:0;'
)
iframe_style = (
'position:absolute;top:0;left:0;width:100%;height:100%;'
)
iframe.set('style', iframe_style)
wrapper = etree.Element('div')
wrapper.set('class', 'iframe-wrapper')
wrapper.set('style', wrapper_style)
wrapper.append(iframe)
else:
wrapper = None
return wrapper
| [
"urllib.parse.urlencode",
"markdown.util.etree.Element"
] | [((2827, 2850), 'markdown.util.etree.Element', 'etree.Element', (['"""iframe"""'], {}), "('iframe')\n", (2840, 2850), False, 'from markdown.util import etree\n'), ((2683, 2699), 'urllib.parse.urlencode', 'urlencode', (['query'], {}), '(query)\n', (2692, 2699), False, 'from urllib.parse import urlencode\n'), ((3978, 3998), 'markdown.util.etree.Element', 'etree.Element', (['"""div"""'], {}), "('div')\n", (3991, 3998), False, 'from markdown.util import etree\n')] |
import jcs
jcs.emit_warning("Hello World")
| [
"jcs.emit_warning"
] | [((11, 42), 'jcs.emit_warning', 'jcs.emit_warning', (['"""Hello World"""'], {}), "('Hello World')\n", (27, 42), False, 'import jcs\n')] |
import colorama
import logging
import tabulate
from six import print_
from functools import partial
LEVELS = {
logging.DEBUG: 'DEBG',
logging.INFO: 'INFO',
logging.WARNING: 'WARN',
logging.ERROR: 'ERRR',
logging.CRITICAL: 'CRIT'
}
COLORS = {
logging.DEBUG: colorama.Fore.WHITE,
logging.INFO: colorama.Fore.GREEN,
logging.WARNING: colorama.Fore.YELLOW,
logging.ERROR: colorama.Fore.RED,
logging.CRITICAL: colorama.Fore.RED
}
COLOR_RESET = colorama.Fore.RESET + colorama.Back.RESET + colorama.Style.RESET_ALL
class LogFormatter(logging.Formatter):
_default_format = '[{level}] {message}'
def __init__(self, format = None):
super(LogFormatter, self).__init__()
self._format = format or self._default_format
def double_fault(self, exc, record):
import sys
import traceback
print_('Failure in formatter:', file = sys.stderr)
print_('record: ' + str(record), file = sys.stderr)
print_('message: ' + str(record.msg), file = sys.stderr)
print_('args: ' + str(record.args), file = sys.stderr)
traceback.print_exc(file = sys.stderr)
sys.exit(1)
def _get_vars(self, record):
return {
'level': LEVELS[record.levelno],
'stamp': record.created
}
def format(self, record):
try:
vars = self._get_vars(record)
msg = [self._format.format(message = record.getMessage(), **vars)]
if record.exc_info is not None:
msg += [self._format.format(message = l, **vars) for l in self.formatException(record.exc_info).split('\n')]
return '\n'.join(msg)
except Exception as e:
self.double_fault(e, record)
def colorize(self, s, *args, **kwargs):
return s
def red(self, s):
return s
def green(self, s):
return s
def blue(self, s):
return s
def white(self, s):
return s
class ColorizedLogFormatter(LogFormatter):
_default_format = '{color_start}[{level}]{color_end} {message}'
def _get_vars(self, record):
vars = super(ColorizedLogFormatter, self)._get_vars(record)
vars.update({
'color_start': COLORS[record.levelno],
'color_end': COLOR_RESET
})
return vars
def colorize(self, s, fore = COLOR_RESET, back = COLOR_RESET):
return fore + s + back
def red(self, s):
return self.colorize(s, fore = colorama.Fore.RED)
def green(self, s):
return self.colorize(s, fore = colorama.Fore.GREEN)
def blue(self, s):
return self.colorize(s, fore = colorama.Fore.BLUE)
def white(self, s):
return self.colorize(s, fore = colorama.Fore.WHITE)
class StreamHandler(logging.StreamHandler):
def __init__(self, formatter = None, *args, **kwargs):
super(StreamHandler, self).__init__(*args, **kwargs)
formatter = formatter or ColorizedLogFormatter()
self.setFormatter(formatter)
def create_logger(name = None, handler = None, level = logging.INFO):
name = name or 'ducky'
logger = logging.getLogger(name)
if handler:
logger.addHandler(handler)
def __table(table, fn = None, **kwargs):
fn = fn or logger.info
for line in tabulate.tabulate(table, headers = 'firstrow', tablefmt = 'simple', numalign = 'right', **kwargs).split('\n'):
fn(line)
logger.table = __table
logger.DEBUG = logger.debug
logger.setLevel(level)
return logger
get_logger = partial(logging.getLogger, 'ducky')
| [
"logging.getLogger",
"tabulate.tabulate",
"functools.partial",
"sys.exit",
"traceback.print_exc",
"six.print_"
] | [((3340, 3375), 'functools.partial', 'partial', (['logging.getLogger', '"""ducky"""'], {}), "(logging.getLogger, 'ducky')\n", (3347, 3375), False, 'from functools import partial\n'), ((2943, 2966), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (2960, 2966), False, 'import logging\n'), ((846, 894), 'six.print_', 'print_', (['"""Failure in formatter:"""'], {'file': 'sys.stderr'}), "('Failure in formatter:', file=sys.stderr)\n", (852, 894), False, 'from six import print_\n'), ((1078, 1114), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'sys.stderr'}), '(file=sys.stderr)\n', (1097, 1114), False, 'import traceback\n'), ((1121, 1132), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1129, 1132), False, 'import sys\n'), ((3100, 3196), 'tabulate.tabulate', 'tabulate.tabulate', (['table'], {'headers': '"""firstrow"""', 'tablefmt': '"""simple"""', 'numalign': '"""right"""'}), "(table, headers='firstrow', tablefmt='simple', numalign=\n 'right', **kwargs)\n", (3117, 3196), False, 'import tabulate\n')] |
'''
Created on Mar 7, 2016
@author: mike
'''
from patgen.margins import Margins
class Chunker:
def __init__(self, chunklen, margins=Margins(1,1)):
self.chunklen = chunklen
self.margins = margins
def __call__(self, word, hyphenpos):
        r'''
Takes word :word: and generates all chunks of the given length with
hyphen position :hyphenpos:.
margin_left sets the minimal length of word prefix where hyphenation is not allowed.
this is the same as TeX's \lefthyphenmin
        margin_right sets the minimal length of word suffix where hyphenation is not allowed.
this is the same as TeX's \righthyphenmin
Example:
chunker = Chunker(2)
chunker('mike', 2) will produce this sequence:
0, ".m"
1, "mi"
2, "ik"
'''
if hyphenpos > len(word):
return # word is too short
assert 0 <= hyphenpos <= len(word)
word = '.' + word + '.'
start = 0
end = len(word) - self.chunklen + 1 # last valid offset
start = max(start, self.margins.left+1-hyphenpos)
end = min(end, len(word)-self.margins.right-hyphenpos)
for i in range(start, end):
yield i, word[i:i+self.chunklen]
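
# Small self-check of the chunking behaviour (illustrative; assumes Margins(left, right) as used above).
if __name__ == '__main__':
    chunker = Chunker(2)
    assert list(chunker('mike', 2)) == [(0, '.m'), (1, 'mi'), (2, 'ik')]
    # A wider right margin forbids hyphens within the last two letters, so the final chunk is dropped.
    assert list(Chunker(2, Margins(1, 2))('mike', 2)) == [(0, '.m'), (1, 'mi')]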
| [
"patgen.margins.Margins"
] | [((143, 156), 'patgen.margins.Margins', 'Margins', (['(1)', '(1)'], {}), '(1, 1)\n', (150, 156), False, 'from patgen.margins import Margins\n')] |
from django.db import models
from django.utils import timezone
from datetime import timedelta
class Setting(models.Model):
self_regulate = models.BooleanField(default=True)
logging_active = models.BooleanField(default=True)
logging_interval = models.IntegerField('Logging interval (minutes)')
loop_interval = models.IntegerField('Loop interval (seconds)',
default=2)
save_night_images = models.BooleanField()
start_light = models.TimeField()
end_light = models.TimeField()
soil_moisture_pump_level = models.FloatField()
watering_time = models.IntegerField('Watering time (seconds)')
watering_inactive_period = \
models.IntegerField('Watering inactive period (minutes)')
graph_range_normal = models.IntegerField('Graph range normal (hours)')
graph_range_detailed = models.IntegerField('Graph range detailed (hours)')
def __str__(self):
return 'Settings'
class DataPoint(models.Model):
time = models.DateTimeField()
temperature = models.FloatField('Temperature degrees Celsius')
air_humidity = models.FloatField()
soil_moisture_1 = models.FloatField('Soil moisture coriander')
soil_moisture_2 = models.FloatField('Soil moisture basil')
image = models.ImageField(upload_to='camera')
light_on = models.BooleanField()
url_to_variables = {'time':['time'],
'temperature':['temperature'],
'moisture':['soil_moisture_1', 'soil_moisture_2'],
'humidity':['air_humidity'],
'video':['image'],
'light':['light_on']}
variable_lookup = {'time':
{'verbal':'Time',
'suffix':''},
'temperature':
{'verbal':'Temperature',
'suffix':''},
'soil_moisture_1':
{'verbal':'Moisture coriander',
'suffix':''},
'soil_moisture_2':
{'verbal':'Moisture basil',
'suffix':''},
'light_on':
{'verbal':'Lights',
'suffix':''},
'air_humidity':
{'verbal':'Humidity',
'suffix':''},
}
def __str__(self):
# return str(self.image.path)
return str(self.time.astimezone().strftime("%Y-%m-%d %H:%M:%S"))
class SystemStatus(models.Model):
last_loop = models.DateTimeField()
message = models.CharField(max_length=140, default='OK')
free_space = models.FloatField(default=1000.0)
ok = models.BooleanField(default=True)
last_watering = models.DateTimeField()
@property
def report(self):
now = timezone.now()
max_delta = timedelta(seconds=Setting.objects.get().loop_interval+1)
free = self.free_space_verbal()
if (now-self.last_loop) > max_delta:
return {'ok': False,
'message': 'Sensors not responding',
'free_space':free}
elif self.ok is False:
return {'ok': False,
'message': self.message,
'free_space':free}
else:
return {'ok': True,
'message': 'OK',
'free_space':free}
def free_space_verbal(self):
mb_free = float(self.free_space)
if mb_free > 1000:
gb_free = mb_free/1000
if gb_free > 10:
return '{} GB'.format(int(gb_free))
else:
return '{:.1f} GB'.format(gb_free)
else:
return '{} MB'.format(mb_free)
def __str__(self):
return 'System status'
| [
"django.db.models.FloatField",
"django.db.models.TimeField",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.utils.timezone.now",
"django.db.models.ImageField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((151, 184), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (170, 184), False, 'from django.db import models\n'), ((207, 240), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (226, 240), False, 'from django.db import models\n'), ((265, 314), 'django.db.models.IntegerField', 'models.IntegerField', (['"""Logging interval (minutes)"""'], {}), "('Logging interval (minutes)')\n", (284, 314), False, 'from django.db import models\n'), ((336, 393), 'django.db.models.IntegerField', 'models.IntegerField', (['"""Loop interval (seconds)"""'], {'default': '(2)'}), "('Loop interval (seconds)', default=2)\n", (355, 393), False, 'from django.db import models\n'), ((460, 481), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (479, 481), False, 'from django.db import models\n'), ((501, 519), 'django.db.models.TimeField', 'models.TimeField', ([], {}), '()\n', (517, 519), False, 'from django.db import models\n'), ((537, 555), 'django.db.models.TimeField', 'models.TimeField', ([], {}), '()\n', (553, 555), False, 'from django.db import models\n'), ((588, 607), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (605, 607), False, 'from django.db import models\n'), ((629, 675), 'django.db.models.IntegerField', 'models.IntegerField', (['"""Watering time (seconds)"""'], {}), "('Watering time (seconds)')\n", (648, 675), False, 'from django.db import models\n'), ((719, 776), 'django.db.models.IntegerField', 'models.IntegerField', (['"""Watering inactive period (minutes)"""'], {}), "('Watering inactive period (minutes)')\n", (738, 776), False, 'from django.db import models\n'), ((803, 852), 'django.db.models.IntegerField', 'models.IntegerField', (['"""Graph range normal (hours)"""'], {}), "('Graph range normal (hours)')\n", (822, 852), False, 'from django.db import models\n'), ((881, 932), 'django.db.models.IntegerField', 'models.IntegerField', (['"""Graph range detailed (hours)"""'], {}), "('Graph range detailed (hours)')\n", (900, 932), False, 'from django.db import models\n'), ((1034, 1056), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1054, 1056), False, 'from django.db import models\n'), ((1076, 1124), 'django.db.models.FloatField', 'models.FloatField', (['"""Temperature degrees Celsius"""'], {}), "('Temperature degrees Celsius')\n", (1093, 1124), False, 'from django.db import models\n'), ((1145, 1164), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1162, 1164), False, 'from django.db import models\n'), ((1188, 1232), 'django.db.models.FloatField', 'models.FloatField', (['"""Soil moisture coriander"""'], {}), "('Soil moisture coriander')\n", (1205, 1232), False, 'from django.db import models\n'), ((1256, 1296), 'django.db.models.FloatField', 'models.FloatField', (['"""Soil moisture basil"""'], {}), "('Soil moisture basil')\n", (1273, 1296), False, 'from django.db import models\n'), ((1310, 1347), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""camera"""'}), "(upload_to='camera')\n", (1327, 1347), False, 'from django.db import models\n'), ((1364, 1385), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1383, 1385), False, 'from django.db import models\n'), ((2736, 2758), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2756, 2758), False, 'from django.db import models\n'), ((2774, 2820), 'django.db.models.CharField', 
'models.CharField', ([], {'max_length': '(140)', 'default': '"""OK"""'}), "(max_length=140, default='OK')\n", (2790, 2820), False, 'from django.db import models\n'), ((2839, 2872), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(1000.0)'}), '(default=1000.0)\n', (2856, 2872), False, 'from django.db import models\n'), ((2883, 2916), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (2902, 2916), False, 'from django.db import models\n'), ((2938, 2960), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (2958, 2960), False, 'from django.db import models\n'), ((3016, 3030), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3028, 3030), False, 'from django.utils import timezone\n')] |
'''
Problem Name: Add Two Numbers
Problem Code: FLOW001
Problem Type: https://www.codechef.com/problems/school
Problem Link: https://www.codechef.com/problems/FLOW001
Solution Link: https://www.codechef.com/viewsolution/46835083
'''
from sys import stdin
def main(n):
curInx = n
while curInx:
a, b = map(int, stdin.readline().split())
curInx -= 1
print(a+b)
if __name__ == '__main__':
n = int(stdin.readline())
main(n) | [
"sys.stdin.readline"
] | [((452, 468), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (466, 468), False, 'from sys import stdin\n'), ((347, 363), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (361, 363), False, 'from sys import stdin\n')] |
import pytest
from requests.exceptions import ConnectionError
from ...routes.models import Country
from ..coutries import GEO_COUNTRIES_URL, import_country_geometries
#########
# model #
#########
@pytest.mark.django_db
def test_country_str():
country = Country.objects.last()
assert str(country) == country.name
################
# countries.py #
################
@pytest.mark.django_db
def test_import_country_geometries(mock_json_response):
Country.objects.all().delete()
url = GEO_COUNTRIES_URL
file = "countries.geojson"
mock_json_response(url, file)
import_country_geometries()
assert Country.objects.count() == 4
countries = ["Yemen", "Switzerland", "Germany", "South Africa"]
for country in countries:
assert country in Country.objects.values_list("name", flat=True)
@pytest.mark.django_db
def test_import_country_geometries_connection_error(mock_connection_error):
url = GEO_COUNTRIES_URL
mock_connection_error(url)
with pytest.raises(ConnectionError):
import_country_geometries()
| [
"pytest.raises"
] | [((1001, 1031), 'pytest.raises', 'pytest.raises', (['ConnectionError'], {}), '(ConnectionError)\n', (1014, 1031), False, 'import pytest\n')] |
#!/usr/bin/python3
#-*- coding: utf-8 -*-
# modify the .gitignore file
from pathlib import Path
import sys
print('Modifying .gitignore in manim')
fileDir = Path(__file__).absolute().parent
path = Path.joinpath(fileDir.parent, ".gitignore")
if not path.is_file():
print('Cannot find .gitignore in %s.' % str(path.absolute()), file=sys.stderr)
with open(path, 'a') as f:
f.write('\n' + fileDir.name)
f.write('\nbuild.py')
f.write('\nbuild.ps1\n')
print('Done')
# copy run code to manim project
import shutil
print('Copy build.py to %s' % path.parent)
cmd = Path.joinpath(fileDir, 'build.py')
shutil.copy(cmd, path.parent)
print('Done')
print('Copy build.ps1 to %s' % path.parent)
cmd = Path.joinpath(fileDir, 'build.ps1')
shutil.copy(cmd, path.parent)
print('Done')
print('Does .vscode exist?')
vscode_path = Path.joinpath(fileDir.parent, '.vscode')
if vscode_path.is_dir():
print('Yes, .vscode exists')
else:
print('No. Making .vscode')
vscode_path.mkdir()
print('Copy tasks.json %s' % path.parent)
shutil.copy(
Path.joinpath(fileDir, 'tasks.json'),
vscode_path)
print('Done')
# Making media directory
print('Making media directory')
mediaDir = Path.joinpath(fileDir, 'media')
if mediaDir.is_dir():
print('media directory already exists.')
else:
mediaDir.mkdir()
print('Done') | [
"shutil.copy",
"pathlib.Path.joinpath",
"pathlib.Path"
] | [((198, 241), 'pathlib.Path.joinpath', 'Path.joinpath', (['fileDir.parent', '""".gitignore"""'], {}), "(fileDir.parent, '.gitignore')\n", (211, 241), False, 'from pathlib import Path\n'), ((577, 611), 'pathlib.Path.joinpath', 'Path.joinpath', (['fileDir', '"""build.py"""'], {}), "(fileDir, 'build.py')\n", (590, 611), False, 'from pathlib import Path\n'), ((612, 641), 'shutil.copy', 'shutil.copy', (['cmd', 'path.parent'], {}), '(cmd, path.parent)\n', (623, 641), False, 'import shutil\n'), ((707, 742), 'pathlib.Path.joinpath', 'Path.joinpath', (['fileDir', '"""build.ps1"""'], {}), "(fileDir, 'build.ps1')\n", (720, 742), False, 'from pathlib import Path\n'), ((743, 772), 'shutil.copy', 'shutil.copy', (['cmd', 'path.parent'], {}), '(cmd, path.parent)\n', (754, 772), False, 'import shutil\n'), ((831, 871), 'pathlib.Path.joinpath', 'Path.joinpath', (['fileDir.parent', '""".vscode"""'], {}), "(fileDir.parent, '.vscode')\n", (844, 871), False, 'from pathlib import Path\n'), ((1190, 1221), 'pathlib.Path.joinpath', 'Path.joinpath', (['fileDir', '"""media"""'], {}), "(fileDir, 'media')\n", (1203, 1221), False, 'from pathlib import Path\n'), ((1051, 1087), 'pathlib.Path.joinpath', 'Path.joinpath', (['fileDir', '"""tasks.json"""'], {}), "(fileDir, 'tasks.json')\n", (1064, 1087), False, 'from pathlib import Path\n'), ((158, 172), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (162, 172), False, 'from pathlib import Path\n')] |
from asnake.client import ASnakeClient
class ArchivesSpaceClientError(Exception):
pass
class ArchivesSpaceClient:
def __init__(self, baseurl, username, password, repo_id):
self.client = ASnakeClient(
baseurl=baseurl,
username=username,
password=password)
self.repo_id = repo_id
def get_resource(self, resource_id):
"""Returns a JSON representation of a resource, or raises an exception if an error occurs."""
resource = self.client.get("/repositories/{}/resources/{}".format(self.repo_id, resource_id))
if resource.status_code == 200:
return resource.json()
else:
raise ArchivesSpaceClientError(resource.json()["error"])
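
# Minimal usage sketch (the base URL, credentials and repository id below are placeholders):
#
#   client = ArchivesSpaceClient("https://aspace.example.org/api", "admin", "secret", repo_id=2)
#   resource = client.get_resource(123)   # parsed JSON dict, or raises ArchivesSpaceClientError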
| [
"asnake.client.ASnakeClient"
] | [((206, 273), 'asnake.client.ASnakeClient', 'ASnakeClient', ([], {'baseurl': 'baseurl', 'username': 'username', 'password': 'password'}), '(baseurl=baseurl, username=username, password=password)\n', (218, 273), False, 'from asnake.client import ASnakeClient\n')] |
import os, sys
import pickle
import time
from datetime import timedelta
import numpy as np
from PIL import Image
from memory_profiler import profile
# add the dataset directory to the module search path
dataset_dir = os.path.join(os.getcwd(), "git", "deep-learning-from-scratch")
sys.path.append(dataset_dir)
from dataset.mnist import load_mnist
import functions as fn
def get_data():
(x_train, t_train), (x_test, t_test) = \
load_mnist(flatten=True, normalize=True, one_hot_label=False)
return x_test, t_test
def init_network():
path = os.path.join(os.getcwd(), "git", "deep-learning-from-scratch", "ch03", 'sample_weight.pkl')
with open(path, 'rb') as f:
network = pickle.load(f)
return network
def predict(network, x):
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = fn.sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = fn.sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = fn.softmax(a3)
return y
@profile
def plain(x, t, network):
start = time.time()
accuracy_cnt = 0
for i in range(len(x)):
y = predict(network, x[i])
        p = np.argmax(y)  # get the index of the element with the highest probability
if p == t[i]:
accuracy_cnt += 1
print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
elapsed = time.time() - start
print ("time: {} ".format(str(timedelta(seconds=elapsed))))
@profile
def batch(x, t, network):
start = time.time()
batch_size = 100
accuracy_cnt = 0
for i in range(0, len(x), batch_size):
x_batch = x[i:i+batch_size]
y_batch = predict(network, x_batch)
        p = np.argmax(y_batch, axis=1)  # get the index of the most probable class in each row
accuracy_cnt += np.sum(p == t[i:i+batch_size])
print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
elapsed = time.time() - start
print ("time: {} ".format(str(timedelta(seconds=elapsed))))
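
# Note on the batched path above: predict() returns one row of class scores per input image,
# so np.argmax(..., axis=1) picks the most probable class for each row. A tiny illustration:
#   np.argmax(np.array([[0.1, 0.7, 0.2],
#                       [0.5, 0.3, 0.2]]), axis=1)   # -> array([1, 0])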
x, t = get_data()
network = init_network()
plain(x, t, network)
batch(x, t, network)
| [
"functions.softmax",
"dataset.mnist.load_mnist",
"pickle.load",
"numpy.argmax",
"os.getcwd",
"datetime.timedelta",
"numpy.sum",
"numpy.dot",
"time.time",
"functions.sigmoid",
"sys.path.append"
] | [((250, 278), 'sys.path.append', 'sys.path.append', (['dataset_dir'], {}), '(dataset_dir)\n', (265, 278), False, 'import os, sys\n'), ((200, 211), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (209, 211), False, 'import os, sys\n'), ((411, 472), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'flatten': '(True)', 'normalize': '(True)', 'one_hot_label': '(False)'}), '(flatten=True, normalize=True, one_hot_label=False)\n', (421, 472), False, 'from dataset.mnist import load_mnist\n'), ((893, 907), 'functions.sigmoid', 'fn.sigmoid', (['a1'], {}), '(a1)\n', (903, 907), True, 'import functions as fn\n'), ((946, 960), 'functions.sigmoid', 'fn.sigmoid', (['a2'], {}), '(a2)\n', (956, 960), True, 'import functions as fn\n'), ((998, 1012), 'functions.softmax', 'fn.softmax', (['a3'], {}), '(a3)\n', (1008, 1012), True, 'import functions as fn\n'), ((1074, 1085), 'time.time', 'time.time', ([], {}), '()\n', (1083, 1085), False, 'import time\n'), ((1474, 1485), 'time.time', 'time.time', ([], {}), '()\n', (1483, 1485), False, 'import time\n'), ((544, 555), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (553, 555), False, 'import os, sys\n'), ((673, 687), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (684, 687), False, 'import pickle\n'), ((865, 878), 'numpy.dot', 'np.dot', (['x', 'W1'], {}), '(x, W1)\n', (871, 878), True, 'import numpy as np\n'), ((917, 931), 'numpy.dot', 'np.dot', (['z1', 'W2'], {}), '(z1, W2)\n', (923, 931), True, 'import numpy as np\n'), ((970, 984), 'numpy.dot', 'np.dot', (['z2', 'W3'], {}), '(z2, W3)\n', (976, 984), True, 'import numpy as np\n'), ((1181, 1193), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (1190, 1193), True, 'import numpy as np\n'), ((1342, 1353), 'time.time', 'time.time', ([], {}), '()\n', (1351, 1353), False, 'import time\n'), ((1663, 1689), 'numpy.argmax', 'np.argmax', (['y_batch'], {'axis': '(1)'}), '(y_batch, axis=1)\n', (1672, 1689), True, 'import numpy as np\n'), ((1736, 1768), 'numpy.sum', 'np.sum', (['(p == t[i:i + batch_size])'], {}), '(p == t[i:i + batch_size])\n', (1742, 1768), True, 'import numpy as np\n'), ((1841, 1852), 'time.time', 'time.time', ([], {}), '()\n', (1850, 1852), False, 'import time\n'), ((1396, 1422), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'elapsed'}), '(seconds=elapsed)\n', (1405, 1422), False, 'from datetime import timedelta\n'), ((1895, 1921), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'elapsed'}), '(seconds=elapsed)\n', (1904, 1921), False, 'from datetime import timedelta\n')] |
from setuptools import setup, find_packages
import eden_client_api as api
setup(name='edenchain-client-sdk', version=api.__version__, description='edenchain client sdk', author='Edenpartners', author_email='<EMAIL>', license='MIT', packages= find_packages() ,
install_requires=[
'requests',
'base58',
'cryptoconditions',
'python-rapidjson',
'pysha3',
'eth_account'
],
py_modules=['eden_client_api'],
    keywords=['edenchain', 'api', 'sdk'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
],
zip_safe=False)
| [
"setuptools.find_packages"
] | [((244, 259), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (257, 259), False, 'from setuptools import setup, find_packages\n')] |
import re
# TODO: parallel tokenization extension for very long texts
class TokenizerBase:
    """Base class for tokenizers. Subclass it and implement the core segmentation algorithm in find_word."""
spaces = re.compile("(\r\n|\s)", re.U)
english = re.compile("[a-zA-Z0-9]", re.U)
chinese = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._%\-]+)", re.U)
def cut(self, text):
return list(self._cut(text))
def _cut(self, text):
        # split the long text into sentence blocks
for block in self.chinese.split(text):
if not block:
continue
if self.chinese.match(block):
yield from self.cut_block(block)
else:
for s in self.spaces.split(block):
if self.spaces.match(s):
yield s
else:
yield from s
def cut_block(self, sentence):
        # tokenize the text after splitting it into blocks and sentences
buf = ""
for word in self.find_word(sentence):
if len(word) == 1 and self.english.match(word):
buf += word
else:
if buf:
yield buf
buf = ""
yield word
if buf:
yield buf
def find_word(self, sentence):
        # implement the core of the segmentation algorithm here:
        # discover the words that can be formed from the sentence and return an iterable
raise NotImplementedError
if __name__ == "__main__":
    # a simple test
import dataset
class Tokenizer(TokenizerBase):
def find_word(self, sentence):
for word in sentence:
yield word
tokenizer = Tokenizer()
for text in dataset.load_sentences():
print(tokenizer.cut(text))
| [
"dataset.load_sentences",
"re.compile"
] | [((106, 136), 're.compile', 're.compile', (["'(\\r\\n|\\\\s)'", 're.U'], {}), "('(\\r\\n|\\\\s)', re.U)\n", (116, 136), False, 'import re\n'), ((150, 181), 're.compile', 're.compile', (['"""[a-zA-Z0-9]"""', 're.U'], {}), "('[a-zA-Z0-9]', re.U)\n", (160, 181), False, 'import re\n'), ((196, 244), 're.compile', 're.compile', (['"""([一-鿕a-zA-Z0-9+#&\\\\._%\\\\-]+)"""', 're.U'], {}), "('([一-鿕a-zA-Z0-9+#&\\\\._%\\\\-]+)', re.U)\n", (206, 244), False, 'import re\n'), ((1504, 1528), 'dataset.load_sentences', 'dataset.load_sentences', ([], {}), '()\n', (1526, 1528), False, 'import dataset\n')] |
"""
Revision Author: <NAME>
Sources: python documentation, class material
Description: main file for flask server
"""
import flask
from flask import render_template
from flask import request
from flask import url_for
import uuid
import json
import logging
# Date handling
import arrow
# for interpreting local times
from dateutil import tz
# OAuth2 - Google library implementation for convenience
from oauth2client import client
# used in oauth2 flow
import httplib2
# Google API for services
from apiclient import discovery
import timeblock
###
# Globals
###
import config
if __name__ == "__main__":
CONFIG = config.configuration()
else:
CONFIG = config.configuration(proxied=True)
app = flask.Flask(__name__)
app.debug=CONFIG.DEBUG
app.logger.setLevel(logging.DEBUG)
app.secret_key=CONFIG.SECRET_KEY
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
CLIENT_SECRET_FILE = CONFIG.GOOGLE_KEY_FILE ## You'll need this
APPLICATION_NAME = 'MeetMe class project'
#############################
# Pages (routed from URLs)
#############################
@app.route("/")
@app.route("/index")
def index():
app.logger.debug("Entering index")
if 'begin_date' not in flask.session:
init_session_values()
return render_template('index.html')
#######
# The choose route checks for valid credentials and handles auth routing on every
# request. All input from the web page goes through here; if the request method is
# POST, the event lists of the selected calendars are fetched.
@app.route("/choose", methods=['POST', 'GET'])
def choose():
app.logger.debug("Checking credentials for Google calendar access")
credentials = valid_credentials()
if not credentials:
app.logger.debug("Redirecting to authorization")
return flask.redirect(flask.url_for('oauth2callback'))
#get calendars before method check to use cal summary
gcal_service = get_gcal_service(credentials)
app.logger.debug("Returned from get_gcal_service")
flask.g.calendars = list_calendars(gcal_service)
#request is from submission of selected calendars
if request.method == 'POST':
calendarids = request.form.getlist('calendar')
#grab summaries for each calendar id to output as header of events per calendar(in separate module file)
calsummaries = getSummaries(calendarids, flask.g.calendars)
#create events
events = getEvents(calendarids, calsummaries, credentials, gcal_service)
flask.g.events = events
# create list of days
daysList = timeblock.getDayList(flask.session['begin_date'], flask.session['end_date'])
"""
# populate dict of daysAgenda by calendar summary
daysAgendaByCal = timeblock.populateDaysAgendaByCal(daysList, events)
"""
# populate agenda with consolidated events
daysAgenda = timeblock.populateDaysAgenda(daysList, events)
flask.g.agenda = timeblock.getEventsInRange(daysAgenda, flask.session['begin_time'], flask.session['end_time'])
return render_template('index.html')
###
# ADDED FUNCTION:
# getEvents: fetch the events from the calendars chosen in the template
###
def getEvents(calid, calsum, credentials, service):
eventsbycalendar = {}
for count, ids in enumerate(calid):
events = service.events().list(calendarId=ids,
singleEvents=True,
orderBy='startTime',
timeMin=flask.session['begin_date'],
timeMax=flask.session['end_date']).execute()
eventclasslist = []
for event in events['items']:
if 'transparency' not in event:
starttime = event['start']
endtime = event['end']
#to determine whether is all day event or if times specified
if 'dateTime' in starttime:
start = starttime['dateTime']
end = endtime['dateTime']
else:
start = starttime['date']
end = endtime['date']
if 'summary' in event:
summ = event['summary']
else:
summ = 'no title'
eventclass = timeblock.timeblock(start, end, 'event', summ)
# to split events if they include multiple days
passedEvent = timeblock.fixEventTimes(eventclass)
try:
for aEvent in passedEvent:
eventclasslist.append(aEvent)
except TypeError:
eventclasslist.append(passedEvent)
eventsbycalendar[calsum[count]] = eventclasslist
return eventsbycalendar
# ADDED FUNCTION:
# get summaries of calendar from object dict
def getSummaries(calendarid, calendardict):
calsummaries = []
for ids in calendarid:
for calendars in calendardict:
if ids in calendars['id']:
calsummaries.append(calendars['summary'])
return calsummaries
###
# google credential and service object functions
###
#checks for valid credentials
def valid_credentials():
# will eventually redirect to oauth2callback
if 'credentials' not in flask.session:
return None
    # convert the stored JSON credentials back into an OAuth2Credentials object
credentials = client.OAuth2Credentials.from_json(
flask.session['credentials'])
if (credentials.invalid or
credentials.access_token_expired):
return None
return credentials
# retrieve the service object for google calendar
def get_gcal_service(credentials):
app.logger.debug("Entering get_gcal_service")
http_auth = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http_auth)
app.logger.debug("Returning service")
return service
# oauth2callback directs to google for valid credentials
@app.route('/oauth2callback')
def oauth2callback():
app.logger.debug("Entering oauth2callback")
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
else:
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('choose'))
#####
# routes to affect things on page
#####
@app.route('/setrange', methods=['POST'])
def setrange():
"""
User chose a date range with the bootstrap daterange
widget.
"""
app.logger.debug("Entering setrange")
flask.flash("Setrange gave us '{}'".format(
request.form.get('daterange')))
daterange = request.form.get('daterange')
flask.session['daterange'] = daterange
daterange_parts = daterange.split()
flask.session['begin_date'] = interpret_date(daterange_parts[0])
flask.session['end_date'] = interpret_date(daterange_parts[2])
app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format(
        daterange_parts[0], daterange_parts[2],
flask.session['begin_date'], flask.session['end_date']))
end = arrow.get(flask.session['end_date'])
end = end.shift(minutes=-1)
#print('END DATE CEILING: ', end.ceil('day').isoformat())
flask.session['end_date'] = end.ceil('day').isoformat()
flask.session["begin_time"] = interpret_time(request.form.get('timestart'))
flask.session["end_time"] = interpret_time(request.form.get('timeend'))
return flask.redirect(flask.url_for("choose"))
####
# Initialize session variables
####
# must be run in app context. can't call from main
def init_session_values():
# Default date span = tomorrow to 1 week from now
now = arrow.now('local')
    tomorrow = now.shift(days=+1)
    nextweek = now.shift(days=+7)
flask.session["begin_date"] = tomorrow.floor('day').isoformat()
flask.session["end_date"] = nextweek.ceil('day').isoformat()
flask.session["daterange"] = "{} - {}".format(
tomorrow.format("MM/DD/YYYY"),
nextweek.format("MM/DD/YYYY"))
# Default time span each day, 8 to 5
#flask.session["begin_time"] = interpret_time("9am")
#flask.session["end_time"] = interpret_time("5pm")
def interpret_time( text ):
"""
Read time in a human-compatible format and
interpret as ISO format with local timezone.
May throw exception if time can't be interpreted. In that
case it will also flash a message explaining accepted formats.
"""
app.logger.debug("Decoding time '{}'".format(text))
time_formats = ["ha", "h:mma", "h:mm a", "H:mm"]
try:
as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())
as_arrow = as_arrow.replace(year=2016) #HACK see below
app.logger.debug("Succeeded interpreting time")
except:
app.logger.debug("Failed to interpret time")
flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm"
.format(text))
raise
return as_arrow.isoformat()
#HACK #Workaround
# isoformat() on raspberry Pi does not work for some dates
# far from now. It will fail with an overflow from time stamp out
# of range while checking for daylight savings time. Workaround is
# to force the date-time combination into the year 2016, which seems to
# get the timestamp into a reasonable range. This workaround should be
# removed when Arrow or Dateutil.tz is fixed.
# FIXME: Remove the workaround when arrow is fixed (but only after testing
# on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
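
# For reference, interpret_time accepts the formats listed in time_formats above; e.g.
# interpret_time("1:30pm") and interpret_time("13:30") both yield an ISO string for 13:30
# local time (with the year forced to 2016 by the workaround described above).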
def interpret_date( text ):
"""
Convert text of date to ISO format used internally,
with the local time zone.
"""
try:
as_arrow = arrow.get(text, "MM/DD/YYYY").replace(
tzinfo=tz.tzlocal())
except:
flask.flash("Date '{}' didn't fit expected format 12/31/2001")
raise
return as_arrow.isoformat()
def next_day(isotext):
"""
ISO date + 1 day (used in query to Google calendar)
"""
as_arrow = arrow.get(isotext)
    return as_arrow.shift(days=+1).isoformat()
####
#
# Functions (NOT pages) that return some information
#
####
def list_calendars(service):
app.logger.debug("Entering list_calendars")
calendar_list = service.calendarList().list().execute()["items"]
result = [ ]
for cal in calendar_list:
kind = cal["kind"]
id = cal["id"]
if "description" in cal:
desc = cal["description"]
else:
desc = "(no description)"
summary = cal["summary"]
# Optional binary attributes with False as default
selected = ("selected" in cal) and cal["selected"]
primary = ("primary" in cal) and cal["primary"]
result.append(
{ "kind": kind,
"id": id,
"summary": summary,
"selected": selected,
"primary": primary
})
return sorted(result, key=cal_sort_key)
def cal_sort_key( cal ):
"""
Sort key for the list of calendars: primary calendar first,
then other selected calendars, then unselected calendars.
(" " sorts before "X", and tuples are compared piecewise)
"""
if cal["selected"]:
selected_key = " "
else:
selected_key = "X"
if cal["primary"]:
primary_key = " "
else:
primary_key = "X"
return (primary_key, selected_key, cal["summary"])
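
# Worked example of the ordering (illustrative names): a primary, selected calendar "Work",
# a selected calendar "Home", and an unselected calendar "Archive" get the sort keys
# (" ", " ", "Work"), ("X", " ", "Home") and ("X", "X", "Archive") respectively, so the
# primary calendar sorts first, then the other selected calendars, then the rest.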
#################
#
# Functions used within the templates
#
#################
@app.template_filter( 'fmtdate' )
def format_arrow_date( date ):
try:
normal = arrow.get( date )
return normal.format("ddd MM/DD")
except:
return "(bad date)"
@app.template_filter( 'fmttime' )
def format_arrow_time( time ):
try:
normal = arrow.get( time )
return normal.format("HH:mm")
except:
return "(bad time)"
#############
if __name__ == "__main__":
# App is created above so that it will
# exist whether this is 'main' or not
# (e.g., if we are running under green unicorn)
app.run(port=CONFIG.PORT,host="0.0.0.0")
| [
"flask.render_template",
"flask.request.args.get",
"timeblock.populateDaysAgenda",
"dateutil.tz.tzlocal",
"flask.Flask",
"apiclient.discovery.build",
"flask.flash",
"timeblock.timeblock",
"flask.request.form.get",
"timeblock.fixEventTimes",
"timeblock.getDayList",
"arrow.now",
"flask.redirect",
"httplib2.Http",
"flask.request.form.getlist",
"oauth2client.client.OAuth2Credentials.from_json",
"flask.url_for",
"arrow.get",
"config.configuration",
"timeblock.getEventsInRange"
] | [((703, 724), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (714, 724), False, 'import flask\n'), ((619, 641), 'config.configuration', 'config.configuration', ([], {}), '()\n', (639, 641), False, 'import config\n'), ((661, 695), 'config.configuration', 'config.configuration', ([], {'proxied': '(True)'}), '(proxied=True)\n', (681, 695), False, 'import config\n'), ((1237, 1266), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1252, 1266), False, 'from flask import render_template\n'), ((2990, 3019), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (3005, 3019), False, 'from flask import render_template\n'), ((5338, 5402), 'oauth2client.client.OAuth2Credentials.from_json', 'client.OAuth2Credentials.from_json', (["flask.session['credentials']"], {}), "(flask.session['credentials'])\n", (5372, 5402), False, 'from oauth2client import client\n'), ((5731, 5780), 'apiclient.discovery.build', 'discovery.build', (['"""calendar"""', '"""v3"""'], {'http': 'http_auth'}), "('calendar', 'v3', http=http_auth)\n", (5746, 5780), False, 'from apiclient import discovery\n'), ((7005, 7034), 'flask.request.form.get', 'request.form.get', (['"""daterange"""'], {}), "('daterange')\n", (7021, 7034), False, 'from flask import request\n'), ((7447, 7483), 'arrow.get', 'arrow.get', (["flask.session['end_date']"], {}), "(flask.session['end_date'])\n", (7456, 7483), False, 'import arrow\n'), ((8032, 8050), 'arrow.now', 'arrow.now', (['"""local"""'], {}), "('local')\n", (8041, 8050), False, 'import arrow\n'), ((10404, 10422), 'arrow.get', 'arrow.get', (['isotext'], {}), '(isotext)\n', (10413, 10422), False, 'import arrow\n'), ((2094, 2126), 'flask.request.form.getlist', 'request.form.getlist', (['"""calendar"""'], {}), "('calendar')\n", (2114, 2126), False, 'from flask import request\n'), ((2501, 2577), 'timeblock.getDayList', 'timeblock.getDayList', (["flask.session['begin_date']", "flask.session['end_date']"], {}), "(flask.session['begin_date'], flask.session['end_date'])\n", (2521, 2577), False, 'import timeblock\n'), ((2811, 2857), 'timeblock.populateDaysAgenda', 'timeblock.populateDaysAgenda', (['daysList', 'events'], {}), '(daysList, events)\n', (2839, 2857), False, 'import timeblock\n'), ((2883, 2982), 'timeblock.getEventsInRange', 'timeblock.getEventsInRange', (['daysAgenda', "flask.session['begin_time']", "flask.session['end_time']"], {}), "(daysAgenda, flask.session['begin_time'], flask.\n session['end_time'])\n", (2909, 2982), False, 'import timeblock\n'), ((5702, 5717), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (5715, 5717), False, 'import httplib2\n'), ((6336, 6360), 'flask.redirect', 'flask.redirect', (['auth_uri'], {}), '(auth_uri)\n', (6350, 6360), False, 'import flask\n'), ((6440, 6470), 'flask.request.args.get', 'flask.request.args.get', (['"""code"""'], {}), "('code')\n", (6462, 6470), False, 'import flask\n'), ((7687, 7716), 'flask.request.form.get', 'request.form.get', (['"""timestart"""'], {}), "('timestart')\n", (7703, 7716), False, 'from flask import request\n'), ((7765, 7792), 'flask.request.form.get', 'request.form.get', (['"""timeend"""'], {}), "('timeend')\n", (7781, 7792), False, 'from flask import request\n'), ((7820, 7843), 'flask.url_for', 'flask.url_for', (['"""choose"""'], {}), "('choose')\n", (7833, 7843), False, 'import flask\n'), ((11977, 11992), 'arrow.get', 'arrow.get', (['date'], {}), '(date)\n', (11986, 11992), False, 'import arrow\n'), ((12169, 12184), 
'arrow.get', 'arrow.get', (['time'], {}), '(time)\n', (12178, 12184), False, 'import arrow\n'), ((1731, 1762), 'flask.url_for', 'flask.url_for', (['"""oauth2callback"""'], {}), "('oauth2callback')\n", (1744, 1762), False, 'import flask\n'), ((6102, 6149), 'flask.url_for', 'flask.url_for', (['"""oauth2callback"""'], {'_external': '(True)'}), "('oauth2callback', _external=True)\n", (6115, 6149), False, 'import flask\n'), ((6643, 6666), 'flask.url_for', 'flask.url_for', (['"""choose"""'], {}), "('choose')\n", (6656, 6666), False, 'import flask\n'), ((6957, 6986), 'flask.request.form.get', 'request.form.get', (['"""daterange"""'], {}), "('daterange')\n", (6973, 6986), False, 'from flask import request\n'), ((10184, 10246), 'flask.flash', 'flask.flash', (['"""Date \'{}\' didn\'t fit expected format 12/31/2001"""'], {}), '("Date \'{}\' didn\'t fit expected format 12/31/2001")\n', (10195, 10246), False, 'import flask\n'), ((4268, 4314), 'timeblock.timeblock', 'timeblock.timeblock', (['start', 'end', '"""event"""', 'summ'], {}), "(start, end, 'event', summ)\n", (4287, 4314), False, 'import timeblock\n'), ((4410, 4445), 'timeblock.fixEventTimes', 'timeblock.fixEventTimes', (['eventclass'], {}), '(eventclass)\n', (4433, 4445), False, 'import timeblock\n'), ((8947, 8976), 'arrow.get', 'arrow.get', (['text', 'time_formats'], {}), '(text, time_formats)\n', (8956, 8976), False, 'import arrow\n'), ((8992, 9004), 'dateutil.tz.tzlocal', 'tz.tzlocal', ([], {}), '()\n', (9002, 9004), False, 'from dateutil import tz\n'), ((10094, 10123), 'arrow.get', 'arrow.get', (['text', '"""MM/DD/YYYY"""'], {}), "(text, 'MM/DD/YYYY')\n", (10103, 10123), False, 'import arrow\n'), ((10150, 10162), 'dateutil.tz.tzlocal', 'tz.tzlocal', ([], {}), '()\n', (10160, 10162), False, 'from dateutil import tz\n')] |
""" The file provides the network ip address, works with py 2 and 3. and need external package """
from netifaces import interfaces, ifaddresses, AF_INET
def get_ip_address(ifname):
""" Method will provide the ip address for an interface. """
try:
for ifaceName in interfaces():
if ifaceName == ifname :
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr':'No IP addr'}] )]
return addresses[0]
return 'Unable to find NIC.'
except:
return 'Unable to find IP(Execption).'
# Test code.
#ip = get_ip_address('eno1')
#ip = get_ip_address('wlan0')
#print (ip)
| [
"netifaces.interfaces",
"netifaces.ifaddresses"
] | [((283, 295), 'netifaces.interfaces', 'interfaces', ([], {}), '()\n', (293, 295), False, 'from netifaces import interfaces, ifaddresses, AF_INET\n'), ((382, 404), 'netifaces.ifaddresses', 'ifaddresses', (['ifaceName'], {}), '(ifaceName)\n', (393, 404), False, 'from netifaces import interfaces, ifaddresses, AF_INET\n')] |
import graphene
from graphene_django import DjangoObjectType
from graphql import GraphQLError
from django.db.models import Q
from graphql_jwt.decorators import login_required
import django_filters
from .models import Customer
from users.schema import UserType
#########
# Types #
#########
class CustomerType(DjangoObjectType):
class Meta:
model = Customer
###########
# Queries #
###########
class CustomerFilter(django_filters.FilterSet):
class Meta:
model = Customer
        fields = ["id", "name", "business_name", "created_at"]  # note: this FilterSet is not wired into the Query below, so these fields currently have no effect
class Query(graphene.ObjectType):
customer = graphene.Field(CustomerType, lookup_id=graphene.String(required=True))
customers = graphene.List(
CustomerType,
search=graphene.String(),
first=graphene.Int(),
skip=graphene.Int()
)
@login_required
def resolve_customer(self, info, lookup_id, **kwargs):
"""
Return a single customer.
"""
cust = Customer.objects.filter(lookup_id=lookup_id).first()
return cust
@login_required
def resolve_customers(self, info, search=None, first=None, skip=None, **kwargs):
"""
Return a list of customers according to search parameters. By default it returns ALL.
"""
qs = Customer.objects.all()
if search:
filter = (
Q(url__icontains=search) |
Q(description__icontains=search)
)
qs = qs.filter(filter)
if skip:
qs = qs[skip:]
if first:
qs = qs[:first]
return qs
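
# Illustrative GraphQL queries these resolvers serve (graphene exposes snake_case arguments
# and fields as camelCase by default; the lookup id value is a placeholder):
#
#   { customer(lookupId: "some-lookup-id") { name businessName website } }
#   { customers(search: "plumbing", first: 10) { id name createdAt } }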
#############
# Mutations #
#############
class CreateCustomer(graphene.Mutation):
created_by = graphene.Field(UserType)
customer = graphene.Field(CustomerType)
class Arguments:
name = graphene.String()
business_name = graphene.String()
phone_number = graphene.String()
email = graphene.String()
website = graphene.String()
description = graphene.String()
@login_required
def mutate(self, info, name, business_name, phone_number, email, website, description):
user = info.context.user
cust = Customer(
name=name,
business_name=business_name,
phone_number=phone_number,
email=email,
website=website,
description=description,
created_by=user
)
cust.save()
return CreateCustomer(customer=cust)
class Mutation(graphene.AbstractType):
create_customer = CreateCustomer.Field() | [
"graphene.String",
"django.db.models.Q",
"graphene.Int",
"graphene.Field"
] | [((1759, 1783), 'graphene.Field', 'graphene.Field', (['UserType'], {}), '(UserType)\n', (1773, 1783), False, 'import graphene\n'), ((1799, 1827), 'graphene.Field', 'graphene.Field', (['CustomerType'], {}), '(CustomerType)\n', (1813, 1827), False, 'import graphene\n'), ((1865, 1882), 'graphene.String', 'graphene.String', ([], {}), '()\n', (1880, 1882), False, 'import graphene\n'), ((1907, 1924), 'graphene.String', 'graphene.String', ([], {}), '()\n', (1922, 1924), False, 'import graphene\n'), ((1948, 1965), 'graphene.String', 'graphene.String', ([], {}), '()\n', (1963, 1965), False, 'import graphene\n'), ((1982, 1999), 'graphene.String', 'graphene.String', ([], {}), '()\n', (1997, 1999), False, 'import graphene\n'), ((2018, 2035), 'graphene.String', 'graphene.String', ([], {}), '()\n', (2033, 2035), False, 'import graphene\n'), ((2058, 2075), 'graphene.String', 'graphene.String', ([], {}), '()\n', (2073, 2075), False, 'import graphene\n'), ((680, 710), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (695, 710), False, 'import graphene\n'), ((780, 797), 'graphene.String', 'graphene.String', ([], {}), '()\n', (795, 797), False, 'import graphene\n'), ((813, 827), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (825, 827), False, 'import graphene\n'), ((842, 856), 'graphene.Int', 'graphene.Int', ([], {}), '()\n', (854, 856), False, 'import graphene\n'), ((1412, 1436), 'django.db.models.Q', 'Q', ([], {'url__icontains': 'search'}), '(url__icontains=search)\n', (1413, 1436), False, 'from django.db.models import Q\n'), ((1455, 1487), 'django.db.models.Q', 'Q', ([], {'description__icontains': 'search'}), '(description__icontains=search)\n', (1456, 1487), False, 'from django.db.models import Q\n')] |
from . import views
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('add', views.add),
path('create', views.create), # create method url
    # read method url; the path is empty so the site root (e.g. localhost) shows the read view
path('', views.read),
path('update/<id>', views.update), # update method url
path('delete/<id>', views.delete), # delete method url
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| [
"django.conf.urls.static.static",
"django.urls.path"
] | [((145, 167), 'django.urls.path', 'path', (['"""add"""', 'views.add'], {}), "('add', views.add)\n", (149, 167), False, 'from django.urls import path\n'), ((173, 201), 'django.urls.path', 'path', (['"""create"""', 'views.create'], {}), "('create', views.create)\n", (177, 201), False, 'from django.urls import path\n'), ((303, 323), 'django.urls.path', 'path', (['""""""', 'views.read'], {}), "('', views.read)\n", (307, 323), False, 'from django.urls import path\n'), ((329, 362), 'django.urls.path', 'path', (['"""update/<id>"""', 'views.update'], {}), "('update/<id>', views.update)\n", (333, 362), False, 'from django.urls import path\n'), ((389, 422), 'django.urls.path', 'path', (['"""delete/<id>"""', 'views.delete'], {}), "('delete/<id>', views.delete)\n", (393, 422), False, 'from django.urls import path\n'), ((486, 547), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (492, 547), False, 'from django.conf.urls.static import static\n')] |
from typing import Text, Dict, Any
from sagas.nlu.rules_header import *
from sagas.nlu.registries import sinkers_fn
import sagas.tracker_fn as tc
import logging
logger = logging.getLogger(__name__)
class Rules_af(LangSpecBase):
@staticmethod
def prepare(meta: Dict[Text, Any]):
tc.emp('yellow', '.. Rules_af(Afrikaans, 南非荷兰语) prepare phrase')
def verb_rules(self):
pat, actions_obj = (self.pat, self.actions_obj)
self.collect(pats=[
# $ saf 'Ry asseblief stadiger.'
pat(5, name='behave_ride').verb(extract_for('word', 'advmod'),
behaveof('ride', 'v'),
advmod=specs_trans('*', 'slow', 'fast').opt(raw_fmt=raw_fmt_pos)
),
])
def aux_rules(self):
pat, actions_obj = (self.pat, self.actions_obj)
self.collect(pats=[
# $ saf 'Ek is haastig.'
pat(5, name='desc_hastily').cop(extract_for('word', 'nsubj'),
specs_trans('*', 'hurriedly').opt(raw_fmt=raw_fmt_pos),
nsubj=agency, cop='c_aux'),
])
| [
"logging.getLogger",
"sagas.tracker_fn.emp"
] | [((173, 200), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (190, 200), False, 'import logging\n'), ((299, 363), 'sagas.tracker_fn.emp', 'tc.emp', (['"""yellow"""', '""".. Rules_af(Afrikaans, 南非荷兰语) prepare phrase"""'], {}), "('yellow', '.. Rules_af(Afrikaans, 南非荷兰语) prepare phrase')\n", (305, 363), True, 'import sagas.tracker_fn as tc\n')] |
from setuptools import setup, find_packages
setup(
name='gen3dictionary',
version='0.0.0',
description="Gen3 generic data dictionary",
license="Apache",
packages=find_packages(),
install_requires=[
'dictionaryutils==3.4.2',
'requests==2.26.0'
],
dependency_links=[],
package_data={
"gdcdictionary": [
"schemas/*.yaml",
"schemas/projects/*.yaml",
"schemas/projects/*/*.yaml",
]
},
)
| [
"setuptools.find_packages"
] | [((183, 198), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (196, 198), False, 'from setuptools import setup, find_packages\n')] |
import itertools
import typing
import pandas as pd
import plotly.graph_objects as go
from sklearn.preprocessing import MinMaxScaler
from fcapsy.typicality import typicality_avg
from binsdpy.similarity import jaccard, smc, russell_rao
from fcapsy_experiments._styles import css, css_typ
class ConceptTypicality:
count_label = "Count"
def __init__(
self,
concept: "concepts.lattices.Concept",
axis: int = 0,
count: bool = False,
extra_columns: dict[str, "pd.Series"] = None,
typicality_functions: dict[str, dict] = None,
) -> None:
"""Calculates typiclity for given concept
Args:
concept (concepts.lattices.Concept): in which concept the typicality is calculated
axis (int, optional): if typicality is calculated for objects (0) or attributes (1). Defaults to 0.
count (bool, optional): if count of attributes/objects should be included as column. Defaults to False.
extra_columns (dict[str, pandas.Series], optional): extra columns to be included in the table. Defaults to None.
typicality_functions (dict[str, dict], optional): when specified, user can modify default functions which is used for typicality calculation, see default example. Defaults to None.
"""
if typicality_functions is None:
# default typicality configuration
typicality_functions = {
"typ_avg": {
# must be callable
"func": typicality_avg,
"args": {"J": [jaccard], "SMC": [smc], "R": [russell_rao]},
}
}
self._concept = concept
context = self._concept.lattice._context
if axis == 0:
self._items_domain = context.objects
self._items_sets = context._intents
self._concept_core = self._concept.extent
elif axis == 1:
self._items_domain = context.properties
self._items_sets = context._extents
self._concept_core = self._concept.intent
else:
raise ValueError("Invalid axis index")
self.axis = axis
self.df = self._init(concept, count, typicality_functions, extra_columns)
if extra_columns:
extra_columns = list(extra_columns.keys())
self.extra_columns = extra_columns
def _init(self, concept, count, typicality_functions, extra_columns):
columns = []
for name, typicality in typicality_functions.items():
if typicality["args"]:
for arg in typicality["args"].keys():
columns.append(f"{name}({arg})")
else:
columns.append(f"{name}")
df = pd.DataFrame(index=self._concept_core, columns=columns, dtype=float)
for item in self._concept_core:
row = []
for typicality in typicality_functions.values():
function = typicality["func"]
args = typicality["args"].values()
if args:
row = []
for name, arg in typicality["args"].items():
print(name)
row.append(function(item, concept, *arg))
else:
row.append(function(item, concept))
df.loc[item] = row
if count:
counts = (extent.bits().count("1") for extent in self._items_sets)
df[self.count_label] = [
row[1] for row in filter(lambda x: x[0] in self._concept_core, zip(self._items_domain, counts))
]
if extra_columns:
for name, values in extra_columns.items():
df[name] = values
return df
def to_html(self) -> str:
"""Generates html table.
Returns:
str: html output
"""
final_table = pd.DataFrame()
for column in self.df.columns:
round_and_sort = self.df.sort_values(
column, ascending=False, kind="mergesort"
)
final_table[f"{column} order"] = round_and_sort.index
final_table[column] = round_and_sort.reset_index()[column]
df = final_table.reset_index().drop("index", axis=1)
df = df.style.format(precision=3)
df.background_gradient(cmap="RdYlGn")
df.set_table_styles(css + css_typ)
df.hide_index()
return df.to_html()
def to_plotly(self) -> "go.Figure":
"""Generates plotly figure.
Returns:
go.Figure: figure
"""
markers = ["square", "diamond", "triangle-up", "circle", "pentagon"]
scatters = []
df = self.df.sort_values(self.df.columns[0], ascending=False, kind="mergesort")
if "Count" in df.columns:
# scaling Count column
scaler = MinMaxScaler()
df["Count"] = scaler.fit_transform(df["Count"].values.reshape(-1, 1))
if self.extra_columns:
for extra in self.extra_columns:
scaler = MinMaxScaler()
df[extra] = scaler.fit_transform(df[extra].values.reshape(-1, 1))
for column, marker in zip(df.columns, itertools.cycle(markers)):
scatters.append(
go.Scatter(
name=column,
x=df.index,
y=df[column],
mode="markers",
marker_symbol=marker,
marker_line_width=1,
marker_size=8,
)
)
fig = go.Figure(data=scatters)
# layout needs some cleaning
fig.update_layout(
font_family="IBM Plex Sans",
font_color="black",
margin=dict(l=15, r=15, t=15, b=15),
xaxis=dict(
title="objects",
mirror=True,
ticks="inside",
showline=True,
linecolor="black",
linewidth=1,
tickangle=-90,
gridcolor="rgba(200,200,200,1)",
tickfont=dict(size=11),
),
yaxis=dict(
title="typicality",
mirror=True,
ticks="inside",
showline=True,
linecolor="black",
linewidth=1,
range=[0, 1],
dtick=0.1,
gridcolor="rgba(200,200,200,0)",
tickfont=dict(size=11),
),
paper_bgcolor="rgba(255,255,255,1)",
plot_bgcolor="rgba(255,255,255,1)",
legend=dict(
font=dict(size=10),
),
)
return fig
def to_plotly_html(self, default_width: int=700, default_height: int=390) -> str:
"""Generates html version of plotly graph
Args:
default_width (int, optional): default graph width. Defaults to 700.
default_height (int, optional): default graph height. Defaults to 390.
Returns:
str: graph html
"""
return self.to_plotly().to_html(
full_html=False,
include_plotlyjs="cdn",
include_mathjax="cdn",
default_width=default_width,
default_height=default_height,
) | [
"itertools.cycle",
"plotly.graph_objects.Figure",
"plotly.graph_objects.Scatter",
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScaler"
] | [((2773, 2841), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self._concept_core', 'columns': 'columns', 'dtype': 'float'}), '(index=self._concept_core, columns=columns, dtype=float)\n', (2785, 2841), True, 'import pandas as pd\n'), ((3970, 3984), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3982, 3984), True, 'import pandas as pd\n'), ((5676, 5700), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'scatters'}), '(data=scatters)\n', (5685, 5700), True, 'import plotly.graph_objects as go\n'), ((4949, 4963), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4961, 4963), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5292, 5316), 'itertools.cycle', 'itertools.cycle', (['markers'], {}), '(markers)\n', (5307, 5316), False, 'import itertools\n'), ((5148, 5162), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (5160, 5162), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((5364, 5491), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'name': 'column', 'x': 'df.index', 'y': 'df[column]', 'mode': '"""markers"""', 'marker_symbol': 'marker', 'marker_line_width': '(1)', 'marker_size': '(8)'}), "(name=column, x=df.index, y=df[column], mode='markers',\n marker_symbol=marker, marker_line_width=1, marker_size=8)\n", (5374, 5491), True, 'import plotly.graph_objects as go\n')] |
# Generated by Django 3.0.6 on 2020-06-03 04:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vmware', '0003_deploylists_vcenter'),
]
operations = [
migrations.AddField(
model_name='deploylists',
name='state',
field=models.BooleanField(default=0),
),
migrations.AddField(
model_name='deploylists',
name='user',
field=models.CharField(max_length=255, null=True),
),
]
| [
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((338, 368), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(0)'}), '(default=0)\n', (357, 368), False, 'from django.db import migrations, models\n'), ((491, 534), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(True)'}), '(max_length=255, null=True)\n', (507, 534), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('journal', '0009_auto_20151105_0938'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='old_number',
),
migrations.AlterField(
model_name='journaluser',
name='groups',
field=models.ManyToManyField(help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', to='auth.Group', verbose_name='groups', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='journaluser',
name='user_permissions',
field=models.ManyToManyField(help_text='Specific permissions for this user.', to='auth.Permission', verbose_name='user permissions', blank=True),
preserve_default=True,
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.ManyToManyField"
] | [((251, 314), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""article"""', 'name': '"""old_number"""'}), "(model_name='article', name='old_number')\n", (273, 314), False, 'from django.db import models, migrations\n'), ((465, 661), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'help_text': '"""The groups this user belongs to. A user will get all permissions granted to each of his/her group."""', 'to': '"""auth.Group"""', 'verbose_name': '"""groups"""', 'blank': '(True)'}), "(help_text=\n 'The groups this user belongs to. A user will get all permissions granted to each of his/her group.'\n , to='auth.Group', verbose_name='groups', blank=True)\n", (487, 661), False, 'from django.db import models, migrations\n'), ((823, 966), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'help_text': '"""Specific permissions for this user."""', 'to': '"""auth.Permission"""', 'verbose_name': '"""user permissions"""', 'blank': '(True)'}), "(help_text='Specific permissions for this user.', to=\n 'auth.Permission', verbose_name='user permissions', blank=True)\n", (845, 966), False, 'from django.db import models, migrations\n')] |
#
# COPYRIGHT:
# The Leginon software is Copyright 2003
# The Scripps Research Institute, La Jolla, CA
# For terms of the license agreement
# see http://ami.scripps.edu/software/leginon-license
#
'''
The TargetFilter node takes a list of targets and produces a new list of targets.
It would typically be placed between a TargetFinder and an Acquisition node.
Subclasses need to implement the filterTargets method.
'''
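# Illustration only (not part of the original module): a minimal subclass would
# override filterTargets(), which receives the list of unfinished targets and
# must return the filtered list, e.g.
#
#   class EveryOtherTargetFilter(TargetFilter):
#       def filterTargets(self, targetlist):
#           # hypothetical example: keep every second incoming target
#           return targetlist[::2]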
import math
import node
from leginon import leginondata
import event
import threading
import targethandler
import gui.wx.TargetFilter
class TargetFilter(node.Node, targethandler.TargetWaitHandler):
panelclass = gui.wx.TargetFilter.Panel
settingsclass = leginondata.TargetFilterSettingsData
defaultsettings = {
'bypass':True,
'target type':'acquisition',
'user check': False,
}
eventinputs = node.Node.eventinputs + targethandler.TargetWaitHandler.eventinputs + [event.ImageTargetListPublishEvent]
eventoutputs = node.Node.eventoutputs + targethandler.TargetWaitHandler.eventoutputs + [event.TargetListDoneEvent]
displaytypes = ('acquisition', 'focus', 'preview', 'meter')
def __init__(self, id, session, managerlocation, **kwargs):
node.Node.__init__(self, id, session, managerlocation, **kwargs)
self.userpause = threading.Event()
targethandler.TargetWaitHandler.__init__(self)
self.addEventInput(event.ImageTargetListPublishEvent, self.handleTargetListPublish)
self.addEventInput(event.QueuePublishEvent, self.handleQueuePublish)
self.test = False
self.abort = False
if self.__class__ == TargetFilter:
self.start()
def handleTargetListPublish(self, pubevent):
targetlistdata = pubevent['data']
newtargetlistdata = self.__filterTargetList(targetlistdata,self.settings['target type'])
tid = self.makeTargetListEvent(newtargetlistdata)
self.publish(newtargetlistdata, pubevent=pubevent)
self.setStatus('idle')
status = self.waitForTargetListDone(tid)
e = event.TargetListDoneEvent(targetlistid=targetlistdata.dbid, status=status)
self.outputEvent(e)
def handleQueuePublish(self, pubevent):
'''
		The filter passes the input queue data through to the output unchanged, but the
		target lists in that queue are filtered. The old target lists are dequeued.
'''
queuedata = pubevent['data']
## this is only active (not dequeued) target lists
oldtargetlists = self.getListsInQueue(queuedata)
for oldtargetlist in oldtargetlists:
newtargetlist = self.__filterTargetList(oldtargetlist,self.settings['target type'])
if newtargetlist is not oldtargetlist:
# newtargetlist has already been put in queue, now dequeue old one
donetargetlist = leginondata.DequeuedImageTargetListData(list=oldtargetlist,queue=queuedata)
self.publish(donetargetlist, database=True)
self.publish(queuedata, pubevent=True)
self.setStatus('idle')
def __filterTargetList(self, targetlistdata,type='acquisition'):
'''
- create a new ImageTargetListData for the new targets
- run the custom filter method on targets in this list
- publish each target
- return new target list data
'''
if self.settings['bypass']:
self.logger.info('Bypassing target filter')
return targetlistdata
else:
oldtargets = self.researchTargets(list=targetlistdata,type=type)
alltargets = self.researchTargets(list=targetlistdata)
self.alltargets = alltargets
goodoldtargets = []
self.newtesttargets = False
for oldtarget in oldtargets:
if oldtarget['status'] not in ('done', 'aborted'):
goodoldtargets.append(oldtarget)
self.goodoldtargets = goodoldtargets
self.logger.info('Filter input: %d' % (len(goodoldtargets),))
self.test = False
if len(goodoldtargets) > 0:
newtargets = self.filterTargets(goodoldtargets)
else:
newtargets = goodoldtargets
self.logger.info('Filter output: %d' % (len(newtargets),))
newtargets = self.appendOtherTargets(alltargets,newtargets)
self.displayTargets(newtargets,targetlistdata)
if self.settings['user check'] and len(goodoldtargets) > 0:
self.setStatus('user input')
self.logger.info('Waiting for user to check targets...')
self.panel.enableSubmitTargets()
self.userpause.clear()
self.userpause.wait()
if self.newtesttargets:
# newtargets need to be reset to the results of the new testing
newtargets = self.newtesttargets
self.newtesttargets = False
self.setStatus('processing')
if self.abort:
self.markTargetsDone(alltargets)
self.abort = False
return targetlistdata
newtargets = self.removeDeletedTargetsOnImage(newtargets)
self.newtargets = newtargets
self.targetlistdata = targetlistdata
self.displayTargets(newtargets,targetlistdata)
newtargetlistdata = self.submitTargets()
return newtargetlistdata
def onSubmitTargets(self):
self.userpause.set()
def onAbortTargets(self):
self.abort = True
self.userpause.set()
def submitTargets(self):
targetlistdata = self.targetlistdata
alltargets = self.alltargets
newtargets = self.newtargets
totaloldtargetcount = self.getAllTargetCount(alltargets)
self.markTargetsDone(alltargets)
self.logger.info('Original targets marked done.')
newtargetlistdata = self.newTargetList()
newtargetlistdata.update(targetlistdata)
self.publish(newtargetlistdata, database=True, dbforce=True)
for i, filteredtarget in enumerate(newtargets):
## original targets are getting through the filter somehow.
## need to make sure we are publishing copies of them.
newtarget = leginondata.AcquisitionImageTargetData(initializer=filteredtarget)
newtarget['list'] = newtargetlistdata
newtarget['number'] = i+1+totaloldtargetcount
self.publish(newtarget, database=True, dbforce=True)
return newtargetlistdata
def appendOtherTargets(self,alltargets,newtargets):
filteredtype = self.settings['target type']
for target in alltargets:
if target['type'] != filteredtype and target['status'] not in ('done','aborted'):
newtarget = leginondata.AcquisitionImageTargetData(initializer=target)
newtarget['delta row'] = target['delta row']
newtarget['delta column'] = target['delta column']
if newtarget not in newtargets:
newtargets.append(newtarget)
return newtargets
def displayTargets(self,targets,oldtargetlistdata):
targets_by_type = dict([(displaytype,[]) for displaytype in self.displaytypes])
if oldtargetlistdata['image'] is not None:
halfrows = oldtargetlistdata['image']['camera']['dimension']['y'] / 2
halfcols = oldtargetlistdata['image']['camera']['dimension']['x'] / 2
image = oldtargetlistdata['image']['image']
elif len(targets) > 0:
halfrows = targets[0]['image']['camera']['dimension']['y'] / 2
halfcols = targets[0]['image']['camera']['dimension']['x'] / 2
image = targets[0]['image']['image']
else:
return
self.setImage(image, 'Image')
for target in targets:
drow = target['delta row']
dcol = target['delta column']
x = dcol + halfcols
y = drow + halfrows
disptarget = x,y
if target['status'] in ('done', 'aborted'):
continue
elif target['type'] in self.displaytypes:
targets_by_type[target['type']].append(disptarget)
for targettype in self.displaytypes:
self.setTargets(targets_by_type[targettype], targettype, block=True)
original = []
for oldtarget in self.goodoldtargets:
drow = oldtarget['delta row']
dcol = oldtarget['delta column']
x = dcol + halfcols
y = drow + halfrows
disptarget = x,y
original.append(disptarget)
self.panel.onOriginalTarget(original)
self.setTargets(original, 'original')
def getAllTargetCount(self,alltargetdata):
parentimgs =[]
totalcount = 0
for target in alltargetdata:
parentim = target.special_getitem('image',dereference=False)
if parentim.dbid not in parentimgs:
parentimgs.append(parentim.dbid)
imagetargets = self.researchTargets(image=parentim)
if imagetargets:
totalcount = totalcount + len(imagetargets)
return totalcount
def filterTargets(self, targetlist):
raise NotImplementedError()
def onTest(self):
self.test = True
goodoldtargets = self.goodoldtargets
self.logger.info('Filter input: %d' % (len(goodoldtargets),))
newtargets = self.filterTargets(goodoldtargets)
self.logger.info('Filter output: %d' % (len(newtargets),))
# append other targets here so that they don't get lost on the display
newtargets = self.appendOtherTargets(self.alltargets, newtargets)
self.displayTargets(newtargets,{'image':None})
self.newtesttargets = newtargets
return newtargets
def distance(self,position1,position2):
return abs(math.hypot(position1[0]-position2[0],position1[1]-position2[1]))
def removeDeletedTargetsOnImage(self,oldtargets):
'''
		This removes targets that the user deleted by right-clicking on the image panel.
		Any new targets added manually by the user are ignored. By modifying the old
		targets, this function retains the parentage of the targets returned by the
		filterTargets method.
'''
if len(oldtargets) == 0:
return
newtargets = []
parentimage = oldtargets[0]['image']
dimension = parentimage['camera']['dimension']
imgcenter = {'x':dimension['x']/2, 'y':dimension['y']/2}
binning = parentimage['camera']['binning']
positions = {}
for typename in self.displaytypes:
positions[typename] = []
imagetargets = self.panel.getTargetPositions(typename)
for imgtarget in imagetargets:
delta_row = (imgtarget[1] - imgcenter['y'])
delta_col = (imgtarget[0] - imgcenter['x'])
positions[typename].append((delta_col,delta_row))
for target in oldtargets:
targetdelta = (target['delta column'],target['delta row'])
for i,position in enumerate(positions[target['type']]):
				# check the distance with a larger tolerance on larger images, because
				# truncation errors can occur when targets are displayed on the image panel
if self.distance(targetdelta,position) <= dimension['x']/512.0:
newtargets.append(target)
del positions[target['type']][i]
break
for typename in self.displaytypes:
if len(positions[typename]) > 0:
self.logger.warning('%s targets added manually will not be processed' % typename)
return newtargets
| [
"node.Node.__init__",
"event.TargetListDoneEvent",
"threading.Event",
"leginon.leginondata.DequeuedImageTargetListData",
"leginon.leginondata.AcquisitionImageTargetData",
"math.hypot",
"targethandler.TargetWaitHandler.__init__"
] | [((1205, 1269), 'node.Node.__init__', 'node.Node.__init__', (['self', 'id', 'session', 'managerlocation'], {}), '(self, id, session, managerlocation, **kwargs)\n', (1223, 1269), False, 'import node\n'), ((1289, 1306), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1304, 1306), False, 'import threading\n'), ((1309, 1355), 'targethandler.TargetWaitHandler.__init__', 'targethandler.TargetWaitHandler.__init__', (['self'], {}), '(self)\n', (1349, 1355), False, 'import targethandler\n'), ((1962, 2036), 'event.TargetListDoneEvent', 'event.TargetListDoneEvent', ([], {'targetlistid': 'targetlistdata.dbid', 'status': 'status'}), '(targetlistid=targetlistdata.dbid, status=status)\n', (1987, 2036), False, 'import event\n'), ((5519, 5585), 'leginon.leginondata.AcquisitionImageTargetData', 'leginondata.AcquisitionImageTargetData', ([], {'initializer': 'filteredtarget'}), '(initializer=filteredtarget)\n', (5557, 5585), False, 'from leginon import leginondata\n'), ((8615, 8683), 'math.hypot', 'math.hypot', (['(position1[0] - position2[0])', '(position1[1] - position2[1])'], {}), '(position1[0] - position2[0], position1[1] - position2[1])\n', (8625, 8683), False, 'import math\n'), ((2650, 2726), 'leginon.leginondata.DequeuedImageTargetListData', 'leginondata.DequeuedImageTargetListData', ([], {'list': 'oldtargetlist', 'queue': 'queuedata'}), '(list=oldtargetlist, queue=queuedata)\n', (2689, 2726), False, 'from leginon import leginondata\n'), ((5992, 6050), 'leginon.leginondata.AcquisitionImageTargetData', 'leginondata.AcquisitionImageTargetData', ([], {'initializer': 'target'}), '(initializer=target)\n', (6030, 6050), False, 'from leginon import leginondata\n')] |
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='col-md-6']/div[@class='ditem']/h4",
'price' : "//div[@class='ditem']/h5[@class='price']",
'category' : "//ol[@class='breadcrumb']/li/a",
'description' : "",
'images' : "//div[@class='col-md-4']/img[@class='img-responsive']/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'yame.vn'
allowed_domains = ['yame.vn']
start_urls = ['http://yame.vn']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/shop/[a-zA-Z0-9-]+/[a-zA-Z0-9-]+-\d+\?']), 'parse_item'),
Rule(LinkExtractor(allow=['/shop/[a-zA-Z-]+($|\?page)']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"scrapy.linkextractors.LinkExtractor"
] | [((697, 763), 'scrapy.linkextractors.LinkExtractor', 'LinkExtractor', ([], {'allow': "['/shop/[a-zA-Z0-9-]+/[a-zA-Z0-9-]+-\\\\d+\\\\?']"}), "(allow=['/shop/[a-zA-Z0-9-]+/[a-zA-Z0-9-]+-\\\\d+\\\\?'])\n", (710, 763), False, 'from scrapy.linkextractors import LinkExtractor\n'), ((787, 839), 'scrapy.linkextractors.LinkExtractor', 'LinkExtractor', ([], {'allow': "['/shop/[a-zA-Z-]+($|\\\\?page)']"}), "(allow=['/shop/[a-zA-Z-]+($|\\\\?page)'])\n", (800, 839), False, 'from scrapy.linkextractors import LinkExtractor\n')] |
# Alignment examples.
from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label
from ocempgui.widgets.Constants import *
def create_alignment_view ():
    # Create and display the different alignments.
frm_main = VFrame (Label ("Alignment examples"))
frm_main.topleft = 10, 10
# Top alignments.
align_topleft = Alignment (100, 50)
align_topleft.align = ALIGN_TOP | ALIGN_LEFT
align_topleft.child = Button ("topleft")
align_top = Alignment (100, 50)
align_top.align = ALIGN_TOP
align_top.child = Button ("top")
align_topright = Alignment (100, 50)
align_topright.align = ALIGN_TOP | ALIGN_RIGHT
align_topright.child = Button ("topright")
frm_top = HFrame ()
frm_top.children = align_topleft, align_top, align_topright
# Centered alignments.
align_left = Alignment (100, 50)
align_left.align = ALIGN_LEFT
align_left.child = Button ("left")
align_center = Alignment (100, 50)
align_center.align = ALIGN_NONE
align_center.child = Button ("center")
align_right = Alignment (100, 50)
align_right.align = ALIGN_RIGHT
align_right.child = Button ("right")
frm_center = HFrame ()
frm_center.children = align_left, align_center, align_right
# Bottom alignments.
align_bottomleft = Alignment (100, 50)
align_bottomleft.align = ALIGN_BOTTOM | ALIGN_LEFT
align_bottomleft.child = Button ("bottomleft")
align_bottom = Alignment (100, 50)
align_bottom.align = ALIGN_BOTTOM
align_bottom.child = Button ("bottom")
align_bottomright = Alignment (100, 50)
align_bottomright.align = ALIGN_BOTTOM | ALIGN_RIGHT
align_bottomright.child = Button ("bottomright")
frm_bottom = HFrame ()
frm_bottom.children = align_bottomleft, align_bottom, align_bottomright
frm_main.children = frm_top, frm_center, frm_bottom
return frm_main
if __name__ == "__main__":
# Initialize the drawing window.
re = Renderer ()
re.create_screen (350, 300)
re.title = "Alignment examples"
re.color = (234, 228, 223)
re.add_widget (create_alignment_view ())
# Start the main rendering loop.
re.start ()
| [
"ocempgui.widgets.Label",
"ocempgui.widgets.Alignment",
"ocempgui.widgets.HFrame",
"ocempgui.widgets.Button",
"ocempgui.widgets.Renderer"
] | [((350, 368), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (359, 368), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((445, 462), 'ocempgui.widgets.Button', 'Button', (['"""topleft"""'], {}), "('topleft')\n", (451, 462), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((481, 499), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (490, 499), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((555, 568), 'ocempgui.widgets.Button', 'Button', (['"""top"""'], {}), "('top')\n", (561, 568), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((592, 610), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (601, 610), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((691, 709), 'ocempgui.widgets.Button', 'Button', (['"""topright"""'], {}), "('topright')\n", (697, 709), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((726, 734), 'ocempgui.widgets.HFrame', 'HFrame', ([], {}), '()\n', (732, 734), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((845, 863), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (854, 863), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((922, 936), 'ocempgui.widgets.Button', 'Button', (['"""left"""'], {}), "('left')\n", (928, 936), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((962, 980), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (971, 980), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1044, 1060), 'ocempgui.widgets.Button', 'Button', (['"""center"""'], {}), "('center')\n", (1050, 1060), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1081, 1099), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (1090, 1099), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1161, 1176), 'ocempgui.widgets.Button', 'Button', (['"""right"""'], {}), "('right')\n", (1167, 1176), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1196, 1204), 'ocempgui.widgets.HFrame', 'HFrame', ([], {}), '()\n', (1202, 1204), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1319, 1337), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (1328, 1337), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1423, 1443), 'ocempgui.widgets.Button', 'Button', (['"""bottomleft"""'], {}), "('bottomleft')\n", (1429, 1443), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1465, 1483), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (1474, 1483), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1548, 1564), 'ocempgui.widgets.Button', 'Button', (['"""bottom"""'], {}), "('bottom')\n", (1554, 1564), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), 
((1591, 1609), 'ocempgui.widgets.Alignment', 'Alignment', (['(100)', '(50)'], {}), '(100, 50)\n', (1600, 1609), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1698, 1719), 'ocempgui.widgets.Button', 'Button', (['"""bottomright"""'], {}), "('bottomright')\n", (1704, 1719), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1739, 1747), 'ocempgui.widgets.HFrame', 'HFrame', ([], {}), '()\n', (1745, 1747), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((1976, 1986), 'ocempgui.widgets.Renderer', 'Renderer', ([], {}), '()\n', (1984, 1986), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n'), ((247, 274), 'ocempgui.widgets.Label', 'Label', (['"""Alignment examples"""'], {}), "('Alignment examples')\n", (252, 274), False, 'from ocempgui.widgets import Renderer, VFrame, HFrame, Button, Alignment, Label\n')] |
import matplotlib.pyplot as plt
f = open('out.txt', 'r')
d = f.readlines()
f.close()
data = list(map(lambda s: int(s.replace(' ', '').replace('\n', '').split(':')[1]), d))
plt.plot(data)
plt.show() | [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((183, 197), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {}), '(data)\n', (191, 197), True, 'import matplotlib.pyplot as plt\n'), ((199, 209), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (207, 209), True, 'import matplotlib.pyplot as plt\n')] |
"""
=============================================
Discrete distribution as horizontal bar chart
=============================================
Stacked bar charts can be used to visualize discrete distributions.
This example visualizes the result of a survey in which people could rate
their agreement to questions on a five-element scale.
The horizontal stacking is achieved by calling `~.Axes.barh()` for each
category and passing the starting point as the cumulative sum of the
already drawn bars via the parameter ``left``.
"""
import numpy as np
import matplotlib.pyplot as plt
category_names = ['Strongly disagree', 'Disagree',
'Neither agree nor disagree', 'Agree', 'Strongly agree']
results = {
'Question 1': [10, 15, 17, 32, 26],
'Question 2': [26, 22, 29, 10, 13],
'Question 3': [35, 37, 7, 2, 19],
'Question 4': [32, 11, 9, 15, 33],
'Question 5': [21, 29, 5, 5, 40],
'Question 6': [8, 19, 5, 30, 38]
}
def survey(results, category_names):
"""
Parameters
----------
results : dict
A mapping from question labels to a list of answers per category.
It is assumed all lists contain the same number of entries and that
it matches the length of *category_names*.
category_names : list of str
The category labels.
"""
labels = list(results.keys())
data = np.array(list(results.values()))
data_cum = data.cumsum(axis=1)
category_colors = plt.get_cmap('RdYlGn')(
np.linspace(0.15, 0.85, data.shape[1]))
fig, ax = plt.subplots(figsize=(9.2, 5))
ax.invert_yaxis()
ax.xaxis.set_visible(False)
ax.set_xlim(0, np.sum(data, axis=1).max())
for i, (colname, color) in enumerate(zip(category_names, category_colors)):
widths = data[:, i]
starts = data_cum[:, i] - widths
ax.barh(labels, widths, left=starts, height=0.5,
label=colname, color=color)
xcenters = starts + widths / 2
r, g, b, _ = color
text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
for y, (x, c) in enumerate(zip(xcenters, widths)):
ax.text(x, y, str(int(c)), ha='center', va='center',
color=text_color)
ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
loc='lower left', fontsize='small')
return fig, ax
survey(results, category_names)
plt.show()
#############################################################################
#
# ------------
#
# References
# """"""""""
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
import matplotlib
matplotlib.axes.Axes.barh
matplotlib.pyplot.barh
matplotlib.axes.Axes.text
matplotlib.pyplot.text
matplotlib.axes.Axes.legend
matplotlib.pyplot.legend
| [
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.get_cmap"
] | [((2391, 2401), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2399, 2401), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1578), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9.2, 5)'}), '(figsize=(9.2, 5))\n', (1560, 1578), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1483), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""RdYlGn"""'], {}), "('RdYlGn')\n", (1473, 1483), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1531), 'numpy.linspace', 'np.linspace', (['(0.15)', '(0.85)', 'data.shape[1]'], {}), '(0.15, 0.85, data.shape[1])\n', (1504, 1531), True, 'import numpy as np\n'), ((1652, 1672), 'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (1658, 1672), True, 'import numpy as np\n')] |
import os
directory = os.path.dirname(__file__)
def load(settings):
from hardhat.util import load_recipes
recipes = load_recipes(directory, 'hardhat.recipes.javascript')
dependencies = [
]
for recipe in recipes:
recipe = recipe(settings=settings)
depends = list(recipe.depends)
depends.insert(0, recipe.name)
if needs_nodejs(depends):
depends.append('nodejs')
dependencies.append(depends)
return (recipes, dependencies)
def needs_nodejs(depends):
needs = False
has = False
for d in depends:
if d.startswith('nodejs-'):
needs = True
elif d == '':
has = True
return needs and not has
| [
"os.path.dirname",
"hardhat.util.load_recipes"
] | [((23, 48), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (38, 48), False, 'import os\n'), ((127, 180), 'hardhat.util.load_recipes', 'load_recipes', (['directory', '"""hardhat.recipes.javascript"""'], {}), "(directory, 'hardhat.recipes.javascript')\n", (139, 180), False, 'from hardhat.util import load_recipes\n')] |
# main app
from flask import Flask, jsonify, request, url_for, redirect, session
app = Flask(__name__)
test = 10
app.config.update(
TESTING=True,
DEBUG=True,
SECRET_KEY=b'_5#y234dasfgsd324535ghjgk32hrjhqgwf4123fgd4123t2374dsfgsdf2L"F4Q8z\n\xec]/'
)
@app.route('/')
def start():
tekst = '''
    <h1> School quizzes application </h1> <hr>
    Bootstrap will probably go here ;-)
'''
return tekst
@app.route('/login')
def login():
    # TODO: some sensible login handling still needs to be added here...
tekst = '''
    <h1> School quizzes application </h1> <hr>
    Login will probably go here ;-)
'''
return tekst
if __name__ == "__main__":
app.run()
| [
"flask.Flask"
] | [((88, 103), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (93, 103), False, 'from flask import Flask, jsonify, request, url_for, redirect, session\n')] |
# -*- coding: utf-8 -*-
import inspect
import traceback
import json  # needed for json.loads in check_password below
from db import LoginDAO
from util import Util, Constants, Log, CodeReturn
from controller import Controller
log = Log('AuthRoute')
util = Util()
constants = Constants()
loginDAO = LoginDAO()
controller = Controller()
codeReturn = CodeReturn()
class AuthRoute:
def login(self, request):
if (request.is_json):
try:
content = request.get_json()
#Get datas from JSON
user = str(content['user'])
password = str(content['pass'])
except:
return util.make_json(codeReturn.BAD_REQUEST_CODE, codeReturn.BAD_REQUEST_MSG, [])
code, msg, data = loginDAO.login(user, password)
return util.make_json(code, msg, data)
else:
return util.make_json(codeReturn.BAD_REQUEST_CODE, codeReturn.BAD_REQUEST_MSG, [])
def check_password(self, request):
if (request.is_json):
try:
content = request.get_json()
header = request.headers
#Get Token from Header
token = str(header['token'])
#Get datas from JSON
data = json.loads(str(content['data']).replace("'",'"'))
password = data['<PASSWORD>']
except:
return util.make_json(codeReturn.BAD_REQUEST_CODE, codeReturn.BAD_REQUEST_MSG, [])
#Authentication
decode_auth_token = controller.decode_auth_token(token)
if(decode_auth_token == codeReturn.EXPIRED_TOKEN_CODE):
log.warning(inspect.getframeinfo(inspect.currentframe()).function,
str(codeReturn.EXPIRED_TOKEN_MSG),
0)
return util.make_json(codeReturn.EXPIRED_TOKEN_CODE, codeReturn.EXPIRED_TOKEN_MSG, [])
elif(decode_auth_token == codeReturn.INVALID_TOKEN_CODE):
log.error(inspect.getframeinfo(inspect.currentframe()).function,
str(codeReturn.INVALID_TOKEN_MSG),
0)
return util.make_json(codeReturn.INVALID_TOKEN_CODE, codeReturn.INVALID_TOKEN_MSG, [])
else:
code, msg, data = loginDAO.check_password(password, decode_auth_token)
return util.make_json(code, msg, data)
else:
return util.make_json(codeReturn.BAD_REQUEST_CODE, codeReturn.BAD_REQUEST_MSG, [])
def check_token(self, request):
if (request.is_json):
try:
header = request.headers
#Get Token from Header
token = str(header['token'])
except:
log.error(inspect.getframeinfo(inspect.currentframe()).function,
str(traceback.format_exc()),
0)
return util.make_json(codeReturn.BAD_REQUEST_CODE, codeReturn.BAD_REQUEST_MSG, [])
#Authentication
decode_auth_token = controller.decode_auth_token(token)
if(decode_auth_token == codeReturn.EXPIRED_TOKEN_CODE):
log.warning(inspect.getframeinfo(inspect.currentframe()).function,
str(codeReturn.EXPIRED_TOKEN_MSG),
0)
return util.make_json(codeReturn.EXPIRED_TOKEN_CODE, codeReturn.EXPIRED_TOKEN_MSG, [])
elif(decode_auth_token == codeReturn.INVALID_TOKEN_CODE):
log.error(inspect.getframeinfo(inspect.currentframe()).function,
str(codeReturn.INVALID_TOKEN_MSG),
0)
return util.make_json(codeReturn.INVALID_TOKEN_CODE, codeReturn.INVALID_TOKEN_MSG, [])
else:
return util.make_json(codeReturn.SUCCESS_CODE, codeReturn.SUCCESS_MSG, {
'id':decode_auth_token
})
else:
return util.make_json(codeReturn.BAD_REQUEST_CODE, codeReturn.BAD_REQUEST_MSG, [])
def logout(self, request):
if request.is_json:
return util.make_json(codeReturn.SUCCESS_CODE, codeReturn.SUCCESS_MSG, [])
else:
return util.make_json(codeReturn.BAD_REQUEST_CODE, codeReturn.BAD_REQUEST_MSG, [])
| [
"traceback.format_exc",
"inspect.currentframe",
"util.Log",
"util.CodeReturn",
"util.Constants",
"controller.Controller",
"util.Util",
"db.LoginDAO"
] | [((172, 188), 'util.Log', 'Log', (['"""AuthRoute"""'], {}), "('AuthRoute')\n", (175, 188), False, 'from util import Util, Constants, Log, CodeReturn\n'), ((196, 202), 'util.Util', 'Util', ([], {}), '()\n', (200, 202), False, 'from util import Util, Constants, Log, CodeReturn\n'), ((215, 226), 'util.Constants', 'Constants', ([], {}), '()\n', (224, 226), False, 'from util import Util, Constants, Log, CodeReturn\n'), ((238, 248), 'db.LoginDAO', 'LoginDAO', ([], {}), '()\n', (246, 248), False, 'from db import LoginDAO\n'), ((262, 274), 'controller.Controller', 'Controller', ([], {}), '()\n', (272, 274), False, 'from controller import Controller\n'), ((288, 300), 'util.CodeReturn', 'CodeReturn', ([], {}), '()\n', (298, 300), False, 'from util import Util, Constants, Log, CodeReturn\n'), ((1679, 1701), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1699, 1701), False, 'import inspect\n'), ((2869, 2891), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2889, 2891), False, 'import traceback\n'), ((3234, 3256), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (3254, 3256), False, 'import inspect\n'), ((2031, 2053), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (2051, 2053), False, 'import inspect\n'), ((2808, 2830), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (2828, 2830), False, 'import inspect\n'), ((3585, 3607), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (3605, 3607), False, 'import inspect\n')] |
"""
This module handles the generation of 2 8-bit keys from a 10-bit key
"""
import general_functions as gf
import s_des_parameters as pa
def s_des_keygen(key):
primary_key=gf.create_keystring(key, 10)
primary_key=gf.permute_key(primary_key, pa.kg_P10)
primary_key=gf.left_shift(primary_key[:5], 1) + gf.left_shift(primary_key[5:], 1)
key1=gf.permute_key(primary_key, pa.kg_P8)
primary_key=gf.left_shift(primary_key[:5], 2) + gf.left_shift(primary_key[5:], 2)
key2=gf.permute_key(primary_key, pa.kg_P8)
return key1,key2
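# Usage sketch (illustrative only; the exact key format accepted here depends on
# gf.create_keystring, so the literal below is an assumption):
#
#   key1, key2 = s_des_keygen(0b1010000010)   # returns the two 8-bit round keys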
| [
"general_functions.permute_key",
"general_functions.left_shift",
"general_functions.create_keystring"
] | [((179, 207), 'general_functions.create_keystring', 'gf.create_keystring', (['key', '(10)'], {}), '(key, 10)\n', (198, 207), True, 'import general_functions as gf\n'), ((224, 262), 'general_functions.permute_key', 'gf.permute_key', (['primary_key', 'pa.kg_P10'], {}), '(primary_key, pa.kg_P10)\n', (238, 262), True, 'import general_functions as gf\n'), ((358, 395), 'general_functions.permute_key', 'gf.permute_key', (['primary_key', 'pa.kg_P8'], {}), '(primary_key, pa.kg_P8)\n', (372, 395), True, 'import general_functions as gf\n'), ((491, 528), 'general_functions.permute_key', 'gf.permute_key', (['primary_key', 'pa.kg_P8'], {}), '(primary_key, pa.kg_P8)\n', (505, 528), True, 'import general_functions as gf\n'), ((279, 312), 'general_functions.left_shift', 'gf.left_shift', (['primary_key[:5]', '(1)'], {}), '(primary_key[:5], 1)\n', (292, 312), True, 'import general_functions as gf\n'), ((315, 348), 'general_functions.left_shift', 'gf.left_shift', (['primary_key[5:]', '(1)'], {}), '(primary_key[5:], 1)\n', (328, 348), True, 'import general_functions as gf\n'), ((412, 445), 'general_functions.left_shift', 'gf.left_shift', (['primary_key[:5]', '(2)'], {}), '(primary_key[:5], 2)\n', (425, 445), True, 'import general_functions as gf\n'), ((448, 481), 'general_functions.left_shift', 'gf.left_shift', (['primary_key[5:]', '(2)'], {}), '(primary_key[5:], 2)\n', (461, 481), True, 'import general_functions as gf\n')] |
import os
access_types = ['create', 'read', 'update', 'delete']
users = ['user', 'user_group2', 'super_admin_user', 'group_admin_user']
targets = [
'public_group',
'public_groupuser',
'public_stream',
'public_groupstream',
'public_streamuser',
'public_filter',
'public_candidate_object',
'public_source_object',
'keck1_telescope',
'sedm',
'public_group_sedm_allocation',
'public_group_taxonomy',
'public_taxonomy',
'public_comment',
'public_groupcomment',
'public_annotation',
'public_groupannotation',
'public_classification',
'public_groupclassification',
'public_source_photometry_point',
'public_source_spectrum',
'public_source_groupphotometry',
'public_source_groupspectrum',
'public_source_followuprequest',
'public_source_followup_request_target_group',
'public_thumbnail',
'red_transients_run',
'problematic_assignment',
'invitation',
'user_notification',
]
directory = os.path.dirname(__file__)
fname = os.path.join(directory, 'test_permissions.py')
with open(fname, 'w') as f:
for user in users:
for access_type in access_types:
for target in targets:
test = f"""
def test_{user}_{access_type}_{target}({user}, {target}):
accessible = {target}.is_accessible_by({user}, mode="{access_type}")
assert accessible == accessible
"""
f.write(test)
| [
"os.path.dirname",
"os.path.join"
] | [((1002, 1027), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1017, 1027), False, 'import os\n'), ((1036, 1082), 'os.path.join', 'os.path.join', (['directory', '"""test_permissions.py"""'], {}), "(directory, 'test_permissions.py')\n", (1048, 1082), False, 'import os\n')] |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.i18n import i18n_patterns
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from .settings.base import JET
import acatto.views
urlpatterns = [
url(r'^$', acatto.views.rnd, name='rnd'),
#url(r'^$', TemplateView.as_view(template_name='acatto/home.html'), name='home'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, acatto.views.rnd, name='rnd'),
#url(settings.ADMIN_URL, admin.site.urls),
# User management
#url(r'^users/', include('acatto_web.users.urls', namespace='users')),
#url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^products/$', acatto.views.products, name='products'),
url(r'^product/(?P<product_id>\d+)/$', acatto.views.product, name='product'),
url(r'^rnd/$', acatto.views.rnd, name='rnd'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if JET:
urlpatterns.append(url('^jet/', include('jet.urls')))
urlpatterns = i18n_patterns(*urlpatterns)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| [
"django.conf.urls.include",
"django.conf.urls.static.static",
"django.conf.urls.url",
"django.conf.urls.i18n.i18n_patterns"
] | [((1197, 1224), 'django.conf.urls.i18n.i18n_patterns', 'i18n_patterns', (['*urlpatterns'], {}), '(*urlpatterns)\n', (1210, 1224), False, 'from django.conf.urls.i18n import i18n_patterns\n'), ((1053, 1114), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1059, 1114), False, 'from django.conf.urls.static import static\n'), ((371, 410), 'django.conf.urls.url', 'url', (['"""^$"""', 'acatto.views.rnd'], {'name': '"""rnd"""'}), "('^$', acatto.views.rnd, name='rnd')\n", (374, 410), False, 'from django.conf.urls import include, url\n'), ((552, 605), 'django.conf.urls.url', 'url', (['settings.ADMIN_URL', 'acatto.views.rnd'], {'name': '"""rnd"""'}), "(settings.ADMIN_URL, acatto.views.rnd, name='rnd')\n", (555, 605), False, 'from django.conf.urls import include, url\n'), ((855, 913), 'django.conf.urls.url', 'url', (['"""^products/$"""', 'acatto.views.products'], {'name': '"""products"""'}), "('^products/$', acatto.views.products, name='products')\n", (858, 913), False, 'from django.conf.urls import include, url\n'), ((920, 996), 'django.conf.urls.url', 'url', (['"""^product/(?P<product_id>\\\\d+)/$"""', 'acatto.views.product'], {'name': '"""product"""'}), "('^product/(?P<product_id>\\\\d+)/$', acatto.views.product, name='product')\n", (923, 996), False, 'from django.conf.urls import include, url\n'), ((1002, 1045), 'django.conf.urls.url', 'url', (['"""^rnd/$"""', 'acatto.views.rnd'], {'name': '"""rnd"""'}), "('^rnd/$', acatto.views.rnd, name='rnd')\n", (1005, 1045), False, 'from django.conf.urls import include, url\n'), ((1738, 1779), 'django.conf.urls.url', 'url', (['"""^500/$"""', 'default_views.server_error'], {}), "('^500/$', default_views.server_error)\n", (1741, 1779), False, 'from django.conf.urls import include, url\n'), ((1160, 1179), 'django.conf.urls.include', 'include', (['"""jet.urls"""'], {}), "('jet.urls')\n", (1167, 1179), False, 'from django.conf.urls import include, url\n'), ((1924, 1951), 'django.conf.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (1931, 1951), False, 'from django.conf.urls import include, url\n')] |
from datetime import timedelta
from django.core.paginator import Paginator
from django.db.models import Count, Q
from django.utils import timezone
from app.models import Delegate, History
from app.serializers import DelegateInfo
def fetch_delegates(page, limit=60, search_query=None):
total_node_count_query = Count(
"delegate_fk__nodes", filter=Q(delegate_fk__nodes__is_active=True), distinct=True
)
backup_node_count_query = Count(
"delegate_fk__nodes",
filter=Q(delegate_fk__nodes__is_active=True, delegate_fk__nodes__is_backup=True),
distinct=True,
)
contributions_count_query = Count("delegate_fk__contributions", distinct=True)
base_query = History.objects.filter(created__gt=timezone.now() - timedelta(days=3))
if search_query:
base_query = History.objects.filter(
Q(delegate_fk__name__icontains=search_query) | Q(delegate_fk__address=search_query)
)
history_ids = (
base_query.order_by("delegate_fk", "-created")
.distinct("delegate_fk")
.values_list("id", flat=True)
)
histories = (
History.objects.all()
.filter(id__in=history_ids)
.annotate(total_nodes_count=total_node_count_query)
.annotate(backup_nodes_count=backup_node_count_query)
.annotate(contributions_count=contributions_count_query)
.select_related("delegate_fk")
.order_by("rank")
)
paginator = Paginator(histories, limit)
histories_paginated = paginator.get_page(page)
delegates = []
for history in histories_paginated.object_list:
data = {
"id": history.delegate_fk.id,
"name": history.delegate_fk.name,
"slug": history.delegate_fk.slug,
"address": history.delegate_fk.address,
"public_key": history.delegate_fk.public_key,
"created": history.delegate_fk.created,
"updated": history.delegate_fk.updated,
"website": history.delegate_fk.website,
"proposal": history.delegate_fk.proposal,
"is_private": history.delegate_fk.is_private,
"payout_covering_fee": history.delegate_fk.payout_covering_fee,
"payout_percent": history.delegate_fk.payout_percent,
"payout_interval": history.delegate_fk.payout_interval,
"payout_minimum": history.delegate_fk.payout_minimum,
"payout_maximum": history.delegate_fk.payout_maximum,
"payout_minimum_vote_amount": history.delegate_fk.payout_minimum_vote_amount,
"payout_maximum_vote_amount": history.delegate_fk.payout_maximum_vote_amount,
"user_id": history.delegate_fk.user_id,
"total_nodes_count": history.total_nodes_count,
"backup_nodes_count": history.backup_nodes_count,
"contributions_count": history.contributions_count,
"approval": history.approval,
"rank": history.rank,
"rank_changed": history.rank_changed,
"forged": history.forged,
"voters": history.voters,
"voting_power": history.voting_power,
"voters_zero_balance": history.payload.get("voters_zero_balance"),
"voters_not_zero_balance": history.payload.get("voters_not_zero_balance"),
}
delegates.append(data)
return DelegateInfo(instance=delegates, many=True).data, histories_paginated
def fetch_new_delegates(page, limit=60):
delegates = (
Delegate.objects.exclude(proposal=None).exclude(user_id=None).order_by("-created")[:6]
)
base_query = History.objects.filter(
created__gt=timezone.now() - timedelta(days=3), delegate_fk__in=delegates.values_list("id")
)
history_ids = (
base_query.order_by("delegate_fk", "-created")
.distinct("delegate_fk")
.values_list("id", flat=True)
)
total_node_count_query = Count(
"delegate_fk__nodes", filter=Q(delegate_fk__nodes__is_active=True), distinct=True
)
backup_node_count_query = Count(
"delegate_fk__nodes",
filter=Q(delegate_fk__nodes__is_active=True, delegate_fk__nodes__is_backup=True),
distinct=True,
)
contributions_count_query = Count("delegate_fk__contributions", distinct=True)
histories = (
History.objects.all()
.filter(id__in=history_ids)
.annotate(total_nodes_count=total_node_count_query)
.annotate(backup_nodes_count=backup_node_count_query)
.annotate(contributions_count=contributions_count_query)
.select_related("delegate_fk")
.order_by("-delegate_fk__created")
)
paginator = Paginator(histories, limit)
histories_paginated = paginator.get_page(page)
delegates_data = []
for history in histories_paginated.object_list:
data = {
"id": history.delegate_fk.id,
"name": history.delegate_fk.name,
"slug": history.delegate_fk.slug,
"address": history.delegate_fk.address,
"public_key": history.delegate_fk.public_key,
"created": history.delegate_fk.created,
"updated": history.delegate_fk.updated,
"website": history.delegate_fk.website,
"proposal": history.delegate_fk.proposal,
"is_private": history.delegate_fk.is_private,
"payout_covering_fee": history.delegate_fk.payout_covering_fee,
"payout_percent": history.delegate_fk.payout_percent,
"payout_interval": history.delegate_fk.payout_interval,
"payout_minimum": history.delegate_fk.payout_minimum,
"payout_maximum": history.delegate_fk.payout_maximum,
"payout_minimum_vote_amount": history.delegate_fk.payout_minimum_vote_amount,
"payout_maximum_vote_amount": history.delegate_fk.payout_maximum_vote_amount,
"user_id": history.delegate_fk.user_id,
"total_nodes_count": history.total_nodes_count,
"backup_nodes_count": history.backup_nodes_count,
"contributions_count": history.contributions_count,
"approval": history.approval,
"rank": history.rank,
"rank_changed": history.rank_changed,
"forged": history.forged,
"voters": history.voters,
"voting_power": history.voting_power,
"voters_zero_balance": history.payload.get("voters_zero_balance"),
"voters_not_zero_balance": history.payload.get("voters_not_zero_balance"),
}
delegates_data.append(data)
return DelegateInfo(instance=delegates_data, many=True).data, histories_paginated
| [
"django.db.models.Count",
"app.models.Delegate.objects.exclude",
"datetime.timedelta",
"django.utils.timezone.now",
"app.serializers.DelegateInfo",
"app.models.History.objects.all",
"django.db.models.Q",
"django.core.paginator.Paginator"
] | [((639, 689), 'django.db.models.Count', 'Count', (['"""delegate_fk__contributions"""'], {'distinct': '(True)'}), "('delegate_fk__contributions', distinct=True)\n", (644, 689), False, 'from django.db.models import Count, Q\n'), ((1464, 1491), 'django.core.paginator.Paginator', 'Paginator', (['histories', 'limit'], {}), '(histories, limit)\n', (1473, 1491), False, 'from django.core.paginator import Paginator\n'), ((4258, 4308), 'django.db.models.Count', 'Count', (['"""delegate_fk__contributions"""'], {'distinct': '(True)'}), "('delegate_fk__contributions', distinct=True)\n", (4263, 4308), False, 'from django.db.models import Count, Q\n'), ((4686, 4713), 'django.core.paginator.Paginator', 'Paginator', (['histories', 'limit'], {}), '(histories, limit)\n', (4695, 4713), False, 'from django.core.paginator import Paginator\n'), ((362, 399), 'django.db.models.Q', 'Q', ([], {'delegate_fk__nodes__is_active': '(True)'}), '(delegate_fk__nodes__is_active=True)\n', (363, 399), False, 'from django.db.models import Count, Q\n'), ((503, 576), 'django.db.models.Q', 'Q', ([], {'delegate_fk__nodes__is_active': '(True)', 'delegate_fk__nodes__is_backup': '(True)'}), '(delegate_fk__nodes__is_active=True, delegate_fk__nodes__is_backup=True)\n', (504, 576), False, 'from django.db.models import Count, Q\n'), ((3375, 3418), 'app.serializers.DelegateInfo', 'DelegateInfo', ([], {'instance': 'delegates', 'many': '(True)'}), '(instance=delegates, many=True)\n', (3387, 3418), False, 'from app.serializers import DelegateInfo\n'), ((3981, 4018), 'django.db.models.Q', 'Q', ([], {'delegate_fk__nodes__is_active': '(True)'}), '(delegate_fk__nodes__is_active=True)\n', (3982, 4018), False, 'from django.db.models import Count, Q\n'), ((4122, 4195), 'django.db.models.Q', 'Q', ([], {'delegate_fk__nodes__is_active': '(True)', 'delegate_fk__nodes__is_backup': '(True)'}), '(delegate_fk__nodes__is_active=True, delegate_fk__nodes__is_backup=True)\n', (4123, 4195), False, 'from django.db.models import Count, Q\n'), ((6607, 6655), 'app.serializers.DelegateInfo', 'DelegateInfo', ([], {'instance': 'delegates_data', 'many': '(True)'}), '(instance=delegates_data, many=True)\n', (6619, 6655), False, 'from app.serializers import DelegateInfo\n'), ((743, 757), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (755, 757), False, 'from django.utils import timezone\n'), ((760, 777), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (769, 777), False, 'from datetime import timedelta\n'), ((857, 901), 'django.db.models.Q', 'Q', ([], {'delegate_fk__name__icontains': 'search_query'}), '(delegate_fk__name__icontains=search_query)\n', (858, 901), False, 'from django.db.models import Count, Q\n'), ((904, 940), 'django.db.models.Q', 'Q', ([], {'delegate_fk__address': 'search_query'}), '(delegate_fk__address=search_query)\n', (905, 940), False, 'from django.db.models import Count, Q\n'), ((3668, 3682), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3680, 3682), False, 'from django.utils import timezone\n'), ((3685, 3702), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (3694, 3702), False, 'from datetime import timedelta\n'), ((3514, 3553), 'app.models.Delegate.objects.exclude', 'Delegate.objects.exclude', ([], {'proposal': 'None'}), '(proposal=None)\n', (3538, 3553), False, 'from app.models import Delegate, History\n'), ((1131, 1152), 'app.models.History.objects.all', 'History.objects.all', ([], {}), '()\n', (1150, 1152), False, 'from app.models import Delegate, History\n'), 
((4336, 4357), 'app.models.History.objects.all', 'History.objects.all', ([], {}), '()\n', (4355, 4357), False, 'from app.models import Delegate, History\n')] |
import sys
import random
import pyjokes
sys.argv
first = 1
last = 10
answer = random.randint(first, last)
while True:
try:
# print(answer)
guess = int(input(f"Guess a number between {first} ~ {last} : "))
if first-1 < guess < last+1:
if(guess == answer):
print("You are genuis")
break
else:
print(f"Hey Random Guy Please enter a number {first} ~ {last} !")
except ValueError:
print("Please enter a Number!")
continue
joke = pyjokes.get_joke('en','neutral')
print(" ")
print('Joke For the Day!!')
print(joke); | [
"random.randint",
"pyjokes.get_joke"
] | [((92, 119), 'random.randint', 'random.randint', (['first', 'last'], {}), '(first, last)\n', (106, 119), False, 'import random\n'), ((502, 535), 'pyjokes.get_joke', 'pyjokes.get_joke', (['"""en"""', '"""neutral"""'], {}), "('en', 'neutral')\n", (518, 535), False, 'import pyjokes\n')] |
"""
Preprocess dataset
usage:
preprocess.py [options] <wav-dir>...
options:
--output-dir=<dir> Directory where processed outputs are saved. [default: data_dir].
-h, --help Show help message.
"""
import os
from docopt import docopt
import numpy as np
import math, pickle, os
from audio import *
from hparams import hparams as hp
from utils import *
from tqdm import tqdm
class MelVocoder:
def __init__(self):
self._mel_basis = None
def fft(self, y, sample_rate, use_preemphasis=False):
if use_preemphasis:
pre_y = self.preemphasis(y)
else:
pre_y = y
D = self._stft(pre_y, sample_rate)
return D.transpose()
def ifft(self, y, sample_rate):
y = y.transpose()
return self._istft(y, sample_rate)
def melspectrogram(self, y, sample_rate, num_mels, use_preemphasis=False):
if use_preemphasis:
pre_y = self.preemphasis(y)
else:
pre_y = y
D = self._stft(pre_y, sample_rate)
S = self._amp_to_db(self._linear_to_mel(np.abs(D), sample_rate, num_mels))
return self._normalize(S)
def preemphasis(self, x):
return signal.lfilter([1, -0.97], [1], x)
def _istft(self, y, sample_rate):
n_fft, hop_length, win_length = self._stft_parameters(sample_rate)
return librosa.istft(y, hop_length=hop_length, win_length=win_length)
def _stft(self, y, sample_rate):
n_fft, hop_length, win_length = self._stft_parameters(sample_rate)
return librosa.stft(y=y, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window='hann')
def _linear_to_mel(self, spectrogram, sample_rate, num_mels):
if self._mel_basis is None:
self._mel_basis = self._build_mel_basis(sample_rate, num_mels)
return np.dot(self._mel_basis, spectrogram)
def _build_mel_basis(self, sample_rate, num_mels):
n_fft = 1024
return librosa.filters.mel(sample_rate, n_fft, n_mels=num_mels, fmax=7600)
def _normalize(self, S):
min_level_db = -100.0
return np.clip((S - min_level_db) / -min_level_db, 0, 1)
def _stft_parameters(self, sample_rate):
n_fft = 1024
hop_length = 256
win_length = n_fft
return n_fft, hop_length, win_length
def _denormalize(self, S):
min_level_db = -100.0
# return np.clip((S - min_level_db) / -min_level_db, 0, 1)
return S * (-min_level_db) + min_level_db
def _db_to_amp(self, x):
reference = 20.0
# return 20 * np.log10(np.maximum(1e-5, x)) - reference
return np.power(10.0, (x + reference) * 0.05)
def _amp_to_db(self, x):
reference = 20.0
return 20 * np.log10(np.maximum(1e-5, x)) - reference
def griffinlim(self, spectrogram, n_iter=50, sample_rate=16000):
n_fft, hop_length, win_length = self._stft_parameters(sample_rate)
return self._griffinlim(spectrogram.transpose(), n_iter=n_iter, n_fft=n_fft, hop_length=hop_length)
def _griffinlim(self, spectrogram, n_iter=100, window='hann', n_fft=2048, hop_length=-1, verbose=False):
if hop_length == -1:
hop_length = n_fft // 4
angles = np.exp(2j * np.pi * np.random.rand(*spectrogram.shape))
t = tqdm(range(n_iter), ncols=100, mininterval=2.0, disable=not verbose)
for i in t:
full = np.abs(spectrogram).astype(np.complex) * angles
inverse = librosa.istft(full, hop_length=hop_length, window=window)
rebuilt = librosa.stft(inverse, n_fft=n_fft, hop_length=hop_length, window=window)
angles = np.exp(1j * np.angle(rebuilt))
if verbose:
diff = np.abs(spectrogram) - np.abs(rebuilt)
t.set_postfix(loss=np.linalg.norm(diff, 'fro'))
full = np.abs(spectrogram).astype(np.complex) * angles
inverse = librosa.istft(full, hop_length=hop_length, window=window)
return inverse
mvc = MelVocoder()
def _normalize(data):
m = np.max(np.abs(data))
data = (data / m) * 0.999
return data
def get_wav_mel(path):
"""Given path to .wav file, get the quantized wav and mel spectrogram as numpy vectors
"""
wav = load_wav(path)
wav = _normalize(wav)
mel = mvc.melspectrogram(wav, sample_rate=hp.sample_rate, num_mels=hp.num_mels) # melspectrogram(wav)
if hp.input_type == 'raw' or hp.input_type == 'mixture':
return wav.astype(np.float32), mel
elif hp.input_type == 'mulaw':
quant = mulaw_quantize(wav, hp.mulaw_quantize_channels)
return quant.astype(np.int), mel
elif hp.input_type == 'bits':
quant = quantize(wav)
return quant.astype(np.int), mel
else:
raise ValueError("hp.input_type {} not recognized".format(hp.input_type))
def process_data(wav_dirs, output_path, mel_path, wav_path):
"""
given wav directory and output directory, process wav files and save quantized wav and mel
spectrogram to output directory
"""
dataset_ids = []
# get list of wav files
wav_files = []
for wav_dir in wav_dirs:
thisdir = os.listdir(wav_dir)
thisdir = [os.path.join(wav_dir, thisfile) for thisfile in thisdir]
wav_files += thisdir
# check wav_file
assert len(wav_files) != 0 or wav_files[0][-4:] == '.wav', "no wav files found!"
# create training and testing splits
test_wav_files = wav_files[:4]
wav_files = wav_files[4:]
for i, wav_file in enumerate(tqdm(wav_files)):
# get the file id
# from ipdb import set_trace
# set_trace()
file_id = '{:d}'.format(i).zfill(5)
wav, mel = get_wav_mel(wav_file)
# save
np.save(os.path.join(mel_path, file_id + ".npy"), mel)
np.save(os.path.join(wav_path, file_id + ".npy"), wav)
# add to dataset_ids
dataset_ids.append(file_id)
# save dataset_ids
with open(os.path.join(output_path, 'dataset_ids.pkl'), 'wb') as f:
pickle.dump(dataset_ids, f)
# process testing_wavs
test_path = os.path.join(output_path, 'test')
os.makedirs(test_path, exist_ok=True)
for i, wav_file in enumerate(test_wav_files):
wav, mel = get_wav_mel(wav_file)
# save test_wavs
np.save(os.path.join(test_path, "test_{}_mel.npy".format(i)), mel)
np.save(os.path.join(test_path, "test_{}_wav.npy".format(i)), wav)
print(
"\npreprocessing done, total processed wav files:{}.\nProcessed files are located in:{}".format(len(wav_files),
os.path.abspath(
output_path)))
if __name__ == "__main__":
args = docopt(__doc__)
wav_dir = args["<wav-dir>"]
output_dir = args["--output-dir"]
# create paths
output_path = os.path.join(output_dir, "")
mel_path = os.path.join(output_dir, "mel")
wav_path = os.path.join(output_dir, "wav")
# create dirs
os.makedirs(output_path, exist_ok=True)
os.makedirs(mel_path, exist_ok=True)
os.makedirs(wav_path, exist_ok=True)
# process data
process_data(wav_dir, output_path, mel_path, wav_path)
def test_get_wav_mel():
wav, mel = get_wav_mel('sample.wav')
print(wav.shape, mel.shape)
print(wav)
| [
"numpy.clip",
"numpy.abs",
"os.listdir",
"pickle.dump",
"numpy.random.rand",
"os.makedirs",
"numpy.power",
"tqdm.tqdm",
"os.path.join",
"numpy.angle",
"numpy.dot",
"numpy.linalg.norm",
"os.path.abspath",
"numpy.maximum",
"docopt.docopt"
] | [((6126, 6159), 'os.path.join', 'os.path.join', (['output_path', '"""test"""'], {}), "(output_path, 'test')\n", (6138, 6159), False, 'import math, pickle, os\n'), ((6164, 6201), 'os.makedirs', 'os.makedirs', (['test_path'], {'exist_ok': '(True)'}), '(test_path, exist_ok=True)\n', (6175, 6201), False, 'import math, pickle, os\n'), ((6884, 6899), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (6890, 6899), False, 'from docopt import docopt\n'), ((7008, 7036), 'os.path.join', 'os.path.join', (['output_dir', '""""""'], {}), "(output_dir, '')\n", (7020, 7036), False, 'import math, pickle, os\n'), ((7052, 7083), 'os.path.join', 'os.path.join', (['output_dir', '"""mel"""'], {}), "(output_dir, 'mel')\n", (7064, 7083), False, 'import math, pickle, os\n'), ((7099, 7130), 'os.path.join', 'os.path.join', (['output_dir', '"""wav"""'], {}), "(output_dir, 'wav')\n", (7111, 7130), False, 'import math, pickle, os\n'), ((7154, 7193), 'os.makedirs', 'os.makedirs', (['output_path'], {'exist_ok': '(True)'}), '(output_path, exist_ok=True)\n', (7165, 7193), False, 'import math, pickle, os\n'), ((7198, 7234), 'os.makedirs', 'os.makedirs', (['mel_path'], {'exist_ok': '(True)'}), '(mel_path, exist_ok=True)\n', (7209, 7234), False, 'import math, pickle, os\n'), ((7239, 7275), 'os.makedirs', 'os.makedirs', (['wav_path'], {'exist_ok': '(True)'}), '(wav_path, exist_ok=True)\n', (7250, 7275), False, 'import math, pickle, os\n'), ((1849, 1885), 'numpy.dot', 'np.dot', (['self._mel_basis', 'spectrogram'], {}), '(self._mel_basis, spectrogram)\n', (1855, 1885), True, 'import numpy as np\n'), ((2121, 2170), 'numpy.clip', 'np.clip', (['((S - min_level_db) / -min_level_db)', '(0)', '(1)'], {}), '((S - min_level_db) / -min_level_db, 0, 1)\n', (2128, 2170), True, 'import numpy as np\n'), ((2648, 2686), 'numpy.power', 'np.power', (['(10.0)', '((x + reference) * 0.05)'], {}), '(10.0, (x + reference) * 0.05)\n', (2656, 2686), True, 'import numpy as np\n'), ((4076, 4088), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (4082, 4088), True, 'import numpy as np\n'), ((5185, 5204), 'os.listdir', 'os.listdir', (['wav_dir'], {}), '(wav_dir)\n', (5195, 5204), False, 'import math, pickle, os\n'), ((5556, 5571), 'tqdm.tqdm', 'tqdm', (['wav_files'], {}), '(wav_files)\n', (5560, 5571), False, 'from tqdm import tqdm\n'), ((6054, 6081), 'pickle.dump', 'pickle.dump', (['dataset_ids', 'f'], {}), '(dataset_ids, f)\n', (6065, 6081), False, 'import math, pickle, os\n'), ((5224, 5255), 'os.path.join', 'os.path.join', (['wav_dir', 'thisfile'], {}), '(wav_dir, thisfile)\n', (5236, 5255), False, 'import math, pickle, os\n'), ((5775, 5815), 'os.path.join', 'os.path.join', (['mel_path', "(file_id + '.npy')"], {}), "(mel_path, file_id + '.npy')\n", (5787, 5815), False, 'import math, pickle, os\n'), ((5838, 5878), 'os.path.join', 'os.path.join', (['wav_path', "(file_id + '.npy')"], {}), "(wav_path, file_id + '.npy')\n", (5850, 5878), False, 'import math, pickle, os\n'), ((5988, 6032), 'os.path.join', 'os.path.join', (['output_path', '"""dataset_ids.pkl"""'], {}), "(output_path, 'dataset_ids.pkl')\n", (6000, 6032), False, 'import math, pickle, os\n'), ((6704, 6732), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (6719, 6732), False, 'import math, pickle, os\n'), ((1094, 1103), 'numpy.abs', 'np.abs', (['D'], {}), '(D)\n', (1100, 1103), True, 'import numpy as np\n'), ((3270, 3304), 'numpy.random.rand', 'np.random.rand', (['*spectrogram.shape'], {}), '(*spectrogram.shape)\n', (3284, 3304), True, 'import numpy 
as np\n'), ((2771, 2791), 'numpy.maximum', 'np.maximum', (['(1e-05)', 'x'], {}), '(1e-05, x)\n', (2781, 2791), True, 'import numpy as np\n'), ((3683, 3700), 'numpy.angle', 'np.angle', (['rebuilt'], {}), '(rebuilt)\n', (3691, 3700), True, 'import numpy as np\n'), ((3750, 3769), 'numpy.abs', 'np.abs', (['spectrogram'], {}), '(spectrogram)\n', (3756, 3769), True, 'import numpy as np\n'), ((3772, 3787), 'numpy.abs', 'np.abs', (['rebuilt'], {}), '(rebuilt)\n', (3778, 3787), True, 'import numpy as np\n'), ((3868, 3887), 'numpy.abs', 'np.abs', (['spectrogram'], {}), '(spectrogram)\n', (3874, 3887), True, 'import numpy as np\n'), ((3427, 3446), 'numpy.abs', 'np.abs', (['spectrogram'], {}), '(spectrogram)\n', (3433, 3446), True, 'import numpy as np\n'), ((3823, 3850), 'numpy.linalg.norm', 'np.linalg.norm', (['diff', '"""fro"""'], {}), "(diff, 'fro')\n", (3837, 3850), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from PIL import Image, ImageFont, ImageDraw
import sys
tileSize = 48
tiles = []
def cropTile(img):
x = 0
y = 16
for i in range(0, 4):
for j in range(0, 4):
box = (x, y, x + tileSize, y + tileSize)
region = img.crop(box)
tiles.append(region)
x = x + tileSize
x = 0
y = y + 16 + tileSize
if len(sys.argv) < 2:
exit(1)
rawFiles = []
f = open(sys.argv[1], 'r')
for line in f.readlines():
rawFiles.append(line.strip('\n'))
for fileName in rawFiles:
print(fileName)
img = Image.open(fileName)
print(img.format, img.size, img.mode)
cropTile(img)
print(len(tiles))
tileCount = len(tiles)
cx = 0
cy = 0
if tileCount > 32:
cx = 32
cy = int(tileCount / cx)
else:
cx = tileCount
cy = 1
dest = Image.new('RGBA', (cx * tileSize, cy * tileSize))
for j in range(0, cy):
for i in range(0, cx):
dest.paste(tiles[j * cx + i], (i * tileSize, j * tileSize))
font = ImageFont.truetype("../assets/PingFang.ttc", 12)
draw = ImageDraw.Draw(dest)
for j in range(0, cy):
for i in range(0, cx):
draw.text((i * tileSize, j * tileSize), "%d,%d" % (i, j), font=font, fill=256)
dest.save('dest.png')
| [
"PIL.Image.new",
"PIL.ImageDraw.Draw",
"PIL.Image.open",
"PIL.ImageFont.truetype"
] | [((838, 887), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(cx * tileSize, cy * tileSize)'], {}), "('RGBA', (cx * tileSize, cy * tileSize))\n", (847, 887), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((1014, 1062), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""../assets/PingFang.ttc"""', '(12)'], {}), "('../assets/PingFang.ttc', 12)\n", (1032, 1062), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((1070, 1090), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['dest'], {}), '(dest)\n', (1084, 1090), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((596, 616), 'PIL.Image.open', 'Image.open', (['fileName'], {}), '(fileName)\n', (606, 616), False, 'from PIL import Image, ImageFont, ImageDraw\n')] |
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2020 - Sequana Development Team
#
# File author(s):
# <NAME> <<EMAIL>>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
from sequana.lazy import pylab
import colorlog
logger = colorlog.getLogger(__name__)
from sequana.viz import clusterisation
__all__ = ['PCA']
class PCA(clusterisation.Cluster):
"""
.. plot::
:include-source:
from sequana.viz.pca import PCA
from sequana import sequana_data
import pandas as pd
data = sequana_data("test_pca.csv")
df = pd.read_csv(data)
df = df.set_index("Id")
p = PCA(df, colors={
"A1": 'r', "A2": 'r', 'A3': 'r',
"B1": 'b', "B2": 'b', 'B3': 'b'})
p.plot(n_components=2)
"""
def __init__(self, data, colors={}):
super(PCA, self).__init__(data, colors)
def plot_pca_vs_max_features(self, step=100, n_components=2,
progress=True):
"""
.. plot::
:include-source:
from sequana.viz.pca import PCA
from sequana import sequana_data
import pandas as pd
data = sequana_data("test_pca.csv")
df = pd.read_csv(data)
df = df.set_index("Id")
p = PCA(df)
p.plot_pca_vs_max_features()
"""
assert n_components in [2,3,4]
N = len(self.df)
if step > N:
step = N
# We start with at least 5 features
X = range(10, N, step)
from easydev import Progress
pb = Progress(len(X))
Y = []
for i, x in enumerate(X):
res = self.plot(n_components=n_components, max_features=x, show_plot=False)
Y.append(res)
if progress: pb.animate(i+1)
sub = n_components
pylab.subplot(sub,1,1)
pylab.plot(X, [y[0]*100 for y in Y])
pylab.ylabel("PC1 (%)")
pylab.subplot(sub,1,2)
pylab.plot(X, [y[1]*100 for y in Y])
pylab.ylabel("PC2 (%)")
if sub >= 3:
pylab.subplot(sub,1,3)
pylab.plot(X, [y[2]*100 for y in Y])
pylab.ylabel("PC3 (%)")
if sub >= 4:
pylab.subplot(sub,1,4)
pylab.plot(X, [y[3]*100 for y in Y])
pylab.ylabel("PC4 (%)")
def plot(self, n_components=2, transform="log", switch_x=False,
switch_y=False, switch_z=False, colors=None,
max_features=500, show_plot=True):
"""
:param n_components: at number starting at 2 or a value below 1
e.g. 0.95 means select automatically the number of components to
capture 95% of the variance
:param transform: can be 'log' or 'anscombe', log is just log10. count
with zeros, are set to 1
"""
assert transform in ['log', 'anscombe']
from sklearn.decomposition import PCA
import numpy as np
pylab.clf()
pca = PCA(n_components)
data, kept = self.scale_data(transform_method=transform, max_features=max_features)
pca.fit(data.T)
Xr = pca.transform(self.scaler.fit_transform(self.df.loc[kept].T))
self.Xr = Xr
if switch_x:
Xr[:,0] *= -1
if switch_y:
Xr[:,1] *= -1
if switch_z:
Xr[:,2] *= -1
# PC1 vs PC2
if show_plot:
pylab.figure(1)
self._plot(Xr, pca=pca, pc1=0,pc2=1, colors=colors)
if len(pca.explained_variance_ratio_) >= 3:
if show_plot:
pylab.figure(2)
self._plot(Xr, pca=pca, pc1=0,pc2=2, colors=colors)
pylab.figure(3)
self._plot(Xr, pca=pca, pc1=1,pc2=2, colors=colors)
return pca.explained_variance_ratio_
| [
"sklearn.decomposition.PCA",
"sequana.lazy.pylab.subplot",
"sequana.lazy.pylab.ylabel",
"colorlog.getLogger",
"sequana.lazy.pylab.clf",
"sequana.lazy.pylab.figure",
"sequana.lazy.pylab.plot"
] | [((541, 569), 'colorlog.getLogger', 'colorlog.getLogger', (['__name__'], {}), '(__name__)\n', (559, 569), False, 'import colorlog\n'), ((2146, 2170), 'sequana.lazy.pylab.subplot', 'pylab.subplot', (['sub', '(1)', '(1)'], {}), '(sub, 1, 1)\n', (2159, 2170), False, 'from sequana.lazy import pylab\n'), ((2177, 2217), 'sequana.lazy.pylab.plot', 'pylab.plot', (['X', '[(y[0] * 100) for y in Y]'], {}), '(X, [(y[0] * 100) for y in Y])\n', (2187, 2217), False, 'from sequana.lazy import pylab\n'), ((2222, 2245), 'sequana.lazy.pylab.ylabel', 'pylab.ylabel', (['"""PC1 (%)"""'], {}), "('PC1 (%)')\n", (2234, 2245), False, 'from sequana.lazy import pylab\n'), ((2254, 2278), 'sequana.lazy.pylab.subplot', 'pylab.subplot', (['sub', '(1)', '(2)'], {}), '(sub, 1, 2)\n', (2267, 2278), False, 'from sequana.lazy import pylab\n'), ((2285, 2325), 'sequana.lazy.pylab.plot', 'pylab.plot', (['X', '[(y[1] * 100) for y in Y]'], {}), '(X, [(y[1] * 100) for y in Y])\n', (2295, 2325), False, 'from sequana.lazy import pylab\n'), ((2330, 2353), 'sequana.lazy.pylab.ylabel', 'pylab.ylabel', (['"""PC2 (%)"""'], {}), "('PC2 (%)')\n", (2342, 2353), False, 'from sequana.lazy import pylab\n'), ((3270, 3281), 'sequana.lazy.pylab.clf', 'pylab.clf', ([], {}), '()\n', (3279, 3281), False, 'from sequana.lazy import pylab\n'), ((3296, 3313), 'sklearn.decomposition.PCA', 'PCA', (['n_components'], {}), '(n_components)\n', (3299, 3313), False, 'from sklearn.decomposition import PCA\n'), ((2387, 2411), 'sequana.lazy.pylab.subplot', 'pylab.subplot', (['sub', '(1)', '(3)'], {}), '(sub, 1, 3)\n', (2400, 2411), False, 'from sequana.lazy import pylab\n'), ((2422, 2462), 'sequana.lazy.pylab.plot', 'pylab.plot', (['X', '[(y[2] * 100) for y in Y]'], {}), '(X, [(y[2] * 100) for y in Y])\n', (2432, 2462), False, 'from sequana.lazy import pylab\n'), ((2471, 2494), 'sequana.lazy.pylab.ylabel', 'pylab.ylabel', (['"""PC3 (%)"""'], {}), "('PC3 (%)')\n", (2483, 2494), False, 'from sequana.lazy import pylab\n'), ((2528, 2552), 'sequana.lazy.pylab.subplot', 'pylab.subplot', (['sub', '(1)', '(4)'], {}), '(sub, 1, 4)\n', (2541, 2552), False, 'from sequana.lazy import pylab\n'), ((2563, 2603), 'sequana.lazy.pylab.plot', 'pylab.plot', (['X', '[(y[3] * 100) for y in Y]'], {}), '(X, [(y[3] * 100) for y in Y])\n', (2573, 2603), False, 'from sequana.lazy import pylab\n'), ((2612, 2635), 'sequana.lazy.pylab.ylabel', 'pylab.ylabel', (['"""PC4 (%)"""'], {}), "('PC4 (%)')\n", (2624, 2635), False, 'from sequana.lazy import pylab\n'), ((3727, 3742), 'sequana.lazy.pylab.figure', 'pylab.figure', (['(1)'], {}), '(1)\n', (3739, 3742), False, 'from sequana.lazy import pylab\n'), ((3902, 3917), 'sequana.lazy.pylab.figure', 'pylab.figure', (['(2)'], {}), '(2)\n', (3914, 3917), False, 'from sequana.lazy import pylab\n'), ((4002, 4017), 'sequana.lazy.pylab.figure', 'pylab.figure', (['(3)'], {}), '(3)\n', (4014, 4017), False, 'from sequana.lazy import pylab\n')] |
import os
import pytest
from waves import Sound, mono_ttf_gen as _mtg, stereo_ttf_gen as _stg
TESTS_DIR = os.path.abspath(os.path.dirname(__file__))
@pytest.fixture
def mono_filepath():
return os.path.join(TESTS_DIR, "files", "mono.wav")
@pytest.fixture
def stereo_filepath():
return os.path.join(TESTS_DIR, "files", "stereo.wav")
@pytest.fixture
def mono_sound(mono_filepath):
return Sound.from_file(mono_filepath)
@pytest.fixture
def stereo_sound(stereo_filepath):
return Sound.from_file(stereo_filepath)
@pytest.fixture
def mono_ttf_gen():
return _mtg
@pytest.fixture
def stereo_ttf_gen():
return _stg
| [
"os.path.dirname",
"os.path.join",
"waves.Sound.from_file"
] | [((126, 151), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (141, 151), False, 'import os\n'), ((203, 247), 'os.path.join', 'os.path.join', (['TESTS_DIR', '"""files"""', '"""mono.wav"""'], {}), "(TESTS_DIR, 'files', 'mono.wav')\n", (215, 247), False, 'import os\n'), ((300, 346), 'os.path.join', 'os.path.join', (['TESTS_DIR', '"""files"""', '"""stereo.wav"""'], {}), "(TESTS_DIR, 'files', 'stereo.wav')\n", (312, 346), False, 'import os\n'), ((407, 437), 'waves.Sound.from_file', 'Sound.from_file', (['mono_filepath'], {}), '(mono_filepath)\n', (422, 437), False, 'from waves import Sound, mono_ttf_gen as _mtg, stereo_ttf_gen as _stg\n'), ((502, 534), 'waves.Sound.from_file', 'Sound.from_file', (['stereo_filepath'], {}), '(stereo_filepath)\n', (517, 534), False, 'from waves import Sound, mono_ttf_gen as _mtg, stereo_ttf_gen as _stg\n')] |
import opytimark.utils.loader as l
from opytimark.markers.cec import year_2005
def test_F1():
f = year_2005.F1()
x = l.load_cec_auxiliary('F1_o', '2005')
y = f(x)
assert y == -450
def test_F2():
f = year_2005.F2()
x = l.load_cec_auxiliary('F2_o', '2005')
y = f(x)
assert y == -450
def test_F3():
f = year_2005.F3()
x = l.load_cec_auxiliary('F3_o', '2005')
x = x[:50]
y = f(x)
assert y == -450
def test_F4():
f = year_2005.F4()
x = l.load_cec_auxiliary('F4_o', '2005')
y = f(x)
assert y == -450
def test_F5():
f = year_2005.F5()
x = l.load_cec_auxiliary('F5_o', '2005')
x[:int(x.shape[0]/4)] = -100
x[int(3*x.shape[0]/4):] = 100
y = f(x)
assert y == -310
def test_F6():
f = year_2005.F6()
x = l.load_cec_auxiliary('F6_o', '2005')
x += 1
y = f(x)
assert y == 390
def test_F7():
f = year_2005.F7()
x = l.load_cec_auxiliary('F7_o', '2005')
x = x[:50]
y = f(x)
assert y == -180
def test_F8():
f = year_2005.F8()
x = l.load_cec_auxiliary('F8_o', '2005')
x = x[:50]
for j in range(int(x.shape[0]/2)):
x[2*j] = -32 * x[2*j+1]
y = f(x)
assert y == -140
def test_F9():
f = year_2005.F9()
x = l.load_cec_auxiliary('F9_o', '2005')
y = f(x)
assert y == -330
def test_F10():
f = year_2005.F10()
x = l.load_cec_auxiliary('F10_o', '2005')
x = x[:50]
y = f(x)
assert y == -330
def test_F11():
f = year_2005.F11()
x = l.load_cec_auxiliary('F11_o', '2005')
x = x[:50]
y = f(x)
assert y == 90
def test_F12():
f = year_2005.F12()
x = l.load_cec_auxiliary('F12_alpha', '2005')
y = f(x)
assert y == -460
def test_F13():
f = year_2005.F13()
x = l.load_cec_auxiliary('F13_o', '2005')
y = f(x)
assert y == -130
def test_F14():
f = year_2005.F14()
x = l.load_cec_auxiliary('F14_o', '2005')
x = x[:50]
y = f(x)
assert y == -300
def test_F15():
f = year_2005.F15()
x = l.load_cec_auxiliary('F15_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 120
def test_F16():
f = year_2005.F16()
x = l.load_cec_auxiliary('F16_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 120
def test_F17():
f = year_2005.F17()
x = l.load_cec_auxiliary('F17_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 120
def test_F18():
f = year_2005.F18()
x = l.load_cec_auxiliary('F18_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 10
def test_F19():
f = year_2005.F19()
x = l.load_cec_auxiliary('F19_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 10
def test_F20():
f = year_2005.F20()
x = l.load_cec_auxiliary('F20_o', '2005')
x = x[0][:50]
for j in range(int(x.shape[0]/2)):
x[2*j+1] = 5
y = f(x)
assert y == 10
def test_F21():
f = year_2005.F21()
x = l.load_cec_auxiliary('F21_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 360
def test_F22():
f = year_2005.F22()
x = l.load_cec_auxiliary('F22_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 360
def test_F23():
f = year_2005.F23()
x = l.load_cec_auxiliary('F23_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 360
def test_F24():
f = year_2005.F24()
x = l.load_cec_auxiliary('F24_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 260
def test_F25():
f = year_2005.F25()
x = l.load_cec_auxiliary('F25_o', '2005')
x = x[0][:50]
y = f(x)
assert y == 260
| [
"opytimark.markers.cec.year_2005.F1",
"opytimark.markers.cec.year_2005.F14",
"opytimark.markers.cec.year_2005.F8",
"opytimark.markers.cec.year_2005.F22",
"opytimark.markers.cec.year_2005.F2",
"opytimark.markers.cec.year_2005.F23",
"opytimark.markers.cec.year_2005.F10",
"opytimark.markers.cec.year_2005.F20",
"opytimark.markers.cec.year_2005.F21",
"opytimark.markers.cec.year_2005.F5",
"opytimark.utils.loader.load_cec_auxiliary",
"opytimark.markers.cec.year_2005.F7",
"opytimark.markers.cec.year_2005.F3",
"opytimark.markers.cec.year_2005.F4",
"opytimark.markers.cec.year_2005.F12",
"opytimark.markers.cec.year_2005.F15",
"opytimark.markers.cec.year_2005.F13",
"opytimark.markers.cec.year_2005.F18",
"opytimark.markers.cec.year_2005.F9",
"opytimark.markers.cec.year_2005.F25",
"opytimark.markers.cec.year_2005.F19",
"opytimark.markers.cec.year_2005.F24",
"opytimark.markers.cec.year_2005.F6",
"opytimark.markers.cec.year_2005.F16",
"opytimark.markers.cec.year_2005.F17",
"opytimark.markers.cec.year_2005.F11"
] | [((104, 118), 'opytimark.markers.cec.year_2005.F1', 'year_2005.F1', ([], {}), '()\n', (116, 118), False, 'from opytimark.markers.cec import year_2005\n'), ((128, 164), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F1_o"""', '"""2005"""'], {}), "('F1_o', '2005')\n", (148, 164), True, 'import opytimark.utils.loader as l\n'), ((226, 240), 'opytimark.markers.cec.year_2005.F2', 'year_2005.F2', ([], {}), '()\n', (238, 240), False, 'from opytimark.markers.cec import year_2005\n'), ((250, 286), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F2_o"""', '"""2005"""'], {}), "('F2_o', '2005')\n", (270, 286), True, 'import opytimark.utils.loader as l\n'), ((348, 362), 'opytimark.markers.cec.year_2005.F3', 'year_2005.F3', ([], {}), '()\n', (360, 362), False, 'from opytimark.markers.cec import year_2005\n'), ((372, 408), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F3_o"""', '"""2005"""'], {}), "('F3_o', '2005')\n", (392, 408), True, 'import opytimark.utils.loader as l\n'), ((485, 499), 'opytimark.markers.cec.year_2005.F4', 'year_2005.F4', ([], {}), '()\n', (497, 499), False, 'from opytimark.markers.cec import year_2005\n'), ((509, 545), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F4_o"""', '"""2005"""'], {}), "('F4_o', '2005')\n", (529, 545), True, 'import opytimark.utils.loader as l\n'), ((607, 621), 'opytimark.markers.cec.year_2005.F5', 'year_2005.F5', ([], {}), '()\n', (619, 621), False, 'from opytimark.markers.cec import year_2005\n'), ((631, 667), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F5_o"""', '"""2005"""'], {}), "('F5_o', '2005')\n", (651, 667), True, 'import opytimark.utils.loader as l\n'), ((796, 810), 'opytimark.markers.cec.year_2005.F6', 'year_2005.F6', ([], {}), '()\n', (808, 810), False, 'from opytimark.markers.cec import year_2005\n'), ((820, 856), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F6_o"""', '"""2005"""'], {}), "('F6_o', '2005')\n", (840, 856), True, 'import opytimark.utils.loader as l\n'), ((928, 942), 'opytimark.markers.cec.year_2005.F7', 'year_2005.F7', ([], {}), '()\n', (940, 942), False, 'from opytimark.markers.cec import year_2005\n'), ((952, 988), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F7_o"""', '"""2005"""'], {}), "('F7_o', '2005')\n", (972, 988), True, 'import opytimark.utils.loader as l\n'), ((1065, 1079), 'opytimark.markers.cec.year_2005.F8', 'year_2005.F8', ([], {}), '()\n', (1077, 1079), False, 'from opytimark.markers.cec import year_2005\n'), ((1089, 1125), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F8_o"""', '"""2005"""'], {}), "('F8_o', '2005')\n", (1109, 1125), True, 'import opytimark.utils.loader as l\n'), ((1274, 1288), 'opytimark.markers.cec.year_2005.F9', 'year_2005.F9', ([], {}), '()\n', (1286, 1288), False, 'from opytimark.markers.cec import year_2005\n'), ((1298, 1334), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F9_o"""', '"""2005"""'], {}), "('F9_o', '2005')\n", (1318, 1334), True, 'import opytimark.utils.loader as l\n'), ((1397, 1412), 'opytimark.markers.cec.year_2005.F10', 'year_2005.F10', ([], {}), '()\n', (1410, 1412), False, 'from opytimark.markers.cec import year_2005\n'), ((1422, 1459), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F10_o"""', '"""2005"""'], {}), "('F10_o', '2005')\n", (1442, 1459), True, 'import opytimark.utils.loader 
as l\n'), ((1537, 1552), 'opytimark.markers.cec.year_2005.F11', 'year_2005.F11', ([], {}), '()\n', (1550, 1552), False, 'from opytimark.markers.cec import year_2005\n'), ((1562, 1599), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F11_o"""', '"""2005"""'], {}), "('F11_o', '2005')\n", (1582, 1599), True, 'import opytimark.utils.loader as l\n'), ((1675, 1690), 'opytimark.markers.cec.year_2005.F12', 'year_2005.F12', ([], {}), '()\n', (1688, 1690), False, 'from opytimark.markers.cec import year_2005\n'), ((1700, 1741), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F12_alpha"""', '"""2005"""'], {}), "('F12_alpha', '2005')\n", (1720, 1741), True, 'import opytimark.utils.loader as l\n'), ((1804, 1819), 'opytimark.markers.cec.year_2005.F13', 'year_2005.F13', ([], {}), '()\n', (1817, 1819), False, 'from opytimark.markers.cec import year_2005\n'), ((1829, 1866), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F13_o"""', '"""2005"""'], {}), "('F13_o', '2005')\n", (1849, 1866), True, 'import opytimark.utils.loader as l\n'), ((1929, 1944), 'opytimark.markers.cec.year_2005.F14', 'year_2005.F14', ([], {}), '()\n', (1942, 1944), False, 'from opytimark.markers.cec import year_2005\n'), ((1954, 1991), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F14_o"""', '"""2005"""'], {}), "('F14_o', '2005')\n", (1974, 1991), True, 'import opytimark.utils.loader as l\n'), ((2069, 2084), 'opytimark.markers.cec.year_2005.F15', 'year_2005.F15', ([], {}), '()\n', (2082, 2084), False, 'from opytimark.markers.cec import year_2005\n'), ((2094, 2131), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F15_o"""', '"""2005"""'], {}), "('F15_o', '2005')\n", (2114, 2131), True, 'import opytimark.utils.loader as l\n'), ((2211, 2226), 'opytimark.markers.cec.year_2005.F16', 'year_2005.F16', ([], {}), '()\n', (2224, 2226), False, 'from opytimark.markers.cec import year_2005\n'), ((2236, 2273), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F16_o"""', '"""2005"""'], {}), "('F16_o', '2005')\n", (2256, 2273), True, 'import opytimark.utils.loader as l\n'), ((2353, 2368), 'opytimark.markers.cec.year_2005.F17', 'year_2005.F17', ([], {}), '()\n', (2366, 2368), False, 'from opytimark.markers.cec import year_2005\n'), ((2378, 2415), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F17_o"""', '"""2005"""'], {}), "('F17_o', '2005')\n", (2398, 2415), True, 'import opytimark.utils.loader as l\n'), ((2495, 2510), 'opytimark.markers.cec.year_2005.F18', 'year_2005.F18', ([], {}), '()\n', (2508, 2510), False, 'from opytimark.markers.cec import year_2005\n'), ((2520, 2557), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F18_o"""', '"""2005"""'], {}), "('F18_o', '2005')\n", (2540, 2557), True, 'import opytimark.utils.loader as l\n'), ((2636, 2651), 'opytimark.markers.cec.year_2005.F19', 'year_2005.F19', ([], {}), '()\n', (2649, 2651), False, 'from opytimark.markers.cec import year_2005\n'), ((2661, 2698), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F19_o"""', '"""2005"""'], {}), "('F19_o', '2005')\n", (2681, 2698), True, 'import opytimark.utils.loader as l\n'), ((2777, 2792), 'opytimark.markers.cec.year_2005.F20', 'year_2005.F20', ([], {}), '()\n', (2790, 2792), False, 'from opytimark.markers.cec import year_2005\n'), ((2802, 2839), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', 
(['"""F20_o"""', '"""2005"""'], {}), "('F20_o', '2005')\n", (2822, 2839), True, 'import opytimark.utils.loader as l\n'), ((2979, 2994), 'opytimark.markers.cec.year_2005.F21', 'year_2005.F21', ([], {}), '()\n', (2992, 2994), False, 'from opytimark.markers.cec import year_2005\n'), ((3004, 3041), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F21_o"""', '"""2005"""'], {}), "('F21_o', '2005')\n", (3024, 3041), True, 'import opytimark.utils.loader as l\n'), ((3121, 3136), 'opytimark.markers.cec.year_2005.F22', 'year_2005.F22', ([], {}), '()\n', (3134, 3136), False, 'from opytimark.markers.cec import year_2005\n'), ((3146, 3183), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F22_o"""', '"""2005"""'], {}), "('F22_o', '2005')\n", (3166, 3183), True, 'import opytimark.utils.loader as l\n'), ((3263, 3278), 'opytimark.markers.cec.year_2005.F23', 'year_2005.F23', ([], {}), '()\n', (3276, 3278), False, 'from opytimark.markers.cec import year_2005\n'), ((3288, 3325), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F23_o"""', '"""2005"""'], {}), "('F23_o', '2005')\n", (3308, 3325), True, 'import opytimark.utils.loader as l\n'), ((3405, 3420), 'opytimark.markers.cec.year_2005.F24', 'year_2005.F24', ([], {}), '()\n', (3418, 3420), False, 'from opytimark.markers.cec import year_2005\n'), ((3430, 3467), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F24_o"""', '"""2005"""'], {}), "('F24_o', '2005')\n", (3450, 3467), True, 'import opytimark.utils.loader as l\n'), ((3547, 3562), 'opytimark.markers.cec.year_2005.F25', 'year_2005.F25', ([], {}), '()\n', (3560, 3562), False, 'from opytimark.markers.cec import year_2005\n'), ((3572, 3609), 'opytimark.utils.loader.load_cec_auxiliary', 'l.load_cec_auxiliary', (['"""F25_o"""', '"""2005"""'], {}), "('F25_o', '2005')\n", (3592, 3609), True, 'import opytimark.utils.loader as l\n')] |
from collections import defaultdict
from pprint import pprint
import csv
BASEDIR = 'COVID-19/csse_covid_19_data/csse_covid_19_time_series/'
BASEFILE = BASEDIR + 'time_series_covid19_{}.csv'
LABELS = ['confirmed_global', 'confirmed_US', 'deaths_global', 'deaths_US', 'recovered_global']
def read_US(label):
regions = dict()
meta = dict()
filename = BASEFILE.format(label)
with open(filename, 'r') as infile:
label_reader = csv.reader(infile, delimiter=',', quotechar='"')
headers = next(label_reader)
dates = headers[12:]
for row in label_reader:
_, _, _, _, fips, county, state, region, lat, lng, _, pop, *counts = row
try:
counts = list(map(int, counts))
except ValueError:
print(counts)
print(headers)
print(dates)
print(row)
regions[(county, state, region)] = counts
meta[(county, state, region)] = dict(fips=fips, county=county, state=state, region=region, lat=lat, lng=lng, population=pop)
return regions, dates, meta
def read_global(label):
regions = dict()
filename = BASEFILE.format(label)
with open(filename, 'r') as infile:
label_reader = csv.reader(infile, delimiter=',', quotechar='"')
headers = next(label_reader)
dates = headers[4:]
for row in label_reader:
province, country, _, _, *counts = row
try:
counts = list(map(int, counts))
except ValueError:
print(headers)
print(row)
regions[(province, country)] = counts
return regions, dates, None
def read():
regions = defaultdict(dict)
us = defaultdict(dict)
for label in LABELS:
reader = read_US if 'US' in label else read_global
output = us if 'US' in label else regions
label_regions, dates, meta = reader(label)
for region_name, counts in label_regions.items():
output[region_name][label] = counts
if meta is not None:
output['meta'] = meta
return regions, us
def main():
data = read()
if __name__ == '__main__':
main()
| [
"collections.defaultdict",
"csv.reader"
] | [((1729, 1746), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1740, 1746), False, 'from collections import defaultdict\n'), ((1761, 1778), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1772, 1778), False, 'from collections import defaultdict\n'), ((452, 500), 'csv.reader', 'csv.reader', (['infile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(infile, delimiter=\',\', quotechar=\'"\')\n', (462, 500), False, 'import csv\n'), ((1268, 1316), 'csv.reader', 'csv.reader', (['infile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(infile, delimiter=\',\', quotechar=\'"\')\n', (1278, 1316), False, 'import csv\n')] |
# -*- coding: utf-8 -*-
import unittest
import clue
class TestClue(unittest.TestCase):
def test_clue(self):
c = clue.ClueGame(123, "foo")
for i in range(10):
c.register_user(i, str(i))
c.start_game()
# Check deck
deck = []
for uid, u in c._users.items():
deck.extend(u._deck)
self.assertEqual(set(deck), set(c._deck))
user, what = c.suggest(0, "1", clue.TOOLS[0], clue.ROOMS[0])
if user is not None:
self.assertGreater(len(what), 0)
if __name__ == '__main__':
unittest.main()
'' | [
"unittest.main",
"clue.ClueGame"
] | [((595, 610), 'unittest.main', 'unittest.main', ([], {}), '()\n', (608, 610), False, 'import unittest\n'), ((128, 153), 'clue.ClueGame', 'clue.ClueGame', (['(123)', '"""foo"""'], {}), "(123, 'foo')\n", (141, 153), False, 'import clue\n')] |