hexsha (stringlengths 40) | size (int64 5-2.06M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24 ⌀) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24 ⌀) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24 ⌀) | content (stringlengths 5-2.06M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.03M) | alphanum_fraction (float64 0-1) | count_classes (int64 0-1.6M) | score_classes (float64 0-1) | count_generators (int64 0-651k) | score_generators (float64 0-1) | count_decorators (int64 0-990k) | score_decorators (float64 0-1) | count_async_functions (int64 0-235k) | score_async_functions (float64 0-1) | count_documentation (int64 0-1.04M) | score_documentation (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
90948ab3b394c7cb6e8df8160515b81630f1c311 | 4,510 | py | Python | lib/site_config.py | bruceravel/xraylarch | a8179208872d43bd23453fa0c64680e11bc2b5ed | ["BSD-3-Clause"] | null | null | null | lib/site_config.py | bruceravel/xraylarch | a8179208872d43bd23453fa0c64680e11bc2b5ed | ["BSD-3-Clause"] | null | null | null | lib/site_config.py | bruceravel/xraylarch | a8179208872d43bd23453fa0c64680e11bc2b5ed | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
"""
site configuration for larch:
init_files: list of larch files run (in order) on startup
module_path: list of directories to search for larch code
history_file:
"""
from __future__ import print_function
import sys
import os
from os.path import exists, abspath, join
from .utils import get_homedir, nativepath
from .version import __version__ as larch_version
def pjoin(*args):
return nativepath(join(*args))
##
# set system-wide and local larch folders
# larchdir = sys.exec_prefix + 'share' + 'larch'
# usr_larchdir = get_homedir() + '.larch' (#unix)
# = get_homedir() + 'larch' (#win)
##
larchdir = pjoin(sys.exec_prefix, 'share', 'larch')
home_dir = get_homedir()
usr_larchdir = pjoin(home_dir, '.larch')
if os.name == 'nt':
usr_larchdir = pjoin(home_dir, 'larch')
if 'LARCHDIR' in os.environ:
usr_larchdir = nativepath(os.environ['LARCHDIR'])
##
## names (and loading order) for core plugin modules
core_plugins = ('std', 'math', 'io', 'wx', 'xray', 'xrf', 'xafs')
# frozen executables, as from cx_freeze, will have
# these paths to be altered...
if hasattr(sys, 'frozen'):
if os.name == 'nt':
try:
tdir, exe = os.path.split(sys.executable)
toplevel, bindir = os.path.split(tdir)
larchdir = os.path.abspath(toplevel)
except:
pass
elif sys.platform.lower().startswith('darwin'):
tdir, exe = os.path.split(sys.executable)
toplevel, bindir = os.path.split(tdir)
larchdir = pjoin(toplevel, 'Resources', 'larch')
modules_path = []
plugins_path = []
_path = [usr_larchdir, larchdir]
if 'LARCHPATH' in os.environ:
_path.extend([nativepath(s) for s in os.environ['LARCHPATH'].split(':')])
for pth in _path:
mdir = pjoin(pth, 'modules')
if exists(mdir) and mdir not in modules_path:
modules_path.append(mdir)
pdir = pjoin(pth, 'plugins')
if exists(pdir) and pdir not in plugins_path:
plugins_path.append(pdir)
# initialization larch files to be run on startup
init_files = [pjoin(usr_larchdir, 'init.lar')]
if 'LARCHSTARTUP' in os.environ:
startup = os.environ['LARCHSTARTUP']
if exists(startup):
init_files = [nativepath(startup)]
# history file:
history_file = pjoin(usr_larchdir, 'history.lar')
def make_user_larchdirs():
"""create user's larch directories"""
files = {'init.lar': 'put custom startup larch commands:',
'history.lar': 'history of larch commands:',
'history_larchgui.lar': 'history of larch_gui commands:',
}
subdirs = {'matplotlib': 'matplotlib may put files here',
'dlls': 'put dlls here',
'modules': 'put custom larch or python modules here',
'plugins': 'put custom larch plugins here'}
def make_dir(dname):
if not exists(dname):
try:
os.mkdir(dname)
except (OSError, TypeError):
print(sys.exc_info()[1])
def write_file(fname, text):
if not exists(fname):
try:
f = open(fname, 'w')
f.write('# %s\n' % text)
f.close()
except:
print(sys.exc_info()[1])
make_dir(usr_larchdir)
for fname, text in files.items():
write_file(pjoin(usr_larchdir, fname), text)
for sdir, text in subdirs.items():
sdir = pjoin(usr_larchdir, sdir)
make_dir(sdir)
write_file(pjoin(sdir, 'README'), text)
def show_site_config():
print( """=== Larch Configuration
larch version: %s
sys executable: %s
sys is frozen: %s
system larch dir: %s
users larch dir: %s
users history_file: %s
users startup files: %s
modules search path: %s
plugins search path: %s
========================
""" % (larch_version, sys.executable,
repr(getattr(sys, 'frozen', False)),
larchdir, usr_larchdir,
history_file, init_files,
modules_path, plugins_path))
def system_settings():
"""set system-specific Environmental Variables, and make sure
that the user larchdirs exist.
This is run by the interpreter on startup."""
# ubuntu / unity hack
if sys.platform.lower().startswith('linux'):
if 'ubuntu' in os.uname()[3].lower():
os.environ['UBUNTU_MENUPROXY'] = '0'
make_user_larchdirs()
if __name__ == '__main__':
show_site_config()
| 29.86755 | 77 | 0.614634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,689 | 0.374501 |
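A brief illustrative sketch of how the environment overrides read by the module above can be used; the paths and the import line below are assumptions for illustration, not taken from the repository:
# LARCHDIR replaces the per-user larch directory and LARCHSTARTUP the startup file;
# both are read at import time, so they must be set before the module is imported.
#   import os
#   os.environ['LARCHDIR'] = '/home/user/my_larch'
#   os.environ['LARCHSTARTUP'] = '/home/user/my_larch/init.lar'
#   from lib import site_config
#   site_config.show_site_config()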
909490610fb0cdfc5860262dba5b4c657bee2b6b | 2,898 | py | Python | gpath/path_similarity.py | insilichem/gpathfinder | e6c7df14d473857acb007efbae3cc7b4fee1b330 | ["Apache-2.0"] | 5 | 2020-03-22T20:21:47.000Z | 2022-03-08T07:50:25.000Z | gpath/path_similarity.py | insilichem/gpathfinder | e6c7df14d473857acb007efbae3cc7b4fee1b330 | ["Apache-2.0"] | 2 | 2020-04-09T10:49:26.000Z | 2022-03-08T04:37:27.000Z | gpath/path_similarity.py | insilichem/gpathfinder | e6c7df14d473857acb007efbae3cc7b4fee1b330 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############
# GPathFinder: Identification of ligand pathways by a multi-objective
# genetic algorithm
#
# https://github.com/insilichem/gpathfinder
#
# Copyright 2019 José-Emilio Sánchez Aparicio, Giuseppe Sciortino,
# Daniel Villadrich Herrmannsdoerfer, Pablo Orenes Chueca,
# Jaime Rodríguez-Guerra Pedregal and Jean-Didier Maréchal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############
"""
This module contains the similarity functions that are used to discard
individuals that are not different enough.
This criterion of similarity will be applied in the case of two
``pathways`` individuals with the same score. Then, if they are similar
enough according to this module, one of them will be discarded.
"""
from __future__ import print_function, division
import logging
import numpy as np
logger = logging.getLogger(__name__)
def pathways_rmsd(ind1, ind2, subject, threshold, *args, **kwargs):
"""
Calculates the RMSD between the positions of the ``pathways`` genes
belonging two the two individuals object of study. If the squared
RMSD is less or equal than the squared threshold, we consider that
the two pathways are identical and one of them will be discarded.
Parameters
----------
ind1 : gpath.base.Individual
ind2 : gpath.base.Individual
subject: str
Name of Gpath ``pathway`` gene instance to measure.
threshold : float
Maximum RMSD value in Angstroms to consider two individuals as
similar.
If ``rmsd > threshold``, they are considered different.
Returns
-------
bool
True if ``rmsd`` is within threshold, False otherwise.
It will always return False if number of points of the pathway
is not equal in the two Individuals.
"""
coords1 = np.array([elem[:] for elem in \
ind1.genes[subject].allele['positions']])
coords2 = np.array([elem[:] for elem in \
ind2.genes[subject].allele['positions']])
if coords1.shape[0] != coords2.shape[0]:
return False
rmsd_squared = _rmsd_squared(coords1, coords2)
if rmsd_squared > threshold*threshold:
return False
return True
def _rmsd_squared(coords1, coords2):
diff = coords1 - coords2
return (diff * diff).sum() / coords1.shape[0] | 36.683544 | 77 | 0.689786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,139 | 0.737078 |
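A minimal numeric sketch of the similarity test described above, applied directly to two coordinate arrays (building full gpath Individuals is beyond this example; the coordinates and threshold are made-up values):

import numpy as np

coords1 = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
coords2 = np.array([[0.1, 0.0, 0.0], [1.1, 0.0, 0.0], [2.1, 0.0, 0.0]])
threshold = 0.5  # Angstroms

diff = coords1 - coords2
rmsd_squared = (diff * diff).sum() / coords1.shape[0]  # same formula as _rmsd_squared
similar = rmsd_squared <= threshold * threshold        # True: the two pathways count as identical
print(rmsd_squared, similar)                           # ~0.01 True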
9096de4357058d79ebeafc310708bd4b4560fdc0 | 1,666 | py | Python | Schedule/groupagenda/urls.py | f0rdream/party-time | 3b596043627383859042a6e70167e4304bab9a92 | ["MIT"] | null | null | null | Schedule/groupagenda/urls.py | f0rdream/party-time | 3b596043627383859042a6e70167e4304bab9a92 | ["MIT"] | null | null | null | Schedule/groupagenda/urls.py | f0rdream/party-time | 3b596043627383859042a6e70167e4304bab9a92 | ["MIT"] | null | null | null |
from django.conf.urls import url, include
from .views import (GroupListAPIView,
GroupCreateAPIView,
AgendaListAPIView,
AgendaDetailAPIView,
AgendaCreateAPIView,
AgendaPostAPIView,
agenda_create,
AgendaRefreshAPIView,
NumberInGroupAPIView,
GroupProfileDetailAPIView,
GroupProfileUpdateAPIView,
number_in_group)
urlpatterns = [
url(r'^group/$', GroupListAPIView.as_view(), name="group_list"),
url(r'^group/create/$', GroupCreateAPIView.as_view(), name="group_create"),
url(r'agenda-list/$', AgendaListAPIView.as_view(), name="agenda_list"),
url(r'^(?P<group_id>\d+)/(?P<pk>\d+)/detail/$', AgendaDetailAPIView.as_view(), name='agenda_detail'),
# url(r'^create/$', AgendaCreateAPIView.as_view(), name='agenda_create'),
url(r'^(?P<group_id>\d+)/post2/$', AgendaPostAPIView.as_view(), name='agenda_create2'), # recommended api
url(r'^(?P<group_id>\d+)/post/$', agenda_create, name='agenda_create'),
url(r'^(?P<group_id>\d+)/(?P<pk>\d+)/refresh/$', AgendaRefreshAPIView.as_view(), name='agenda_refresh'),
url(r'^(?P<id>\d+)/number/$', NumberInGroupAPIView.as_view(), name="number"),
url(r'^(?P<group_id>\d+)/(?P<date>\d{4}-\d{2}-\d{2})/number/$', number_in_group, name="number2"),
url(r'^(?P<group_id>\d+)/group-profile/$', GroupProfileDetailAPIView.as_view(), name="group_profile"),
url(r'^(?P<group_id>\d+)/group-profile/update/$', GroupProfileUpdateAPIView.as_view(), name="group_profile_update"),
]
| 57.448276 | 120 | 0.614046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 595 | 0.357143 |
90988846045a582c1eb61f51d1fdf6a5c9b664f2 | 312 | py | Python | examples/admin.py | kimbackdoo/Web-Cralwer | 6a92ec00ea2273f228b8c304cd596ad9120c4709 | ["MIT"] | null | null | null | examples/admin.py | kimbackdoo/Web-Cralwer | 6a92ec00ea2273f228b8c304cd596ad9120c4709 | ["MIT"] | null | null | null | examples/admin.py | kimbackdoo/Web-Cralwer | 6a92ec00ea2273f228b8c304cd596ad9120c4709 | ["MIT"] | null | null | null |
from django.contrib import admin
# Register your models here.
#models에서 Shop을 임폴트
from .models import Shop
from .models import Parsed_data
from .models import Img_data
from .models import Other
admin.site.register(Shop)
admin.site.register(Parsed_data)
admin.site.register(Img_data)
admin.site.register(Other)
| 22.285714 | 32 | 0.814103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.182099 |
909914b3df8b80013e491c569d64a1ce700cd6e4 | 630 | py | Python | main_test_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | ["Apache-2.0"] | 8 | 2022-01-19T20:47:36.000Z | 2022-03-20T05:11:04.000Z | main_test_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | ["Apache-2.0"] | 2 | 2022-02-17T06:14:25.000Z | 2022-02-17T08:43:57.000Z | main_test_dad.py | AdamLohSg/GTA | bf6a745a6e28e365466e76360a15ca10ce61e009 | ["Apache-2.0"] | 5 | 2022-02-15T04:16:27.000Z | 2022-03-29T01:21:41.000Z |
import torch
from models.gta import GraphTemporalEmbedding
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
x = torch.randn(32, 96, 122)
model = GraphTemporalEmbedding(122, 96, 3)
y = model(x)
print(y.size())
# model = AdaGraphSage(num_nodes=10, seq_len=96, label_len=48, out_len=24)
# model = model.double().to(device)
# x = torch.randn(32, 96, 10, requires_grad=True).double().to(device)
# y = torch.randn(32, 48, 10, requires_grad=True).double().to(device)
# # print(out.size())
# out = model(x, y, None, None)
# print(out.size()) | 39.375 | 78 | 0.647619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 339 | 0.538095 |
909961221deb2afd6f990975d13c32b0c0400f10 | 638 | py | Python | daiquiri_client/auth.py | aipescience/django-daiquiri-client | 30521dc7393b65ef51a5fd42b63b1dece46a2d5c | ["Apache-2.0"] | null | null | null | daiquiri_client/auth.py | aipescience/django-daiquiri-client | 30521dc7393b65ef51a5fd42b63b1dece46a2d5c | ["Apache-2.0"] | null | null | null | daiquiri_client/auth.py | aipescience/django-daiquiri-client | 30521dc7393b65ef51a5fd42b63b1dece46a2d5c | ["Apache-2.0"] | null | null | null |
class Auth():
def __init__(self, client):
self.client = client
def get_profiles(self):
return self.client.get('/auth/api/profiles/', {'page_size': 10000})['results']
def get_groups(self):
return self.client.get('/auth/api/groups/')
def get_group_map(self):
return {group['id']: group['name'] for group in self.get_groups()}
def activate_profile(self, pk):
return self.client.put('/auth/api/profiles/%d/activate/' % pk, {})
def update_profile_attributes(self, pk, attributes):
return self.client.patch('/auth/api/profiles/%d/' % pk, {'attributes': attributes})
| 31.9 | 91 | 0.639498 | 637 | 0.998433 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.217868 |
909acbc6fed7077e7d615e7ea5b4fd6ba9538288 | 954 | py | Python | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | ["Unlicense"] | null | null | null | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | ["Unlicense"] | null | null | null | CSS/spiraleFile.py | NsiLycee/premiere | 2814a21860e227e2db01ea201b1c4d99723a0562 | ["Unlicense"] | null | null | null |
'''
Author: Joël Dendaletche
Purpose: draw a geometric figure using the Turtle library.
The project uses a queue ("file") to iterate the computation of each new point.
The coordinates of the vertices of a polygon are placed in a queue.
The algorithm computes the coordinates of a new point in order to draw a line that
starts at the first point of the queue and passes through the second one, extending
the segment by a fixed fraction of the distance between the two points. The second
point is then replaced by the new one. At the next iteration, the segment starts from
the new point and passes through the next point in the queue, which is in turn
replaced by the new point, and so on.
'''
import turtle
board = turtle.Turtle()
listePoints = [(0,0),(10,0),(5, int(10*75**.5))]
print(listePoints)
for x, y in listePoints :
board.goto(x, y)
turtle.done() | 45.428571 | 121 | 0.697065 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 793 | 0.815844 |
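A sketch of the queue-based construction described in the file's docstring, which the code above only begins (it draws the starting triangle); the fraction, iteration count and starting vertices below are choices made for illustration:

from collections import deque
import turtle

fraction = 0.1                                   # extension beyond the target point
queue = deque([(0, 0), (10, 0), (5, 75**.5)])    # starting polygon (assumed vertices)

board = turtle.Turtle()
current = queue.popleft()                        # the segment starts at the first point
board.penup(); board.goto(current); board.pendown()

for _ in range(60):
    target = queue.popleft()                     # second point: the segment passes through it
    # extend the segment current -> target by `fraction` of its length
    new = (target[0] + fraction * (target[0] - current[0]),
           target[1] + fraction * (target[1] - current[1]))
    board.goto(new)
    queue.append(new)                            # the consumed point is replaced by the new one
    current = new

turtle.done()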
909acc24e11a5c6671af7463f6c79ae6bbfe3286 | 20,420 | py | Python | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | ["MIT"] | null | null | null | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | ["MIT"] | null | null | null | network/modules/spconv_unet.py | alexisgroshenry/NPM3D_DSNet | d1a2ec071728dcb3c733ecdee3a27f4534b67f33 | ["MIT"] | null | null | null |
# -*- coding:utf-8 -*-
# author: Xinge
# @file: spconv_unet.py
# @time: 2020/06/22 15:01
import time
import numpy as np
import spconv
import torch
import torch.nn.functional as F
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False, indice_key=indice_key)
def conv1x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 3), stride=stride,
padding=(0, 1, 1), bias=False, indice_key=indice_key)
def conv1x1x3(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 1, 3), stride=stride,
padding=(0, 0, 1), bias=False, indice_key=indice_key)
def conv1x3x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(1, 3, 1), stride=stride,
padding=(0, 1, 0), bias=False, indice_key=indice_key)
def conv3x1x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 1), stride=stride,
padding=(1, 0, 0), bias=False, indice_key=indice_key)
def conv3x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=(3, 1, 3), stride=stride,
padding=(1, 0, 1), bias=False, indice_key=indice_key)
def conv1x1(in_planes, out_planes, stride=1, indice_key=None):
return spconv.SubMConv3d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=1, bias=False, indice_key=indice_key)
class ResContextBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ResContextBlock, self).__init__()
self.conv1 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.LeakyReLU()
self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.act1_2 = nn.LeakyReLU()
self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.act2 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
self.act3 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
shortcut = self.conv1_2(shortcut)
shortcut.features = self.act1_2(shortcut.features)
shortcut.features = self.bn0_2(shortcut.features)
resA = self.conv2(x)
resA.features = self.act2(resA.features)
resA.features = self.bn1(resA.features)
resA = self.conv3(resA)
resA.features = self.act3(resA.features)
resA.features = self.bn2(resA.features)
resA.features = resA.features + shortcut.features
return resA
class ResBlock(nn.Module):
def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3, 3), stride=1,
pooling=True, drop_out=True, height_pooling=False, indice_key=None):
super(ResBlock, self).__init__()
self.pooling = pooling
self.drop_out = drop_out
self.conv1 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.act1 = nn.LeakyReLU()
self.bn0 = nn.BatchNorm1d(out_filters)
self.conv1_2 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
self.act1_2 = nn.LeakyReLU()
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.conv2 = conv1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.act2 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv3 = conv3x1(out_filters, out_filters, indice_key=indice_key+"bef")
self.act3 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
# self.conv4 = conv3x3(out_filters, out_filters, indice_key=indice_key+"bef")
# self.act4 = nn.LeakyReLU()
# self.bn4 = nn.BatchNorm1d(out_filters)
if pooling:
# self.dropout = nn.Dropout3d(p=dropout_rate)
if height_pooling:
# self.pool = spconv.SparseMaxPool3d(kernel_size=2, stride=2)
self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=2,
padding=1, indice_key=indice_key, bias=False)
else:
# self.pool = spconv.SparseMaxPool3d(kernel_size=(2,2,1), stride=(2, 2, 1))
self.pool = spconv.SparseConv3d(out_filters, out_filters, kernel_size=3, stride=(2,2,1),
padding=1, indice_key=indice_key, bias=False)
# else:
# self.dropout = nn.Dropout3d(p=dropout_rate)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.act1(shortcut.features)
shortcut.features = self.bn0(shortcut.features)
shortcut = self.conv1_2(shortcut)
shortcut.features = self.act1_2(shortcut.features)
shortcut.features = self.bn0_2(shortcut.features)
resA = self.conv2(x)
resA.features = self.act2(resA.features)
resA.features = self.bn1(resA.features)
resA = self.conv3(resA)
resA.features = self.act3(resA.features)
resA.features = self.bn2(resA.features)
resA.features = resA.features + shortcut.features
# resA = self.conv4(resA)
# resA.features = self.act4(resA.features)
# resA.features = self.bn4(resA.features)
if self.pooling:
# if self.drop_out:
# resB = self.dropout(resA.features)
# else:
# resB = resA
resB = self.pool(resA)
return resB, resA
else:
# if self.drop_out:
# resB = self.dropout(resA)
# else:
# resB = resA
return resA
class UpBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), indice_key=None, up_key=None):
super(UpBlock, self).__init__()
# self.drop_out = drop_out
#self.trans = nn.ConvTranspose2d(in_filters, out_filters, kernel_size, stride=(2, 2), padding=1)
self.trans_dilao = conv3x3(in_filters, out_filters, indice_key=indice_key+"new_up")
self.trans_act = nn.LeakyReLU()
self.trans_bn = nn.BatchNorm1d(out_filters)
# self.dropout1 = nn.Dropout3d(p=dropout_rate)
# self.dropout2 = nn.Dropout3d(p=dropout_rate)
self.conv1 = conv1x3(out_filters, out_filters, indice_key=indice_key)
self.act1 = nn.LeakyReLU()
self.bn1 = nn.BatchNorm1d(out_filters)
self.conv2 = conv3x1(out_filters, out_filters, indice_key=indice_key)
self.act2 = nn.LeakyReLU()
self.bn2 = nn.BatchNorm1d(out_filters)
self.conv3 = conv3x3(out_filters, out_filters, indice_key=indice_key)
self.act3 = nn.LeakyReLU()
self.bn3 = nn.BatchNorm1d(out_filters)
# self.dropout3 = nn.Dropout3d(p=dropout_rate)
self.up_subm = spconv.SparseInverseConv3d(out_filters, out_filters, kernel_size=3, indice_key=up_key, bias=False)
def forward(self, x, skip):
upA = self.trans_dilao(x)
#if upA.shape != skip.shape:
# upA = F.pad(upA, (0, 1, 0, 1), mode='replicate')
upA.features = self.trans_act(upA.features)
upA.features = self.trans_bn(upA.features)
## upsample
upA = self.up_subm(upA)
# upA = F.interpolate(upA, size=skip.size()[2:], mode='trilinear', align_corners=True)
# if self.drop_out:
# upA = self.dropout1(upA)
upA.features = upA.features + skip.features
# if self.drop_out:
# upB = self.dropout2(upB)
upE = self.conv1(upA)
upE.features = self.act1(upE.features)
upE.features = self.bn1(upE.features)
upE = self.conv2(upE)
upE.features = self.act2(upE.features)
upE.features = self.bn2(upE.features)
upE = self.conv3(upE)
upE.features = self.act3(upE.features)
upE.features = self.bn3(upE.features)
# if self.drop_out:
# upE = self.dropout3(upE)
return upE
class ReconBlock(nn.Module):
def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
super(ReconBlock, self).__init__()
self.conv1 = conv3x1x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0 = nn.BatchNorm1d(out_filters)
self.act1 = nn.Sigmoid()
self.conv1_2 = conv1x3x1(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_2 = nn.BatchNorm1d(out_filters)
self.act1_2 = nn.Sigmoid()
self.conv1_3 = conv1x1x3(in_filters, out_filters, indice_key=indice_key+"bef")
self.bn0_3 = nn.BatchNorm1d(out_filters)
self.act1_3 = nn.Sigmoid()
# self.conv2 = conv3x1(in_filters, out_filters, indice_key=indice_key+"bef")
# self.act2 = nn.LeakyReLU()
# self.bn1 = nn.BatchNorm1d(out_filters)
#
# self.conv3 = conv1x3(out_filters, out_filters, indice_key=indice_key+"bef")
# self.act3 = nn.LeakyReLU()
# self.bn2 = nn.BatchNorm1d(out_filters)
def forward(self, x):
shortcut = self.conv1(x)
shortcut.features = self.bn0(shortcut.features)
shortcut.features = self.act1(shortcut.features)
shortcut2 = self.conv1_2(x)
shortcut2.features = self.bn0_2(shortcut2.features)
shortcut2.features = self.act1_2(shortcut2.features)
shortcut3 = self.conv1_3(x)
shortcut3.features = self.bn0_3(shortcut3.features)
shortcut3.features = self.act1_3(shortcut3.features)
# resA = self.conv2(x)
# resA.features = self.act2(resA.features)
# resA.features = self.bn1(resA.features)
#
# resA = self.conv3(resA)
# resA.features = self.act3(resA.features)
# resA.features = self.bn2(resA.features)
shortcut.features = shortcut.features + shortcut2.features + shortcut3.features
shortcut.features = shortcut.features * x.features
return shortcut
class Spconv_salsaNet_res_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_salsaNet_res_cfg, self).__init__()
output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
else:
num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
nclasses = cfg.DATA_CONFIG.NCLASS
n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
# self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
# self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
# self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
# self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
def forward(self, voxel_features, coors, batch_size):
# x = x.contiguous()
coors = coors.int()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.downCntx(ret)
# down0c, down0b = self.resBlock1(ret)
down1c, down1b = self.resBlock2(ret)
down2c, down2b = self.resBlock3(down1c)
down3c, down3b = self.resBlock4(down2c)
down4c, down4b = self.resBlock5(down3c)
# down5b = self.resBlock6(down4c)
# down6b = self.ReconNet(down5b)
up4e = self.upBlock0(down4c, down4b)
up3e = self.upBlock1(up4e, down3b)
up2e = self.upBlock2(up3e, down2b)
up1e = self.upBlock3(up2e, down1b)
up0e = self.ReconNet(up1e)
up0e.features = torch.cat((up0e.features, up1e.features), 1) # size 4 * init_size --> OK with the size of the semantic and instance heads
return up0e, up0e
class Spconv_sem_logits_head_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_sem_logits_head_cfg, self).__init__()
output_shape = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE
if 'FEATURE_COMPRESSION' in cfg.MODEL.MODEL_FN:
num_input_features = cfg.MODEL.MODEL_FN.FEATURE_COMPRESSION
else:
num_input_features = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
nclasses = cfg.DATA_CONFIG.NCLASS
n_height = cfg.DATA_CONFIG.DATALOADER.GRID_SIZE[2]
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, fea):
logits = self.logits(fea)
return logits.dense()
class Spconv_ins_offset_concatxyz_threelayers_head_cfg(nn.Module):
def __init__(self, cfg):
super(Spconv_ins_offset_concatxyz_threelayers_head_cfg, self).__init__()
init_size = cfg.MODEL.BACKBONE.INIT_SIZE
self.pt_fea_dim = 4 * init_size
self.embedding_dim = cfg.MODEL.INS_HEAD.EMBEDDING_CHANNEL
self.conv1 = conv3x3(self.pt_fea_dim, self.pt_fea_dim, indice_key='offset_head_conv1')
self.bn1 = nn.BatchNorm1d(self.pt_fea_dim)
self.act1 = nn.LeakyReLU()
self.conv2 = conv3x3(self.pt_fea_dim, 2 * init_size, indice_key='offset_head_conv2')
self.bn2 = nn.BatchNorm1d(2 * init_size)
self.act2 = nn.LeakyReLU()
self.conv3 = conv3x3(2 * init_size, init_size, indice_key='offset_head_conv3')
self.bn3 = nn.BatchNorm1d(init_size)
self.act3 = nn.LeakyReLU()
self.offset = nn.Sequential(
nn.Linear(init_size+3, init_size, bias=True),
nn.BatchNorm1d(init_size),
nn.ReLU()
)
self.offset_linear = nn.Linear(init_size, self.embedding_dim, bias=True)
def forward(self, fea, batch):
fea = self.conv1(fea)
fea.features = self.act1(self.bn1(fea.features))
fea = self.conv2(fea)
fea.features = self.act2(self.bn2(fea.features))
fea = self.conv3(fea)
fea.features = self.act3(self.bn3(fea.features))
grid_ind = batch['grid']
xyz = batch['pt_cart_xyz']
fea = fea.dense()
fea = fea.permute(0, 2, 3, 4, 1)
pt_ins_fea_list = []
for batch_i, grid_ind_i in enumerate(grid_ind):
pt_ins_fea_list.append(fea[batch_i, grid_ind[batch_i][:,0], grid_ind[batch_i][:,1], grid_ind[batch_i][:,2]])
pt_pred_offsets_list = []
for batch_i, pt_ins_fea in enumerate(pt_ins_fea_list):
pt_pred_offsets_list.append(self.offset_linear(self.offset(torch.cat([pt_ins_fea,torch.from_numpy(xyz[batch_i]).cuda()],dim=1))))
return pt_pred_offsets_list, pt_ins_fea_list
class Spconv_alsaNet_res(nn.Module):
def __init__(self,
output_shape,
use_norm=True,
num_input_features=128,
nclasses = 20, n_height = 32, strict=False, init_size=16):
super(Spconv_alsaNet_res, self).__init__()
self.nclasses = nclasses
self.nheight = n_height
self.strict = False
sparse_shape = np.array(output_shape)
# sparse_shape[0] = 11
print(sparse_shape)
self.sparse_shape = sparse_shape
self.downCntx = ResContextBlock(num_input_features, init_size, indice_key="pre")
# self.resBlock1 = ResBlock(init_size, init_size, 0.2, pooling=True, height_pooling=True, indice_key="down1")
self.resBlock2 = ResBlock(init_size, 2 * init_size, 0.2, height_pooling=True, indice_key="down2")
self.resBlock3 = ResBlock(2 * init_size, 4 * init_size, 0.2, height_pooling=True, indice_key="down3")
self.resBlock4 = ResBlock(4 * init_size, 8 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down4")
self.resBlock5 = ResBlock(8 * init_size, 16 * init_size, 0.2, pooling=True, height_pooling=False, indice_key="down5")
# self.resBlock6 = ResBlock(16 * init_size, 16 * init_size, 0.2, pooling=False, height_pooling=False, indice_key="down6")
# self.ReconNet = ReconBlock(16 * init_size, 16 * init_size, indice_key="recon")
self.upBlock0 = UpBlock(16 * init_size, 16 * init_size, indice_key="up0", up_key="down5")
self.upBlock1 = UpBlock(16 * init_size, 8 * init_size, indice_key="up1", up_key="down4")
self.upBlock2 = UpBlock(8 * init_size, 4 * init_size, indice_key="up2", up_key="down3")
self.upBlock3 = UpBlock(4 * init_size, 2 * init_size, indice_key="up3", up_key="down2")
# self.upBlock4 = UpBlock(4 * init_size, 2 * init_size, indice_key="up4", up_key="down2")
# self.upBlock5 = UpBlock(2 * init_size, init_size, indice_key="up5", up_key="down1")
self.ReconNet = ReconBlock(2*init_size, 2*init_size, indice_key="recon")
self.logits = spconv.SubMConv3d(4 * init_size, nclasses, indice_key="logit", kernel_size=3, stride=1, padding=1, bias=True)
def forward(self, voxel_features, coors, batch_size):
# x = x.contiguous()
coors = coors.int()
import pdb
pdb.set_trace()
ret = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,
batch_size)
ret = self.downCntx(ret)
# down0c, down0b = self.resBlock1(ret)
down1c, down1b = self.resBlock2(ret)
down2c, down2b = self.resBlock3(down1c)
down3c, down3b = self.resBlock4(down2c)
down4c, down4b = self.resBlock5(down3c)
# down5b = self.resBlock6(down4c)
# down6b = self.ReconNet(down5b)
up4e = self.upBlock0(down4c, down4b)
up3e = self.upBlock1(up4e, down3b)
up2e = self.upBlock2(up3e, down2b)
up1e = self.upBlock3(up2e, down1b)
up0e = self.ReconNet(up1e)
up0e.features = torch.cat((up0e.features, up1e.features), 1)
# up2e = self.upBlock3(up3e, down2b)
# up1e = self.upBlock4(up2e, down1b)
# up0e = self.upBlock5(up1e, down0b)
# up0e_gap = nn.AdaptiveAvgPool3d((1))(up0e)
# up0e_gap = F.interpolate(up0e_gap, size=(up0e.size()[2:]), mode='trilinear', align_corners=True)
# up0e = torch.cat((up0e, up0e_gap), dim=1)
logits = self.logits(up0e)
y = logits.dense()
# y = logits.permute(0, 1, 3, 4, 2)
return y
| 41.588595 | 145 | 0.645495 | 18,620 | 0.911851 | 0 | 0 | 0 | 0 | 0 | 0 | 3,854 | 0.188737 |
909ad865d21f2537f3949dbc416292efd7136d09 | 45 | py | Python | scivision_test_plugin/__init__.py | acocac/scivision-test-plugin | 0ebeabe256287a83d8a268649085f18dc3ddfc9f | ["BSD-3-Clause"] | null | null | null | scivision_test_plugin/__init__.py | acocac/scivision-test-plugin | 0ebeabe256287a83d8a268649085f18dc3ddfc9f | ["BSD-3-Clause"] | null | null | null | scivision_test_plugin/__init__.py | acocac/scivision-test-plugin | 0ebeabe256287a83d8a268649085f18dc3ddfc9f | ["BSD-3-Clause"] | null | null | null |
from .model import DummyModel, ImageNetModel
| 22.5 | 44 | 0.844444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
909b082c85db7f41252c1dd15a6d1058abd2c236 | 2,330 | py | Python | prml/dimreduction/bayesian_pca.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | ["MIT"] | null | null | null | prml/dimreduction/bayesian_pca.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | ["MIT"] | null | null | null | prml/dimreduction/bayesian_pca.py | andresmasegosa/PRML-CoreSets | fb768debb15e3ff6f5b65b7224915a41c1493f3d | ["MIT"] | null | null | null |
import numpy as np
from prml.dimreduction.pca import PCA
class BayesianPCA(PCA):
def fit(self, X, iter_max=100, initial="random"):
"""
empirical bayes estimation of pca parameters
Parameters
----------
X : (sample_size, n_features) ndarray
input data
iter_max : int
maximum number of em steps
Returns
-------
mean : (n_features,) ndarray
sample mean of the input data
W : (n_features, n_components) ndarray
projection matrix
var : float
variance of observation noise
"""
initial_list = ["random", "eigen"]
self.mean = np.mean(X, axis=0)
self.I = np.eye(self.n_components)
if initial not in initial_list:
print("availabel initializations are {}".format(initial_list))
if initial == "random":
self.W = np.eye(np.size(X, 1), self.n_components)
#self.W = np.random.randn(np.size(X, 1), self.n_components)
self.var = 1.
elif initial == "eigen":
self.eigen(X)
self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
for i in range(iter_max):
W = np.copy(self.W)
stats = self._expectation(X - self.mean)
self._maximization(X - self.mean, *stats)
#self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
#if np.allclose(W, self.W):
# break
self.n_iter = i + 1
self.C = self.W @ self.W.T + self.var * np.eye(np.size(X, 1))
self.Cinv = np.linalg.inv(self.C)
def _maximization(self, X, Ez, Ezz):
self.W = X.T @ Ez @ np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha))
self.var = np.mean(
np.mean(X ** 2, axis=-1)
- 2 * np.mean(Ez @ self.W.T * X, axis=-1)
+ np.trace((Ezz @ self.W.T @ self.W).T) / len(self.mean))
def maximize(self, D, Ez, Ezz):
self.W = D.T.dot(Ez).dot(np.linalg.inv(np.sum(Ezz, axis=0) + self.var * np.diag(self.alpha)))
self.var = np.mean(
np.mean(D ** 2, axis=-1)
- 2 * np.mean(Ez.dot(self.W.T) * D, axis=-1)
+ np.trace(Ezz.dot(self.W.T).dot(self.W).T) / self.ndim)
| 36.40625 | 101 | 0.529185 | 2,270 | 0.974249 | 0 | 0 | 0 | 0 | 0 | 0 | 731 | 0.313734 |
909b242da63999e1207271fb27d3b19ba2f0e8e9 | 11,492 | py | Python | mne/time_frequency/psd.py | jnvandermeer/mne-python | 143a1fbfd2a68a0ce8d700da9299564de0b92334 | ["BSD-3-Clause"] | null | null | null | mne/time_frequency/psd.py | jnvandermeer/mne-python | 143a1fbfd2a68a0ce8d700da9299564de0b92334 | ["BSD-3-Clause"] | 2 | 2016-02-27T13:43:15.000Z | 2018-07-18T19:44:45.000Z | mne/time_frequency/psd.py | jnvandermeer/mne-python | 143a1fbfd2a68a0ce8d700da9299564de0b92334 | ["BSD-3-Clause"] | 1 | 2017-03-05T20:44:07.000Z | 2017-03-05T20:44:07.000Z |
# Authors : Alexandre Gramfort, [email protected] (2011)
# Denis A. Engemann <[email protected]>
# License : BSD 3-clause
import numpy as np
from ..parallel import parallel_func
from ..io.pick import _pick_data_channels
from ..utils import logger, verbose, _time_mask
from ..fixes import get_spectrogram
from .multitaper import psd_array_multitaper
def _psd_func(epoch, noverlap, n_per_seg, nfft, fs, freq_mask, func):
"""Aux function."""
return func(epoch, fs=fs, nperseg=n_per_seg, noverlap=noverlap,
nfft=nfft, window='hamming')[2][..., freq_mask, :]
def _check_nfft(n, n_fft, n_per_seg, n_overlap):
"""Ensure n_fft, n_per_seg and n_overlap make sense."""
if n_per_seg is None and n_fft > n:
raise ValueError(('If n_per_seg is None n_fft is not allowed to be > '
'n_times. If you want zero-padding, you have to set '
'n_per_seg to relevant length. Got n_fft of %d while'
' signal length is %d.') % (n_fft, n))
n_per_seg = n_fft if n_per_seg is None or n_per_seg > n_fft else n_per_seg
n_per_seg = n if n_per_seg > n else n_per_seg
if n_overlap >= n_per_seg:
raise ValueError(('n_overlap cannot be greater than n_per_seg (or '
'n_fft). Got n_overlap of %d while n_per_seg is '
'%d.') % (n_overlap, n_per_seg))
return n_fft, n_per_seg, n_overlap
def _check_psd_data(inst, tmin, tmax, picks, proj, reject_by_annotation=False):
"""Check PSD data / pull arrays from inst."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, BaseRaw, Evoked)):
raise ValueError('epochs must be an instance of Epochs, Raw, or'
'Evoked. Got type {0}'.format(type(inst)))
time_mask = _time_mask(inst.times, tmin, tmax, sfreq=inst.info['sfreq'])
if picks is None:
picks = _pick_data_channels(inst.info, with_ref_meg=False)
if proj:
# Copy first so it's not modified
inst = inst.copy().apply_proj()
sfreq = inst.info['sfreq']
if isinstance(inst, BaseRaw):
start, stop = np.where(time_mask)[0][[0, -1]]
rba = 'NaN' if reject_by_annotation else None
data = inst.get_data(picks, start, stop + 1, reject_by_annotation=rba)
elif isinstance(inst, BaseEpochs):
data = inst.get_data()[:, picks][:, :, time_mask]
else: # Evoked
data = inst.data[picks][:, time_mask]
return data, sfreq
@verbose
def psd_array_welch(x, sfreq, fmin=0, fmax=np.inf, n_fft=256, n_overlap=0,
n_per_seg=None, n_jobs=1, verbose=None):
"""Compute power spectral density (PSD) using Welch's method.
Parameters
----------
x : array, shape=(..., n_times)
The data to compute PSD from.
sfreq : float
The sampling frequency.
fmin : float
The lower frequency of interest.
fmax : float
The upper frequency of interest.
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. All dimensions up to the last will
be the same as input.
freqs : ndarray, shape (n_freqs,)
The frequencies.
Notes
-----
.. versionadded:: 0.14.0
"""
spectrogram = get_spectrogram()
dshape = x.shape[:-1]
n_times = x.shape[-1]
x = x.reshape(-1, n_times)
# Prep the PSD
n_fft, n_per_seg, n_overlap = _check_nfft(n_times, n_fft, n_per_seg,
n_overlap)
win_size = n_fft / float(sfreq)
logger.info("Effective window size : %0.3f (s)" % win_size)
freqs = np.arange(n_fft // 2 + 1, dtype=float) * (sfreq / n_fft)
freq_mask = (freqs >= fmin) & (freqs <= fmax)
freqs = freqs[freq_mask]
# Parallelize across first N-1 dimensions
parallel, my_psd_func, n_jobs = parallel_func(_psd_func, n_jobs=n_jobs)
x_splits = np.array_split(x, n_jobs)
f_spectrogram = parallel(my_psd_func(d, noverlap=n_overlap, nfft=n_fft,
fs=sfreq, freq_mask=freq_mask,
func=spectrogram, n_per_seg=n_per_seg)
for d in x_splits)
# Combining, reducing windows and reshaping to original data shape
psds = np.concatenate([np.nanmean(f_s, axis=-1)
for f_s in f_spectrogram], axis=0)
psds.shape = dshape + (-1,)
return psds, freqs
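# Illustrative usage sketch for psd_array_welch (all values below are arbitrary):
#
#     import numpy as np
#     sfreq = 1000.
#     t = np.arange(0, 10, 1. / sfreq)
#     x = np.sin(2 * np.pi * 10. * t)[np.newaxis, :]   # one channel, 10 Hz sine
#     psds, freqs = psd_array_welch(x, sfreq, fmin=1., fmax=40., n_fft=2048)
#     freqs[np.argmax(psds[0])]                        # close to 10 Hz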
@verbose
def psd_welch(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None, n_fft=256,
n_overlap=0, n_per_seg=None, picks=None, proj=False, n_jobs=1,
reject_by_annotation=True, verbose=None):
"""Compute the power spectral density (PSD) using Welch's method.
Calculates periodograms for a sliding window over the time dimension, then
averages them together for each channel/epoch.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
n_fft : int
The length of FFT used, must be ``>= n_per_seg`` (default: 256).
The segments will be zero-padded if ``n_fft > n_per_seg``.
If n_per_seg is None, n_fft must be >= number of time points
in the data.
n_overlap : int
The number of points of overlap between segments. Will be adjusted
to be <= n_per_seg. The default value is 0.
n_per_seg : int | None
Length of each Welch segment (windowed with a Hamming window). Defaults
to None, which sets n_per_seg equal to n_fft.
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
reject_by_annotation : bool
Whether to omit bad segments from the data while computing the
PSD. If True, annotated segments with a description that starts
with 'bad' are omitted. Has no effect if ``inst`` is an Epochs or
Evoked object. Defaults to True.
.. versionadded:: 0.15.0
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_multitaper
psd_array_welch
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj,
reject_by_annotation=reject_by_annotation)
return psd_array_welch(data, sfreq, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_overlap=n_overlap, n_per_seg=n_per_seg,
n_jobs=n_jobs, verbose=verbose)
@verbose
def psd_multitaper(inst, fmin=0, fmax=np.inf, tmin=None, tmax=None,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, proj=False,
n_jobs=1, verbose=None):
"""Compute the power spectral density (PSD) using multitapers.
Calculates spectral density for orthogonal tapers, then averages them
together for each channel/epoch. See [1] for a description of the tapers
and [2] for the general method.
Parameters
----------
inst : instance of Epochs or Raw or Evoked
The data for PSD calculation.
fmin : float
Min frequency of interest
fmax : float
Max frequency of interest
tmin : float | None
Min time of interest
tmax : float | None
Max time of interest
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
The selection of channels to include in the computation.
If None, take all channels.
proj : bool
Apply SSP projection vectors. If inst is ndarray this is not used.
n_jobs : int
Number of CPUs to use in the computation.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
psds : ndarray, shape (..., n_freqs)
The power spectral densities. If input is of type Raw,
then psds will be shape (n_channels, n_freqs), if input is type Epochs
then psds will be shape (n_epochs, n_channels, n_freqs).
freqs : ndarray, shape (n_freqs,)
The frequencies.
References
----------
.. [1] Slepian, D. "Prolate spheroidal wave functions, Fourier analysis,
and uncertainty V: The discrete case." Bell System Technical
Journal, vol. 57, 1978.
.. [2] Percival D.B. and Walden A.T. "Spectral Analysis for Physical
Applications: Multitaper and Conventional Univariate Techniques."
Cambridge University Press, 1993.
See Also
--------
mne.io.Raw.plot_psd
mne.Epochs.plot_psd
psd_array_multitaper
psd_welch
csd_multitaper
Notes
-----
.. versionadded:: 0.12.0
"""
# Prep data
data, sfreq = _check_psd_data(inst, tmin, tmax, picks, proj)
return psd_array_multitaper(data, sfreq, fmin=fmin, fmax=fmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
n_jobs=n_jobs, verbose=verbose)
| 38.563758 | 79 | 0.62661 | 0 | 0 | 0 | 0 | 8,860 | 0.770971 | 0 | 0 | 7,195 | 0.626088 |
909b464aebeffe98a01bbc3d1080af46d979ef36 | 5,690 | py | Python | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | ["Apache-2.0"] | null | null | null | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | ["Apache-2.0"] | null | null | null | culturebank/models.py | Anaphory/culturebank | 9a408cb25fafcb14bbdd96278bebfbc898d32d00 | ["Apache-2.0"] | null | null | null |
from zope.interface import implementer
from sqlalchemy import (
Column,
String,
Integer,
Float,
ForeignKey,
CheckConstraint,
)
from sqlalchemy.orm import relationship, backref
from clld import interfaces
from clld.db.meta import Base, CustomModelMixin
from clld.db.versioned import Versioned
from clld.db.models.common import (
Contribution, Parameter, IdNameDescriptionMixin, Language
)
from clld_glottologfamily_plugin.models import HasFamilyMixin, Family
from .interfaces import IDependency, ITransition, IStability, IDeepFamily, ISupport, IHasSupport
@implementer(interfaces.ILanguage)
class CulturebankLanguage(CustomModelMixin, Language, HasFamilyMixin):
pk = Column(Integer, ForeignKey('language.pk'), primary_key=True)
@implementer(interfaces.IParameter)
class Feature(CustomModelMixin, Parameter, Versioned):
"""Parameters in CultureBank are called features. They are always related to one Designer.
"""
pk = Column(Integer, ForeignKey('parameter.pk'), primary_key=True)
doc = Column(String)
patron = Column(String)
newdoc = Column(String)
vdoc = Column(String)
std_comments = Column(String)
name_french = Column(String)
clarification = Column(String)
alternative_id = Column(String)
representation = Column(Integer)
designer = Column(String)
abbreviation = Column(String)
sortkey_str = Column(String)
sortkey_int = Column(Integer)
jl_relevant_unit = Column(String)
jl_function = Column(String)
jl_formal_means = Column(String)
legacy_status = Column(String)
culturebank_status = Column(String)
wip_comments = Column(String)
nts_culturebank = Column(String)
hard_to_deny = Column(String)
prone_misunderstanding = Column(String)
requires_extensive_data = Column(String)
last_edited = Column(String)
other_survey = Column(String)
@implementer(IStability)
class Stability(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
parsimony_stability_value = Column(Float)
parsimony_retentions = Column(Float)
parsimony_transitions = Column(Float)
feature_pk = Column(Integer, ForeignKey('feature.pk'))
feature = relationship(Feature, lazy='joined', foreign_keys = feature_pk, backref = "stability")
@implementer(IDependency)
class Dependency(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
feature1_pk = Column(Integer, ForeignKey('feature.pk'))
feature1 = relationship(Feature, lazy='joined', foreign_keys = feature1_pk)
feature2_pk = Column(Integer, ForeignKey('feature.pk'))
feature2 = relationship(Feature, lazy='joined', foreign_keys = feature2_pk)
strength = Column(Float)
representation = Column(Integer)
combinatory_status = Column(String)
@implementer(ITransition)
class Transition(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
fromnode = Column(String)
fromvalue = Column(String)
tonode = Column(String)
tovalue = Column(String)
stability_pk = Column(Integer, ForeignKey('stability.pk'))
stability = relationship(Stability, lazy='joined', foreign_keys = stability_pk)
family_pk = Column(Integer, ForeignKey('family.pk'))
family = relationship(Family, backref='transitions')
retention_innovation = Column(String)
@implementer(interfaces.IContribution)
class CulturebankContribution(CustomModelMixin, Contribution):
pk = Column(Integer, ForeignKey('contribution.pk'), primary_key=True)
desc = Column(String)
@implementer(IDeepFamily)
class DeepFamily(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
family1_pk = Column(Integer, ForeignKey('family.pk'))
family1 = relationship(Family, lazy='joined', foreign_keys = family1_pk)
family1_longitude = Column(
Float(),
CheckConstraint('-180 <= family1_longitude and family1_longitude <= 180 '),
doc='geographical longitude in WGS84')
family1_latitude = Column(
Float(),
CheckConstraint('-90 <= family1_latitude and family1_latitude <= 90'),
doc='geographical latitude in WGS84')
family2_pk = Column(Integer, ForeignKey('family.pk'))
family2 = relationship(Family, lazy='joined', foreign_keys = family2_pk)
family2_longitude = Column(
Float(),
CheckConstraint('-180 <= family2_longitude and family2_longitude <= 180 '),
doc='geographical longitude in WGS84')
family2_latitude = Column(
Float(),
CheckConstraint('-90 <= family2_latitude and family2_latitude <= 90'),
doc='geographical latitude in WGS84')
support_value = Column(Float)
significance = Column(Float)
geographic_plausibility = Column(Float)
@implementer(ISupport)
class Support(Base, CustomModelMixin):
pk = Column(Integer, primary_key=True)
id = Column(String)
value1 = Column(String)
value2 = Column(String)
historical_score = Column(Float)
independent_score = Column(Float)
support_score = Column(Float)
feature_pk = Column(Integer, ForeignKey('feature.pk'))
feature = relationship(Feature, lazy='joined', foreign_keys = feature_pk)
@implementer(IHasSupport)
class HasSupport(Base, CustomModelMixin):
id = Column(String)
deepfamily_pk = Column(Integer, ForeignKey('deepfamily.pk'), primary_key=True)
deepfamily = relationship(DeepFamily, lazy='joined', foreign_keys = deepfamily_pk)
support_pk = Column(Integer, ForeignKey('support.pk'), primary_key=True)
support = relationship(Support, lazy='joined', foreign_keys = support_pk)
| 38.187919 | 100 | 0.727065 | 4,801 | 0.843761 | 0 | 0 | 5,063 | 0.889807 | 0 | 0 | 708 | 0.124429 |
909b5fdd491dd149598afad1dcf2d6d1cdc7dcc1 | 600 | py | Python | src/models/layers/feature.py | icycookies/dd_benchmark | 5551c0654d3dc30d72b817096d0877a02f28f116 | ["MIT"] | 2 | 2021-08-01T13:02:41.000Z | 2021-08-01T14:39:44.000Z | src/models/layers/feature.py | icycookies/dd_benchmark | 5551c0654d3dc30d72b817096d0877a02f28f116 | ["MIT"] | null | null | null | src/models/layers/feature.py | icycookies/dd_benchmark | 5551c0654d3dc30d72b817096d0877a02f28f116 | ["MIT"] | 1 | 2021-08-01T14:39:45.000Z | 2021-08-01T14:39:45.000Z |
import torch
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, num_features, hidden_sizes, dropout):
super().__init__()
self.layers = nn.ModuleList(
[nn.Linear(num_features, hidden_sizes[0])] +
[nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]) for i in range(len(hidden_sizes) - 1)]
)
self.activation = nn.ReLU()
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = self.activation(x)
x = self.dropout(x)
return x | 31.578947 | 99 | 0.576667 | 564 | 0.94 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
909b69d30b3ae1f1f238868bd4ff4b5d2afdace9 | 27,662 | py | Python | src/kanone/adapter/tx.py | doncatnip/kanone | 1f149f69f4f9dbb49dd29153fd0366cde68c2b85 | ["Unlicense"] | 5 | 2015-06-14T22:56:10.000Z | 2017-05-29T07:59:35.000Z | src/kanone/adapter/tx.py | doncatnip/kanone | 1f149f69f4f9dbb49dd29153fd0366cde68c2b85 | ["Unlicense"] | 5 | 2019-03-19T13:39:26.000Z | 2020-11-03T20:01:46.000Z | src/kanone/adapter/tx.py | doncatnip/kanone | 1f149f69f4f9dbb49dd29153fd0366cde68c2b85 | ["Unlicense"] | null | null | null |
""" Twisted adapter for Kanone """
from twisted.python.failure import Failure
from twisted.internet import defer
from ..lib import Invalid
from ..util import varargs2kwargs
import logging, sys
log = logging.getLogger( __name__ )
# hacky and redundant, but it'll do for now ..
# TODO: move to proper twisted specific classes under .tx.*
# and get rid of the monkey
_python3 = sys.version_info[0]>=3
def monkeyPatch():
"""
Patches Kanone so that any validation returns a Deferred, thus
one can write asynchronous validators using Twisted's non-blocking API.
Schema and ForEach fields are validated concurrently.
"""
if getattr( monkeyPatch,'_isMonkeyPatched',False):
return
from ..lib import Context, PASS, MISSING
from ..validator.core import Tag, Compose, Tmp, Item, Not, And, Or, Call, If
from ..validator.check import Match
from ..validator.schema import Schema, ForEach, Field
from ..validator.web import MXLookup
@defer.inlineCallbacks
def context_validate( self ):
if self.isValidated:
if self.__error__ is not MISSING:
raise self.__error__
defer.returnValue( self.__result__ )
self.isValidating = True
if self.parent is not None:
if not self.parent.isValidated and not self.parent.isValidating:
yield defer.maybeDeferred\
( self.parent.validate
)
if not self.validator:
raise AttributeError("No validator set for context '%s'" % self.path )
result = defer.maybeDeferred\
( self.validator.validate
, self
, self.__value__
)
result.addErrback( context_gotError, self )
result = yield result
self.isValidated = True
self.isValidating = False
if self.__error__ is not MISSING:
raise self.__error__
else:
if result is not PASS:
self.__result__ = result
else:
self.__result__ = self.__value__
self.__result__ = result
defer.returnValue( result )
def context_gotError( error, self ):
e = error.value
if not isinstance( e, Invalid ):
self.__error__ = error
return
self.__error__ = e
e.context = self
message = e.validator.__messages__[e.key]
if message is not None:
extra = e.data['extra']
value = e.value
data = e.data
data['message'] = message
if hasattr(e,'realkey'):
data['key'] = e.realkey
extra['value.type'] = getattr(value, '__class__', None) is not None \
and getattr(value.__class__,'__name__', False) or 'unknown'
if isinstance(value,str) or not _python3 and isinstance(value,basestring):
extra['value'] = value
else:
extra['value'] = str(value)
cache = getattr( self, 'cache', None)
if cache is not None:
extra.update( cache )
self['error'] = self.__error__.data
self.root.errorlist.append( self.__error__.context.path )
def tag_gotResult( result, d, validator, tagName ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
e = result.value
if e.validator is validator or getattr(e,'composer',None) is validator:
e.tagName = tagName
d.errback( e )
else:
d.callback( result )
def tag_validate( self, context, value ):
validator = context.root.taggedValidators.get(self.tagID, None)
if validator is None:
validator = self.enabled and self.validator
if not validator:
return value
d = defer.Deferred()
result = defer.maybeDeferred\
( validator.validate
, context
, value
)
result.addBoth( tag_gotResult, d, validator, self.tagName )
return d
def compose_gotResult( result, d, context, tmpTags, composer ):
context.root.taggedValidators = tmpTags
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid ):
d.errback( result )
return
e = result.value
if hasattr(e,'tagName'):
e.realkey = "%s_%s" % (e.tagName, getattr(e,'realkey',e.key))
e.composer = composer
del e.tagName
d.errback( e )
else:
d.callback( result )
def compose_validate( self, context, value ):
tmpTags = context.root.taggedValidators
context.root.taggedValidators = self.currentTaggedValidators
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( compose_gotResult, d, context, tmpTags, self )
return d
def tmp_gotReslt( result, d, raiseError, value ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
if raiseError:
d.errback( result.value )
return
d.callback( value )
def tmp_validate( self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( tmp_gotReslt, d, self.raiseError, value )
return d
def item_gotResult( result, d, value, key, alter ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
return
d.errback( result.value )
else:
if alter:
value[key] = result
d.callback( value )
def item_validate( self, context, value ):
try:
val = value[ self.key ]
except TypeError:
raise Invalid( value, self, 'type' )
except (KeyError, IndexError):
raise Invalid( value, self, 'notFound', key=self.key )
else:
if self.validator is not None:
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, val
)
result.addBoth( item_gotResult, d , value, self.key, self.alter )
return d
else:
return val
def not_gotResult( result, d, value, validator ):
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid ):
d.errback( result )
return
d.callback( value )
else:
d.errback( Invalid( value, validator ) )
def not_validate(self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred\
( self.validator.validate
, context
, value
)
result.addBoth( not_gotResult, d, value, self )
return d
def and_doTryNext( result, validators, context, value, d ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
else:
e = result.value
d.errback( e )
else:
if validators:
and_tryNext( validators, context, result, d )
else:
d.callback( result )
def and_tryNext( validators, context, value, d ):
result = defer.maybeDeferred\
( validators.pop(0).validate
, context
, value
)
result.addBoth( and_doTryNext, validators, context, value, d )
def and_validate( self, context, value ):
d = defer.Deferred()
and_tryNext( list( self.validators ), context, value, d )
return d
def or_doTryNext( result, validators, context, value, d ):
if isinstance( result, Failure ):
err = result
if not isinstance(err.value, Invalid):
d.errback( err )
return
e = err.value
if not validators:
d.errback( e )
else:
or_tryNext( validators, context, value, d )
else:
d.callback( result )
def or_tryNext( validators, context, value, d ):
result = defer.maybeDeferred\
( validators.pop(0).validate
, context
, value
)
result.addBoth( or_doTryNext, validators, context, value, d )
def or_validate( self, context, value ):
d = defer.Deferred()
or_tryNext( list(self.validators), context, value, d )
return d
@defer.inlineCallbacks
def call_validate( self, context, value ):
try:
result = yield defer.maybeDeferred\
( self.__func__
, context
, value
)
except Failure as e:
if not isinstance(e.value, Invalid):
raise
e = e.value
e.validator = self
raise e
else:
defer.returnValue( result )
def match_gotResult( result, self, value, d ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
                d.errback( result )
                return
d.errback( Invalid( value, self, matchType=self.type, criterion=result.value ) )
else:
val = value
if self.ignoreCase:
result = str(result).lower()
val = str(value).lower()
if val != result:
d.errback( Invalid( value, self, matchType=self.type, criterion=result ) )
else:
d.callback( value )
def match_on_value(self, context, value ):
if self.type is Match.REGEX:
if not self.criterion.match(value):
raise Invalid( value, self, matchType=self.type, criterion=self.criterion.pattern)
return value
elif self.type is Match.VALIDATOR:
compare = defer.maybeDeferred\
( self.criterion.validate
, context
, value
)
d = defer.Deferred()
compare.addBoth( match_gotResult, self, value, d )
return d
else:
compare = self.criterion
val = value
if self.ignoreCase:
compare = str(compare).lower()
val = str(value).lower()
if val != compare:
raise Invalid( value, self, matchType=self.type, criterion=compare )
return value
def if_gotResult( result, d, context, value ):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
else:
d.errback( result.value )
else:
d.callback( result )
def if_gotResultExpression( result, validator, d, context, value ):
if isinstance( result, Failure ):
if not isinstance( result.value, Invalid):
                d.errback( result )
                return
value = defer.maybeDeferred\
( validator._else.validate, context, value
)
else:
value = defer.maybeDeferred\
( validator._then.validate, context, result
)
value.addBoth( if_gotResult, d, context, value )
def if_validate( self, context, value ):
d = defer.Deferred()
result = defer.maybeDeferred( self.criterion.validate, context, value )
result.addBoth( if_gotResultExpression, self, d, context, value )
return d
def schema_gotResult( result, resultset, key, isList, returnList ):
if returnList:
resultset.append( result )
else:
resultset[ key ] = result
return result
def schema_gotError( error, errorset, key ):
if isinstance( error, Failure ):
if not isinstance(error.value, Invalid):
raise error
error = error.value
errorset.append( error )
def schema__on_value_done( waste, d, schema, value, result, errors ):
if not errors:
d.callback( result )
else:
d.errback( errors.pop(0) )
def schema__createContextChildren_on_value_done( waste, d, schema, value, result, errors ):
if not errors:
d.callback( result )
else:
d.errback( Invalid( value, schema ) )
def schema__on_value( self, context, value ):
isList = isinstance(value, list) or isinstance(value,tuple) or isinstance(value,set)
if not isList and not isinstance( value, dict ):
raise Invalid( value, self, 'type')
extraFields = None
if not self.allowExtraFields:
if isList:
extraFields = max( len(value), len(self.index) )
else:
extraFields = list(value.keys())
if self.returnList:
result = []
else:
result = {}
numValues = len(value)
jobs = []
errorset = []
for pos in range(len(self.index)):
key = self.index[pos]
if isList:
if numValues>pos:
val = value[ pos ]
if not self.allowExtraFields:
extraFields-=1
else:
val = MISSING
else:
val = value.get( key, MISSING)
if not self.allowExtraFields and val is not MISSING:
try: extraFields.remove(key)
except: pass
job = defer.maybeDeferred\
( self.validators[ key ].validate
, context
, val
)
jobs.append\
( job.addCallback( schema_gotResult, result, key, isList, self.returnList )\
.addErrback( schema_gotError, errorset, key )
)
if extraFields:
raise Invalid( value, self, 'extraFields',extraFields=extraFields)
d = defer.Deferred()
jobs =defer.DeferredList( jobs )
jobs.addCallback\
( schema__on_value_done
, d
, self
, value
, result
, errorset
)
return d
def schema__createContextChildren_on_value( self, context, value ):
isList = isinstance(value, list) or isinstance(value,tuple) or isinstance(value,set)
if not isList and not isinstance( value, dict ):
raise Invalid( value, self, 'type')
extraFields = None
if not self.allowExtraFields:
if isList:
extraFields = max( len(value), len(self.index) )
else:
extraFields = list(value.keys())
errors = []
if self.returnList:
result = []
else:
result = {}
len_value = len(value)
len_index = len(self.index)
# populate
for pos in range(len_index):
key = self.index[pos]
childContext = context( key )
try:
childContext.validator = self.validators[ key ]
except KeyError:
raise SyntaxError("No validator set for %s" % childContext.path)
if isList:
if len_value<=pos:
childContext.__value__ = MISSING
else:
childContext.__value__ = value[ pos ]
else:
childContext.__value__ = value.get( key, MISSING )
if not self.allowExtraFields:
if isList:
extraFields-=1
else:
try: extraFields.remove(key)
except: pass
if extraFields:
raise Invalid( value, self, 'extraFields',extraFields=extraFields)
context.setIndexFunc( lambda index: self.index[index] )
jobs = []
# validate
for key in self.index:
jobs.append\
( context( key ).result\
.addCallback( schema_gotResult, result, key, isList, self.returnList )\
.addErrback( schema_gotError, errors, key )
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__createContextChildren_on_value_done
, d
, self
, value
, result
, errors
)
return d
def forEach__on_value( self, context, value ):
if self.returnList:
result = []
else:
result = {}
isList = isinstance( value, list) or isinstance(value, tuple) or isinstance(value, set)
errorset = []
jobs = []
if isList or self.numericKeys:
for pos in range( len( value ) ):
if not isList:
val = value.get(str(pos),MISSING)
if val is MISSING:
raise Invalid( value, self, 'numericKeys', keys=list(value.keys()) )
else:
val = value[pos]
key = str(pos)
jobs.append\
( defer.maybeDeferred\
( self.validator.validate
, context, val
).addCallback\
( schema_gotResult
, result
, key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errorset
, key
)
)
else:
for (key, val) in value.items():
jobs.append\
( defer.maybeDeferred\
( self.validator.validate
, context, val
).addCallback\
( schema_gotResult
, result
, key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errorset
, key
)
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__on_value_done
, d
, self
, value
, result
, errorset
)
return d
def forEach__createContextChildren_on_value( self, context, value ):
isList = isinstance( value, list) or isinstance(value, tuple) or isinstance(value, set)
if not isList:
if not isinstance(value, dict ):
raise Invalid( value, self,'type' )
if self.returnList:
result = []
else:
result = {}
errors = []
# populate
children = []
if isList or self.numericKeys:
context.setIndexFunc( lambda index: str(index) )
for pos in range( len( value ) ):
if not isList:
val = value.get(str(pos),MISSING)
if value.get(str(pos),MISSING) is MISSING:
context.setIndexFunc( None )
raise Invalid( value, self, 'numericKeys',keys=list(value.keys()))
else:
val = value[ pos ]
contextChild = context( str( pos ) )
contextChild.validator = self.validator
contextChild.__value__ = val
children.append( contextChild )
else:
context.setIndexFunc( None )
if self.returnList:
raise Invalid( value, self, 'listType' )
for (key,val) in value.items():
contextChild = context( key )
contextChild.validator = self.validator
contextChild.__value__ = val
children.append( contextChild )
jobs = []
#validate
for childContext in children:
jobs.append\
( childContext.validate()\
.addCallback\
( schema_gotResult
, result
, childContext.key
, isList
, self.returnList
)\
.addErrback\
( schema_gotError
, errors
, childContext.key
)
)
d = defer.Deferred()
jobs = defer.DeferredList( jobs )
jobs.addCallback\
( schema__createContextChildren_on_value_done
, d
, self
, value
, result
, errors
)
return d
@defer.inlineCallbacks
def field_validate(self, context, value):
fieldcontext = self.getField( context, self.path )
if not self.useResult:
result = fieldcontext.value
else:
try:
result = yield fieldcontext.result
except Invalid:
result = PASS
if self.validator is not None:
if result is not PASS:
result = yield defer.maybeDeferred\
( self.validator.validate
, fieldcontext, result
)
if self.writeToContext:
fieldcontext.__result__ = result
if self.copy:
if result is PASS:
defer.returnValue( value )
defer.returnValue( result )
defer.returnValue( value )
from twisted.names import client
from twisted.names.dns import Record_MX
from twisted.names.error import DNSNameError
from twisted.internet.defer import TimeoutError
def mxLookup_gotResult(result, d, value, validator, context ):
if isinstance( result, Failure ):
if isinstance(result.value, TimeoutError):
d.errback( Invalid( value, validator ) )
elif not isinstance(result.value, DNSNameError):
d.errback( result )
else:
d.errback( Invalid( value, validator ) )
return
(answers, auth, add) = result
if not len(answers):
d.errback( Invalid( value, validator ) )
else:
for record in answers:
if isinstance(record.payload,Record_MX):
d.callback( value )
return
d.errback( Invalid( value, validator ) )
mxLookup_resolver = client.Resolver('/etc/resolv.conf')
def mxLookup_on_value( self, context, value ):
d = defer.Deferred()
mxLookup_resolver.lookupMailExchange( value, [2,4,6,8,10] )\
.addBoth( mxLookup_gotResult, d, value, self, context )
return d
Context.validate = context_validate
Tag.validate = tag_validate
    Compose.validate = compose_validate
Tmp.validate = tmp_validate
Item.validate = item_validate
Not.validate = not_validate
And.validate = and_validate
Or.validate = or_validate
Call.validate = call_validate
Match.on_value = match_on_value
If.validate = if_validate
Schema._on_value = schema__on_value
Schema._createContextChildren_on_value = schema__createContextChildren_on_value
ForEach._on_value = forEach__on_value
ForEach._createContextChildren_on_value = forEach__createContextChildren_on_value
Field.validate = field_validate
MXLookup.on_value = mxLookup_on_value
monkeyPatch._isMonkeyPatched = True
from ..util import getArgSpec, getParameterNames
def validateDecorator_gotValidationResult\
( result
, d
, origArgs
, origKwargs
, method
, varargs
, keywords
, shifted
, onInvalid
):
if isinstance( result, Failure ):
if not isinstance(result.value, Invalid):
d.errback( result )
elif onInvalid is not None:
try:
result = onInvalid( result.value )
except Exception as e:
d.errback( e )
else:
d.callback( result )
else:
d.errback( result )
else:
origKwargs.update( result )
resultArgs = origKwargs.pop( varargs, origArgs )
resultArgs = [ origKwargs.pop(key) for key in shifted ] + resultArgs
if keywords is not False:
origKwargs.update( origKwargs.pop( keywords ) )
defer.maybeDeferred( method, *resultArgs, **origKwargs )\
.chainDeferred( d )
def validateDecorator( validator, method, include, exclude, onInvalid, inlineCallbacks ):
if include and exclude:
raise SyntaxError("'include' and 'exclude' cannot be used at the same time")
spec = getArgSpec( method )
hasVarargs = spec.varargs is not None
varargs = spec.varargs or '*varargs'
keywords = spec.keywords or False
methodParameterNames = getParameterNames( method, skipSelf=False )
skip = ()
if exclude:
skip = exclude
if include:
skip = set(methodParameterNames) - set(include)
varargs = varargs
hasVarargs = spec.varargs not in skip and hasVarargs
keywords = keywords not in skip and keywords
if inlineCallbacks:
method = defer.inlineCallbacks( method )
def __wrap( *fargs, **fkwargs):
d = defer.Deferred()
(fargs, fkwargs, shifted ) = varargs2kwargs( method, fargs, fkwargs, skipSelf=False )
origKwargs = dict(fkwargs)
if keywords is not False:
restKwargs = dict(\
( key, fkwargs.pop(key))\
for key in list(fkwargs.keys()) if key not in methodParameterNames
)
fkwargs[ keywords ] = restKwargs
if fargs or hasVarargs:
fkwargs[ varargs ] = list(fargs)
result = validator.context\
( dict( ( key, fkwargs[ key] ) for key in fkwargs if key not in skip )
).result
result.addBoth( validateDecorator_gotValidationResult, d, fargs, origKwargs, method, varargs, keywords, shifted, onInvalid )
return d
return __wrap
def validate( validator, include=None, exclude=None, onInvalid=None, inlineCallbacks=False ):
def __createDecorator( method ):
return validateDecorator( validator, method, include, exclude, onInvalid, inlineCallbacks)
return __createDecorator
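# --- Editor's usage sketch (not part of the original kanone source). ----------
# Everything above leans on one Twisted idiom: wrap a possibly-synchronous
# validator call in defer.maybeDeferred so plain returns become fired Deferreds
# and raised exceptions travel as Failures through the errback chain.  The
# function and values below are illustrative only.
if __name__ == "__main__":
    def maybe_slow_validator(value):
        if value < 0:
            raise ValueError("negative")    # surfaces in the errback branch
        return value * 2                    # plain return -> already-fired Deferred

    d = defer.maybeDeferred(maybe_slow_validator, 21)
    d.addCallback(lambda result: print("validated: %r" % result))    # validated: 42
    d.addErrback(lambda failure: print("invalid: %r" % failure.value))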
| 30.431243 | 132 | 0.521654 | 0 | 0 | 24,312 | 0.878895 | 2,476 | 0.089509 | 0 | 0 | 817 | 0.029535 |
909bb64980267ae4a08d2d7a1f0a4d2581917497 | 1,579 | py | Python | sandbox/graph-size.py | maarten1983/khmer | 417aaa57f0659685c01887a6910de1c08d0a73e5 | [
"BSD-3-Clause"
]
| 1 | 2019-11-02T15:12:44.000Z | 2019-11-02T15:12:44.000Z | sandbox/graph-size.py | ibest/khmer | fbc307abd64363b329745709846d77444ce0c025 | [
"BSD-3-Clause"
]
| null | null | null | sandbox/graph-size.py | ibest/khmer | fbc307abd64363b329745709846d77444ce0c025 | [
"BSD-3-Clause"
]
 | null | null | null |
#! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: [email protected]
#
import khmer
import sys
import screed
import os.path
from khmer.thread_utils import ThreadedSequenceProcessor, verbose_fasta_iter
K = 32
HASHTABLE_SIZE = int(4e9)
THRESHOLD = 500
N_HT = 4
WORKER_THREADS = 5
###
GROUPSIZE = 100
###
def main():
infile = sys.argv[1]
outfile = os.path.basename(infile) + '.graphsize'
if len(sys.argv) == 3:
outfile = sys.argv[2]
print 'input file to graphsize filter: %s' % infile
print 'filtering to output:', outfile
print '-- settings:'
print 'K', K
print 'HASHTABLE SIZE %g' % HASHTABLE_SIZE
print 'N HASHTABLES %d' % N_HT
print 'THRESHOLD', THRESHOLD
print 'N THREADS', WORKER_THREADS
print '--'
print 'creating ht'
ht = khmer.new_hashbits(K, HASHTABLE_SIZE, N_HT)
print 'eating fa', infile
total_reads, n_consumed = ht.consume_fasta(infile)
outfp = open(outfile, 'w')
###
def process_fn(record, ht=ht):
kmer = record['sequence'][:K]
size = ht.calc_connected_graph_size(kmer, THRESHOLD)
if size >= THRESHOLD:
return record['name'], record['sequence']
return None, None
tsp = ThreadedSequenceProcessor(process_fn, WORKER_THREADS, GROUPSIZE)
###
tsp.start(verbose_fasta_iter(infile), outfp)
if __name__ == '__main__':
main()
| 23.567164 | 76 | 0.664345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.303357 |
909c067225e930569a068986504ae450bf7106ff | 3,187 | py | Python | ferry/crawler/fetch_demand.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
]
| 4 | 2020-11-12T19:37:06.000Z | 2021-12-14T01:38:39.000Z | ferry/crawler/fetch_demand.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
]
| 96 | 2020-09-08T05:17:17.000Z | 2022-03-31T23:12:51.000Z | ferry/crawler/fetch_demand.py | coursetable/ferry | f369b9588557c359af8589f2575a03493d6b08b6 | [
"MIT"
]
 | 2 | 2021-03-03T23:02:40.000Z | 2021-06-17T23:33:05.000Z |
"""
Fetches demand statistics.
Modified from Dan Zhao
Original article:
https://yaledailynews.com/blog/2020/01/10/yales-most-popular-courses/
Github:
https://github.com/iamdanzhao/yale-popular-classes
README:
https://github.com/iamdanzhao/yale-popular-classes/blob/master/data-guide/course_data_guide.md
"""
import argparse
from multiprocessing import Pool
from typing import List, Tuple
import ujson
from ferry import config
from ferry.crawler.common_args import add_seasons_args, parse_seasons_arg
from ferry.includes.demand_processing import fetch_season_subject_demand, get_dates
from ferry.includes.tqdm import tqdm
def handle_season_subject_demand(demand_args: Tuple[str, str, List[str], List[str]]):
"""
Handler for fetching subject codes to be passed into Pool()
"""
demand_season, demand_subject_code, demand_subject_codes, demand_dates = demand_args
courses = fetch_season_subject_demand(
demand_season, demand_subject_code, demand_subject_codes, demand_dates
)
return courses
if __name__ == "__main__":
class FetchDemandError(Exception):
"""
Error object for demand fetching exceptions.
"""
# pylint: disable=unnecessary-pass
pass
# Set season
# Pass using command line arguments
# Examples: 202001 = 2020 Spring, 201903 = 2019 Fall
# If no season is provided, the program will scrape all available seasons
parser = argparse.ArgumentParser(description="Import demand stats")
add_seasons_args(parser)
args = parser.parse_args()
# list of seasons previously from fetch_seasons.py
with open(f"{config.DATA_DIR}/demand_seasons.json", "r") as f:
all_viable_seasons = ujson.load(f)
seasons = parse_seasons_arg(args.seasons, all_viable_seasons)
print("Retrieving subjects list... ", end="")
with open(f"{config.DATA_DIR}/demand_subjects.json", "r") as f:
subjects = ujson.load(f)
subject_codes = sorted(list(subjects.keys()))
print("ok")
# set up parallel processing pool
with Pool(processes=64) as pool:
for season in seasons:
print(f"Retrieving demand by subject for season {season}")
dates = get_dates(season)
pool_args = [
(season, subject_code, subject_codes, dates)
for subject_code in subject_codes
]
season_courses = []
# use imap_unordered to report to tqdm
with tqdm(total=len(pool_args), desc="Subjects retrieved") as pbar:
for i, result in enumerate(
pool.imap_unordered(handle_season_subject_demand, pool_args)
):
pbar.update()
season_courses.append(result)
# flatten season courses
season_courses = [x for y in season_courses for x in y]
# sort courses by title (for consistency with ferry-data)
season_courses = sorted(season_courses, key=lambda x: x["title"])
with open(f"{config.DATA_DIR}/demand_stats/{season}_demand.json", "w") as f:
ujson.dump(season_courses, f, indent=4)
| 29.509259 | 94 | 0.671478 | 168 | 0.052714 | 0 | 0 | 0 | 0 | 0 | 0 | 1,151 | 0.361155 |
909dc9969f5cc018e88da564d8e3efacb5bc1be6 | 406 | py | Python | migrate_db.py | qxf2/interview-scheduler | ef17350cec70c66c7136671789ed188231a5fcba | [
"MIT"
]
| 2 | 2021-05-06T17:02:21.000Z | 2021-05-19T19:41:21.000Z | migrate_db.py | qxf2/interview-scheduler | ef17350cec70c66c7136671789ed188231a5fcba | [
"MIT"
]
| 9 | 2019-08-01T18:49:35.000Z | 2021-04-01T12:52:35.000Z | migrate_db.py | qxf2/interview-scheduler | ef17350cec70c66c7136671789ed188231a5fcba | [
"MIT"
]
 | 18 | 2019-07-23T16:26:17.000Z | 2022-01-21T10:33:41.000Z |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from qxf2_scheduler import models
from qxf2_scheduler import db
from qxf2_scheduler.__init__ import app
from flask_script import Manager
from flask_migrate import Migrate,MigrateCommand
migrate=Migrate(app, db,render_as_batch=True)
manager=Manager(app)
manager.add_command('db',MigrateCommand)
if __name__ == "__main__":
    manager.run()
 | 25.375 | 48 | 0.830049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.034483 |
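# --- Editor's note on the migrate_db.py script above (not part of the file):
# with Flask-Script and Flask-Migrate wired up this way, schema changes are
# driven from the command line, e.g.:
#
#   python migrate_db.py db init      # once, to create the migrations folder
#   python migrate_db.py db migrate   # autogenerate a revision from the models
#   python migrate_db.py db upgrade   # apply the revision to the database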
909e429cd3c93b342a1a4e97e4084847d6b07a78 | 3,476 | py | Python | main.py | tarunsinghal92/indeedscrapperlatest | 2c7fd920d115764192bf5f7bf8fd3d30aa6ec2b4 | [
"MIT"
]
| 15 | 2019-07-31T11:48:28.000Z | 2022-02-25T13:55:23.000Z | main.py | tarunsinghal92/indeedscrapperlatest | 2c7fd920d115764192bf5f7bf8fd3d30aa6ec2b4 | [
"MIT"
]
| null | null | null | main.py | tarunsinghal92/indeedscrapperlatest | 2c7fd920d115764192bf5f7bf8fd3d30aa6ec2b4 | [
"MIT"
]
 | 14 | 2019-02-20T21:44:39.000Z | 2022-02-16T11:35:27.000Z |
# import packages
import requests
import pandas as pd
import time
from functions import *
# limit per city
max_results_per_city = 100
# db of city
city_set = ['New+York','Toronto','Las+Vegas']
# job roles
job_set = ['business+analyst','data+scientist']
# file num
file = 1
# from where to skip
SKIPPER = 0
# loop on all cities
for city in city_set:
# for each job role
for job_qry in job_set:
# count
cnt = 0
startTime = time.time()
# skipper
if(file > SKIPPER):
# dataframe
df = pd.DataFrame(columns = ['unique_id', 'city', 'job_qry','job_title', 'company_name', 'location', 'summary', 'salary', 'link', 'date', 'full_text'])
# for results
for start in range(0, max_results_per_city, 10):
# get dom
page = requests.get('http://www.indeed.com/jobs?q=' + job_qry +'&l=' + str(city) + '&start=' + str(start))
#ensuring at least 1 second between page grabs
time.sleep(1)
#fetch data
soup = get_soup(page.text)
divs = soup.find_all(name="div", attrs={"class":"row"})
# if results exist
if(len(divs) == 0):
break
# for all jobs on a page
for div in divs:
#specifying row num for index of job posting in dataframe
num = (len(df) + 1)
cnt = cnt + 1
#job data after parsing
job_post = []
#append unique id
job_post.append(div['id'])
#append city name
job_post.append(city)
#append job qry
job_post.append(job_qry)
#grabbing job title
job_post.append(extract_job_title(div))
#grabbing company
job_post.append(extract_company(div))
#grabbing location name
job_post.append(extract_location(div))
#grabbing summary text
job_post.append(extract_summary(div))
#grabbing salary
job_post.append(extract_salary(div))
#grabbing link
link = extract_link(div)
job_post.append(link)
#grabbing date
job_post.append(extract_date(div))
#grabbing full_text
job_post.append(extract_fulltext(link))
#appending list of job post info to dataframe at index num
df.loc[num] = job_post
#debug add
write_logs(('Completed =>') + '\t' + city + '\t' + job_qry + '\t' + str(cnt) + '\t' + str(start) + '\t' + str(time.time() - startTime) + '\t' + ('file_' + str(file)))
#saving df as a local csv file
df.to_csv('jobs_' + str(file) + '.csv', encoding='utf-8')
else:
#debug add
write_logs(('Skipped =>') + '\t' + city + '\t' + job_qry + '\t' + str(-1) + '\t' + str(-1) + '\t' + str(time.time() - startTime) + '\t' + ('file_' + str(file)))
# increment file
file = file + 1
| 29.709402 | 183 | 0.467779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,032 | 0.296893 |
909f78a9a426fedd3532cae3c362b0e27f684e37 | 4,973 | py | Python | L0_serial.py | RL-WWW/ISST | 42b656686fa9660794007a0bc00a7177937410e9 | [
"BSD-3-Clause"
]
| 5 | 2021-01-24T13:19:45.000Z | 2021-04-05T15:49:35.000Z | L0_serial.py | RL-WWW/ISST | 42b656686fa9660794007a0bc00a7177937410e9 | [
"BSD-3-Clause"
]
| null | null | null | L0_serial.py | RL-WWW/ISST | 42b656686fa9660794007a0bc00a7177937410e9 | [
"BSD-3-Clause"
]
 | null | null | null |
# Import Libraries
import numpy as np
import cv2
import argparse
import time
# Import User Libraries
import L0_helpers
# Image File Path
image_r = "images/flowers.jpg"
image_w = "out_serial.png"
# L0 minimization parameters
kappa = 2.0
_lambda = 2e-2
# Verbose output
verbose = False
def L0_smooth(input_path, output_path, kappa=2.0, _lambda=2e-2, verbose=False):
# Set parameters
image_r = input_path
image_w = output_path
# Read image I
image = cv2.imread(image_r)
# Timers
step_1 = 0.0
step_2 = 0.0
step_2_fft = 0.0
# Start time
start_time = time.time()
# Validate image format
N, M, D = np.int32(image.shape)
assert D == 3, "Error: input must be 3-channel RGB image"
print("Processing %d x %d RGB image" % (M, N))
# Initialize S as I
S = np.float32(image) / 256
# Compute image OTF
size_2D = [N, M]
fx = np.int32([[1, -1]])
fy = np.int32([[1], [-1]])
otfFx = L0_helpers.psf2otf(fx, size_2D)
otfFy = L0_helpers.psf2otf(fy, size_2D)
# Compute F(I)
FI = np.complex64(np.zeros((N, M, D)))
FI[:, :, 0] = np.fft.fft2(S[:, :, 0])
FI[:, :, 1] = np.fft.fft2(S[:, :, 1])
FI[:, :, 2] = np.fft.fft2(S[:, :, 2])
# Compute MTF
MTF = np.power(np.abs(otfFx), 2) + np.power(np.abs(otfFy), 2)
MTF = np.tile(MTF[:, :, np.newaxis], (1, 1, D))
# Initialize buffers
h = np.float32(np.zeros((N, M, D)))
v = np.float32(np.zeros((N, M, D)))
dxhp = np.float32(np.zeros((N, M, D)))
dyvp = np.float32(np.zeros((N, M, D)))
FS = np.complex64(np.zeros((N, M, D)))
# Iteration settings
beta_max = 1e5;
beta = 2 * _lambda
iteration = 0
# Done initializing
init_time = time.time()
# Iterate until desired convergence in similarity
while beta < beta_max:
if verbose:
print("ITERATION %i" % iteration)
### Step 1: estimate (h, v) subproblem
# subproblem 1 start time
s_time = time.time()
# compute dxSp
h[:, 0:M - 1, :] = np.diff(S, 1, 1)
h[:, M - 1:M, :] = S[:, 0:1, :] - S[:, M - 1:M, :]
# compute dySp
v[0:N - 1, :, :] = np.diff(S, 1, 0)
v[N - 1:N, :, :] = S[0:1, :, :] - S[N - 1:N, :, :]
# compute minimum energy E = dxSp^2 + dySp^2 <= _lambda/beta
t = np.sum(np.power(h, 2) + np.power(v, 2), axis=2) < _lambda / beta
t = np.tile(t[:, :, np.newaxis], (1, 1, 3))
# compute piecewise solution for hp, vp
h[t] = 0
v[t] = 0
# subproblem 1 end time
e_time = time.time()
step_1 = step_1 + e_time - s_time
if verbose:
print("-subproblem 1: estimate (h,v)")
print("--time: %f (s)" % (e_time - s_time))
### Step 2: estimate S subproblem
# subproblem 2 start time
s_time = time.time()
# compute dxhp + dyvp
dxhp[:, 0:1, :] = h[:, M - 1:M, :] - h[:, 0:1, :]
dxhp[:, 1:M, :] = -(np.diff(h, 1, 1))
dyvp[0:1, :, :] = v[N - 1:N, :, :] - v[0:1, :, :]
dyvp[1:N, :, :] = -(np.diff(v, 1, 0))
normin = dxhp + dyvp
fft_s = time.time()
FS[:, :, 0] = np.fft.fft2(normin[:, :, 0])
FS[:, :, 1] = np.fft.fft2(normin[:, :, 1])
FS[:, :, 2] = np.fft.fft2(normin[:, :, 2])
fft_e = time.time()
step_2_fft += fft_e - fft_s
# solve for S + 1 in Fourier domain
denorm = 1 + beta * MTF
FS[:, :, :] = (FI + beta * FS) / denorm
# inverse FFT to compute S + 1
fft_s = time.time()
S[:, :, 0] = np.float32((np.fft.ifft2(FS[:, :, 0])).real)
S[:, :, 1] = np.float32((np.fft.ifft2(FS[:, :, 1])).real)
S[:, :, 2] = np.float32((np.fft.ifft2(FS[:, :, 2])).real)
fft_e = time.time()
step_2_fft += fft_e - fft_s
# subproblem 2 end time
e_time = time.time()
step_2 = step_2 + e_time - s_time
if verbose:
print("-subproblem 2: estimate S + 1")
print("--time: %f (s)" % (e_time - s_time))
print("")
# update beta for next iteration
beta *= kappa
iteration += 1
# Rescale image
S = S * 256
# Total end time
final_time = time.time()
print("Total Time: %f (s)" % (final_time - start_time))
print("Setup: %f (s)" % (init_time - start_time))
print("Step 1: %f (s)" % (step_1))
print("Step 2: %f (s)" % (step_2))
print("Step 2 (FFT): %f (s)" % (step_2_fft))
print("Iterations: %d" % (iteration))
cv2.imwrite(image_w, S)
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(
description="Serial implementation of image smoothing via L0 gradient minimization")
parser.add_argument('image_r', help="input image file")
parser.add_argument('image_w', help="output image file")
parser.add_argument('-k', type=float, default=2.0,
metavar='kappa', help='updating weight (default 2.0)')
parser.add_argument('-l', type=float, default=2e-2,
metavar='lambda', help='smoothing weight (default 2e-2)')
parser.add_argument('-v', '--verbose', action='store_true',
help='enable verbose logging for each iteration')
args = parser.parse_args()
L0_smooth(args.image_r, args.image_w, args.k, args.l, args.verbose)
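# --- Editor's note (not part of the original script): subproblem 1 above is a
# hard threshold on gradient energy.  (h, v) are zeroed wherever the squared
# gradient magnitude, summed over the three colour channels, falls below
# _lambda / beta; with the defaults (_lambda = 2e-2, initial beta = 2 * _lambda)
# that cutoff starts at 0.5 and shrinks by a factor of kappa each iteration,
# so progressively smaller gradients are allowed to survive.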
| 26.593583 | 90 | 0.575508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,424 | 0.286346 |
909f8409bcfac0d98c71ec79e9110765c9b7b295 | 2,565 | py | Python | data_processing/signal_downsampling.py | HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data | 7149e78736611f07a1c7c4adbdf24ae03011e549 | [
"MIT"
]
| 9 | 2020-09-26T23:52:49.000Z | 2021-10-04T00:08:23.000Z | data_processing/signal_downsampling.py | HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data | 7149e78736611f07a1c7c4adbdf24ae03011e549 | [
"MIT"
]
| null | null | null | data_processing/signal_downsampling.py | HassanHayat08/Interpretable-CNN-for-Big-Five-Personality-Traits-using-Audio-Data | 7149e78736611f07a1c7c4adbdf24ae03011e549 | [
"MIT"
]
 | 2 | 2021-04-06T13:02:24.000Z | 2021-12-06T09:03:24.000Z |
### Interpretable cnn for big five personality traits using audio data ###
### This script downsamples a 41000 Hz signal to a 4000 Hz signal ###
from __future__ import absolute_import, division, print_function
import pathlib
import random
import csv
import numpy as np
from scipy.io import wavfile
import tensorflow as tf
import itertools
from scipy import stats
### functions for mapping ###
def normalize_with_moments(data, axes=[0], epsilon=1e-8):
mean, variance = tf.nn.moments(data, axes=axes)
data_normed = (data - mean) / tf.sqrt(variance + epsilon) # epsilon to avoid dividing by zero
return data_normed
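# --- Editor's note (not part of the original script): normalize_with_moments
# standardises a tensor to zero mean and unit variance along axis 0, i.e.
# (x - mean) / sqrt(variance + epsilon); the epsilon only guards against a zero
# variance.  get_wav below applies it to the FFT of each 4000 Hz clip.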
def get_wav(path, label):
wav_file = tf.read_file(path)
data = tf.contrib.ffmpeg.decode_audio(tf.read_file(path), file_format="wav",samples_per_second=4000, channel_count=1)
data = tf.cast(data,tf.complex64)
data = tf.fft(data,name='FFT')
return normalize_with_moments(data), label
### down sample the data ###
data = []
labels = []
folder_path = '/...path/to/wav/data/folder/'
folder_path = pathlib.Path(folder_path)
files_path = list(folder_path.glob('*.wav'))
files_path = [str(path) for path in files_path]
no_of_samples = len(files_path)
### load data labels ###
with open('/...path/to/.csv/labels/file', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
data.append(row)
for i in range(len(files_path)):
file_1 = files_path[i]
file_1 = file_1.split("/")[5]
file_name_1 = file_1[:-4]
new_filename_1 = file_name_1 + '.mp4'
label_1 = []
label_2 = []
matching = [s for s in data if new_filename_1 in s]
label_1= np.delete(matching,[0],axis=1)
label_2 = label_1[0,:]
label_2 = [float(i) for i in label_2]
labels.append(label_2)
### dataset pipeline ###
ds = tf.data.Dataset.from_tensor_slices((files_path, labels))
data_ds = ds.map(get_wav)
ds = data_ds.shuffle(buffer_size=no_of_samples)
ds = ds.repeat()
ds = ds.batch(1)
### prefetch the data batches in the background ###
ds = ds.prefetch(buffer_size=1)
iterator = ds.make_one_shot_iterator()
next_ele = iterator.get_next()
features_4k = []
labels_4k = []
with tf.Session() as sess:
for _ in range(len(files_path)):
t_features, t_labels = sess.run(next_ele)
features_4k.append(t_features)
labels_4k.append(t_labels)
np.save('.../save/path/',features_4k)
np.save('.../save/path/',labels_4k)
print('Completed')
| 29.482759 | 121 | 0.670955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 471 | 0.183626 |
90a1865751cb26e76fdfe2385bd5686fe05ca8bb | 1,858 | py | Python | ai_flow/model_center/entity/_model_repo_object.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
]
| 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | ai_flow/model_center/entity/_model_repo_object.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
]
| 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | ai_flow/model_center/entity/_model_repo_object.py | flink-extended/ai-flow | d1427a243097d94d77fedbe1966500ae26975a13 | [
"Apache-2.0"
]
 | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z |
#
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import pprint
from abc import abstractmethod
class _ModelRepoObject(object):
def __iter__(self):
# Iterate through list of properties and yield as key -> value
for prop in self._properties():
yield prop, self.__getattribute__(prop)
@classmethod
def _get_properties_helper(cls):
return sorted([p for p in cls.__dict__ if isinstance(getattr(cls, p), property)])
@classmethod
def _properties(cls):
return cls._get_properties_helper()
@classmethod
@abstractmethod
def from_proto(cls, proto):
pass
def __repr__(self):
return to_string(self)
def to_string(obj):
return _ModelRepoObjectPrinter().to_string(obj)
def get_classname(obj):
return type(obj).__name__
class _ModelRepoObjectPrinter(object):
def __init__(self):
super(_ModelRepoObjectPrinter, self).__init__()
self.printer = pprint.PrettyPrinter()
def to_string(self, obj):
if isinstance(obj, _ModelRepoObject):
return "<%s: %s>" % (get_classname(obj), self._entity_to_string(obj))
return self.printer.pformat(obj)
def _entity_to_string(self, entity):
return ", ".join(["%s=%s" % (key, self.to_string(value)) for key, value in entity])
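# --- Editor's usage sketch (not part of the original module): a made-up
# subclass showing how _ModelRepoObject discovers @property attributes for
# iteration and pretty-printing.  The class and field names are illustrative.
if __name__ == "__main__":
    class _DemoModel(_ModelRepoObject):
        def __init__(self, name, version):
            self._name = name
            self._version = version

        @property
        def name(self):
            return self._name

        @property
        def version(self):
            return self._version

        @classmethod
        def from_proto(cls, proto):
            return cls(proto.name, proto.version)

    print(_DemoModel("wine-quality", 3))   # -> <_DemoModel: name='wine-quality', version=3>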
| 28.584615 | 91 | 0.697524 | 1,089 | 0.586114 | 182 | 0.097955 | 298 | 0.160388 | 0 | 0 | 655 | 0.35253 |
90a2c66069c33df69aa851c8c0f49466dd43d14e | 2,127 | py | Python | model_search/search/common_test.py | LinqCod/model_search | d90bc39994bc2a5f5028035ac954f796eda03310 | [
"Apache-2.0"
]
| null | null | null | model_search/search/common_test.py | LinqCod/model_search | d90bc39994bc2a5f5028035ac954f796eda03310 | [
"Apache-2.0"
]
| null | null | null | model_search/search/common_test.py | LinqCod/model_search | d90bc39994bc2a5f5028035ac954f796eda03310 | [
"Apache-2.0"
]
 | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for model_search.search.common."""
from absl.testing import parameterized
from model_search.search import common
import tensorflow.compat.v2 as tf
class CommonTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "no_completed_trials",
"num_completed_trials": 0,
"expected": 1,
}, {
"testcase_name": "some_completed_trials",
"num_completed_trials": 11,
"expected": 3,
}, {
"testcase_name": "custom_depth_thresholds",
"num_completed_trials": 2,
"expected": 2,
"depth_thresholds": [0, 1, 10, 20],
}, {
"testcase_name": "maximum_respected",
"num_completed_trials": 1000,
"expected": 5,
})
def test_get_allowed_depth(self,
num_completed_trials,
expected,
depth_thresholds=None):
actual = common.get_allowed_depth(
num_completed_trials, depth_thresholds, max_depth=5)
self.assertEqual(expected, actual)
def test_get_random_architecture(self):
architecture = common.get_random_architecture(["a", "b", "c"], 3)
self.assertLen(architecture, 3)
self.assertAllInSet(architecture, ["a", "b", "c"])
def test_get_random_block(self):
block = common.get_random_block(["a", "b", "c"])
self.assertIn(block, ["a", "b", "c"])
if __name__ == "__main__":
tf.enable_v2_behavior()
tf.test.main()
| 32.227273 | 74 | 0.649741 | 1,299 | 0.610719 | 0 | 0 | 901 | 0.423601 | 0 | 0 | 963 | 0.45275 |
90a3029cbc5a3d0ba677696927ab7f1da401c62e | 588 | py | Python | model-builder/skrutil/deprecate_util.py | DaYeSquad/worktilerwdemo | 03fbc18dcba4881628cf790f2f0cd7e6f9aa130f | [
"MIT"
]
| 5 | 2016-05-13T15:23:41.000Z | 2019-05-29T08:23:25.000Z | model-builder/skrutil/deprecate_util.py | DaYeSquad/worktilerwdemo | 03fbc18dcba4881628cf790f2f0cd7e6f9aa130f | [
"MIT"
]
| null | null | null | model-builder/skrutil/deprecate_util.py | DaYeSquad/worktilerwdemo | 03fbc18dcba4881628cf790f2f0cd7e6f9aa130f | [
"MIT"
]
 | 2 | 2016-06-08T08:22:42.000Z | 2019-02-25T08:46:54.000Z |
import warnings
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emmitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
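# --- Editor's usage sketch (not part of the original module): `old_api` is a
# made-up function name used only for illustration.
if __name__ == "__main__":
    @deprecated
    def old_api():
        return 42

    old_api()   # emits DeprecationWarning: "Call to deprecated function old_api."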
| 34.588235 | 72 | 0.681973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.331633 |
90a3bca5369f1537b322d1766cb9151ec9a0af0c | 3,181 | py | Python | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
]
| null | null | null | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
]
| null | null | null | models.py | sheldonjinqi/CIS680_BicycleGAN | a1d32ad9ba39c61e07838f5b6391b6d2ab0765c4 | [
"MIT"
]
 | null | null | null |
from torchvision.models import resnet18
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import torch
import pdb
##############################
# Encoder
##############################
class Encoder(nn.Module):
def __init__(self, latent_dim):
super(Encoder, self).__init__()
""" The encoder used in both cVAE-GAN and cLR-GAN, which encode image B or B_hat to latent vector
This encoder uses resnet-18 to extract features, and further encode them into a distribution
similar to VAE encoder.
Note: You may either add "reparametrization trick" and "KL divergence" or in the train.py file
Args in constructor:
latent_dim: latent dimension for z
Args in forward function:
img: image input (from domain B)
Returns:
mu: mean of the latent code
logvar: sigma of the latent code
"""
# Extracts features at the last fully-connected
resnet18_model = resnet18(pretrained=True)
self.feature_extractor = nn.Sequential(*list(resnet18_model.children())[:-3])
self.pooling = nn.AvgPool2d(kernel_size=8, stride=8, padding=0)
# Output is mu and log(var) for reparameterization trick used in VAEs
self.fc_mu = nn.Linear(256, latent_dim)
self.fc_logvar = nn.Linear(256, latent_dim)
def forward(self, img):
out = self.feature_extractor(img)
out = self.pooling(out)
out = out.view(out.size(0), -1)
mu = self.fc_mu(out)
logvar = self.fc_logvar(out)
return mu, logvar
##############################
# Generator
##############################
class Generator(nn.Module):
""" The generator used in both cVAE-GAN and cLR-GAN, which transform A to B
Args in constructor:
latent_dim: latent dimension for z
image_shape: (channel, h, w), you may need this to specify the output dimension (optional)
Args in forward function:
x: image input (from domain A)
z: latent vector (encoded B)
Returns:
fake_B: generated image in domain B
"""
def __init__(self, latent_dim, img_shape):
super(Generator, self).__init__()
channels, self.h, self.w = img_shape
# (TODO: add layers...)
def forward(self, x, z):
# (TODO: add layers...)
return
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
def __init__(self, in_channels=3):
super(Discriminator, self).__init__()
""" The discriminator used in both cVAE-GAN and cLR-GAN
Args in constructor:
in_channels: number of channel in image (default: 3 for RGB)
Args in forward function:
x: image input (real_B, fake_B)
Returns:
discriminator output: could be a single value or a matrix depending on the type of GAN
"""
def forward(self, x):
return
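# --- Editor's usage sketch (not part of the original assignment file): shows
# the reparameterization trick applied to the Encoder outputs.  The 128x128
# input size is an assumption chosen to match AvgPool2d(8) + Linear(256, .)
# above, and resnet18(pretrained=True) will download weights on first use.
if __name__ == "__main__":
    encoder = Encoder(latent_dim=8)
    fake_B = torch.randn(4, 3, 128, 128)                       # dummy batch from domain B
    mu, logvar = encoder(fake_B)
    z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)    # z ~ N(mu, sigma^2)
    print(z.shape)                                             # torch.Size([4, 8])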
| 30.586538 | 106 | 0.563345 | 2,781 | 0.874253 | 0 | 0 | 0 | 0 | 0 | 0 | 1,941 | 0.610185 |
90a433b0faab6ec973b072f69d11760a7c0bb8ef | 3,381 | py | Python | oem_storage_file/main.py | OpenEntityMap/oem-storage-file | cce7e3979c413273aaa224799cfe6b86bad7627e | [
"BSD-3-Clause"
]
| null | null | null | oem_storage_file/main.py | OpenEntityMap/oem-storage-file | cce7e3979c413273aaa224799cfe6b86bad7627e | [
"BSD-3-Clause"
]
| null | null | null | oem_storage_file/main.py | OpenEntityMap/oem-storage-file | cce7e3979c413273aaa224799cfe6b86bad7627e | [
"BSD-3-Clause"
]
| null | null | null | from oem_framework.models.core import ModelRegistry
from oem_framework.plugin import Plugin
from oem_framework.storage import ProviderStorage
from oem_storage_file.core.base import BaseFileStorage
from oem_storage_file.database import DatabaseFileStorage
import appdirs
import os
class ProviderFileStorage(ProviderStorage, BaseFileStorage, Plugin):
__key__ = 'file'
def __init__(self, path=None):
super(ProviderFileStorage, self).__init__()
self.path = path
if self.path is None:
self.path = self._create_dir()
@classmethod
def open(cls, client, path=None):
storage = cls(path)
storage.initialize(client)
return storage
#
# Provider methods
#
def create(self, source, target):
package_path = self.package_path(source, target)
# Ensure cache directory exists
if not os.path.exists(package_path):
os.makedirs(package_path)
return True
def open_database(self, source, target, path=None):
return ModelRegistry['Database'].load(
DatabaseFileStorage.open(self, source, target, path),
source, target
)
#
# Index methods
#
def has_index(self, source, target):
return os.path.exists(os.path.join(
self._collection_path(source, target),
'index.%s' % self.main.format.__extension__
))
def update_index(self, source, target, response):
# Build collection path
collection_path = self._collection_path(source, target)
# Ensure directory exists
if not os.path.exists(collection_path):
os.makedirs(collection_path)
# Write index to file
path = os.path.join(collection_path, 'index.%s' % self.main.format.__extension__)
with open(path, 'w') as fp:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
return True
#
# Item methods
#
def has_item(self, source, target, key, metadata=None):
return os.path.exists(os.path.join(
self._collection_path(source, target), 'items',
'%s.%s' % (key, self.main.format.__extension__)
))
def update_item(self, source, target, key, response, metadata):
# Build collection path
items_path = os.path.join(self._collection_path(source, target), 'items')
# Ensure directory exists
if not os.path.exists(items_path):
os.makedirs(items_path)
# Write index to file
path = os.path.join(items_path, '%s.%s' % (key, self.main.format.__extension__))
with open(path, 'w') as fp:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
fp.write(chunk)
return True
#
# Private methods
#
def _collection_path(self, source, target):
return os.path.join(self.database_path(source, target), source)
@staticmethod
def _create_dir():
# Build cache path
path = os.path.join(
appdirs.user_data_dir('OpenEntityMap', appauthor=False),
'databases',
'file'
)
# Ensure cache directory exists
if not os.path.exists(path):
os.makedirs(path)
return path
| 27.266129 | 89 | 0.615794 | 3,097 | 0.916001 | 0 | 0 | 480 | 0.14197 | 0 | 0 | 392 | 0.115942 |
90a450c6bb8a1da60bd0c096428df1ba30321115 | 1,565 | py | Python | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
]
| null | null | null | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
]
| null | null | null | scripts/slave/recipe_modules/v8/gclient_config.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
]
 | 1 | 2020-07-22T09:16:32.000Z | 2020-07-22T09:16:32.000Z |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import DEPS
CONFIG_CTX = DEPS['gclient'].CONFIG_CTX
ChromiumGitURL = DEPS['gclient'].config.ChromiumGitURL
@CONFIG_CTX()
def v8(c):
soln = c.solutions.add()
soln.name = 'v8'
soln.url = ChromiumGitURL(c, 'v8', 'v8')
c.got_revision_reverse_mapping['got_revision'] = 'v8'
# Needed to get the testers to properly sync the right revision.
# TODO(infra): Upload full buildspecs for every build to isolate and then use
# them instead of this gclient garbage.
c.parent_got_revision_mapping['parent_got_revision'] = 'got_revision'
p = c.patch_projects
p['icu'] = ('v8/third_party/icu', 'HEAD')
@CONFIG_CTX(includes=['v8'])
def dynamorio(c):
soln = c.solutions.add()
soln.name = 'dynamorio'
soln.url = ChromiumGitURL(c, 'external', 'dynamorio')
@CONFIG_CTX(includes=['v8'])
def llvm_compiler_rt(c):
c.solutions[0].custom_deps['v8/third_party/llvm/projects/compiler-rt'] = (
ChromiumGitURL(c, 'external', 'llvm.org', 'compiler-rt'))
@CONFIG_CTX()
def node_js(c):
soln = c.solutions.add()
soln.name = 'node.js'
soln.url = ChromiumGitURL(c, 'external', 'github.com', 'v8', 'node')
soln.revision = 'vee-eight-lkgr:HEAD'
c.got_revision_reverse_mapping['got_node_js_revision'] = soln.name
@CONFIG_CTX(includes=['v8'])
def v8_valgrind(c):
c.solutions[0].custom_deps['v8/third_party/valgrind'] = (
ChromiumGitURL(c, 'chromium', 'deps', 'valgrind', 'binaries'))
| 30.686275 | 79 | 0.709904 | 0 | 0 | 0 | 0 | 1,280 | 0.817891 | 0 | 0 | 717 | 0.458147 |
90a4ede6bfdb471d923545a3e19b34b37a9df384 | 7,038 | py | Python | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | [
"MIT"
]
| null | null | null | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | [
"MIT"
]
| null | null | null | parser/fase2/team28/models/Other/funcion.py | jossiebk/tytus | de6ce433d61609d4eaa5d0dbbd2ce13aaa573544 | [
"MIT"
]
 | null | null | null |
from models.instructions.shared import Instruction
from models.Other.ambito import Ambito
from controllers.three_address_code import ThreeAddressCode
from controllers.procedures import Procedures
from models.instructions.Expression.expression import DATA_TYPE, PrimitiveData
class Parametro(Instruction):
def __init__(self, id, data_type, line, column):
self.id = id
self.data_type = data_type
self.line = line
self.column = column
self._tac = ''
def compile(self):
pass
def process(self, environment):
pass
def __repr__(self):
return str(vars(self))
class Funcion(Instruction):
def __init__(self, id, params, body, val_return, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.val_return = val_return
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
temporal = None
if self.isNew:
            self.environment = environment  # TODO: verify
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
temporal = self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
temporal = self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
temporal = self.setVariables(fun['variables'], environment)
return temporal
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the exit label for the function
        lbl_exit = ThreeAddressCode().newLabel()
        newAmbito.lbl_return = lbl_exit
        # Add the function body
        self.body.compile(newAmbito)
        # Add the exit label
        ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Prints the first declared variable, NOT a parameter
# ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
temp = ThreeAddressCode().newTemp()
            # Call the function
            ThreeAddressCode().addCode(f"{self.id}()")
            # Get the function's return value
ThreeAddressCode().addCode("#Obteniendo valor de retorno--------")
ThreeAddressCode().addCode(f"{temp} = Stack[P]")
return temp
return None
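# --- Editor's note (not part of the original file): for a call F(5) whose single
# argument was assigned stack slot 0, setVariables above would emit three-address
# code along these lines (t0 stands in for whatever ThreeAddressCode().newTemp()
# actually returns):
#     Stack[0] = 5
#     F()
#     #Obteniendo valor de retorno--------
#     t0 = Stack[P]
# and it returns that temporary, which holds the callee's result.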
class DropFuncion(Instruction):
def __init__(self, id, params, line, column):
self.id = id
self.params = params
self.line = line
self.column = column
class ProcedimientoAlmacenado(Instruction):
def __init__(self, id, params, body, isNew, isCall, line, column):
self.id = id
self.params = params
self.body = body
self.isNew = isNew
self.isCall = isCall
self.environment = None
self.line = line
self.column = column
def __repr__(self):
return str(vars(self))
def process(self, environment):
pass
def compile(self, environment):
params = len(self.params)
if self.isNew:
            self.environment = environment  # TODO: verify
if Procedures().saveProcedure(self.id, self, self.line, self.column):
var_array = self.print(environment)
self.setVariables(var_array, environment)
else:
var_array = Procedures().getProcedure(self.id, params, self.line, self.column)
if var_array:
self.setVariables(var_array, environment)
fun = ThreeAddressCode().searchFunction(self.id)
if fun:
self.setVariables(fun['variables'], environment)
#temp = ThreeAddressCode().newTemp()
def print(self, environment):
if ThreeAddressCode().searchFunction(self.id):
return None
ThreeAddressCode().newFunction(self.id)
newAmbito = Ambito(environment)
pos = 0
var_array = []
for var in self.params:
pos = ThreeAddressCode().stackCounter
var_array.append(newAmbito.addVar(var.id, var.data_type, None,
pos, var.line, var.column))
ThreeAddressCode().incStackCounter()
pos = ThreeAddressCode().stackCounter
        # Generate the exit label for the function
        lbl_exit = ThreeAddressCode().newLabel()
        newAmbito.lbl_return = lbl_exit
        # Add the function body
        self.body.compile(newAmbito)
        # Add the exit label
        ThreeAddressCode().addCode(f"label .{lbl_exit}")
        # Print the first declared variable, NOT a parameter
ThreeAddressCode().addCode(f"print(Stack[{pos}])")
ThreeAddressCode().createFunction(self.id, self.params, var_array)
return var_array
def setVariables(self, var_array, environment):
if self.isCall:
value = 0
for index, var in enumerate(var_array):
value = self.params[index].compile(environment)
if isinstance(value, PrimitiveData):
if value.data_type == DATA_TYPE.STRING:
value.value = f"\'{value.value}\'"
ThreeAddressCode().addCode(f"Stack[{var.position}] = {value.value}")
            # Call the function
ThreeAddressCode().addCode(f"{self.id}()")
            # A stored procedure does NOT return anything
 | 34.331707 | 90 | 0.596903 | 6,753 | 0.959506 | 0 | 0 | 0 | 0 | 0 | 0 | 895 | 0.127167 |
90a5135d7b2c7cb2a555e6f77c99a227c0fdaa11 | 2,386 | py | Python | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
]
| 1 | 2018-05-08T09:26:45.000Z | 2018-05-08T09:26:45.000Z | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
]
| null | null | null | podcast/download.py | jessstringham/podcasts | 04de6cc5cd7d27ee6ab56c0c7950526b606ec201 | [
"MIT"
]
 | 1 | 2020-12-13T18:04:00.000Z | 2020-12-13T18:04:00.000Z |
import typing
import urllib.error
import urllib.request
from podcast.files import download_location
from podcast.info import build_info_content
from podcast.info import InfoContent
from podcast.models import Channel
from podcast.models import get_podcast_audio_link
from podcast.models import NewStatus
from podcast.models import Podcast
from podcast.models import Radio
from podcast.models import RadioDirectory
def _download_from_url(url: str, location: str) -> bool:
try:
urllib.request.urlretrieve(url, location)
return True
except (IOError, urllib.error.ContentTooShortError):
# If a connection can't be made, IOError is raised
# If the download gets interrupted (ContentTooShortError), we
# should try again later
# TODO: can we tell if it was a bad filename (and should stop
# requesting it), or internet connectivity (and should tell
# us), or just a fluke (and should retry)?
return False
def download_podcast(
directory: RadioDirectory,
channel: Channel,
podcast: Podcast) -> Podcast:
location = download_location(directory, channel, podcast)
url = get_podcast_audio_link(podcast)
# TODO: This takes some time, especially when there are a lot to
# download. I could have this spawn threads, or add priorities,
# and so on. For now, since it runs every few hours, and is more
# of a push than a pull situation for the user, I'm leaving it
# simple
success = _download_from_url(url, location)
if success:
return podcast._replace(status=NewStatus())
else:
return podcast
def download_channel(directory: RadioDirectory, channel: Channel) -> Channel:
updated_podcasts = []
for known_podcast in channel.known_podcasts:
if type(known_podcast.status).__name__ == 'RequestedStatus':
known_podcast = download_podcast(directory, channel, known_podcast)
updated_podcasts.append(known_podcast)
return channel._replace(known_podcasts=updated_podcasts)
def download_radio(radio: Radio) -> typing.Tuple[Radio, InfoContent]:
downloaded_channels = [
download_channel(radio.directory, channel)
for channel in radio.channels
]
radio = radio._replace(channels=downloaded_channels)
info_content = build_info_content()
return (radio, info_content)
| 33.605634 | 79 | 0.723386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.240989 |
90a586abf2c437f6ccd419108bdf5f296a7fed74 | 5,630 | py | Python | tests/model/test_ocrd_page.py | j23d/core | b063737a5cc4701fb507328b5940919848934ef1 | [
"Apache-2.0"
]
| null | null | null | tests/model/test_ocrd_page.py | j23d/core | b063737a5cc4701fb507328b5940919848934ef1 | [
"Apache-2.0"
]
| null | null | null | tests/model/test_ocrd_page.py | j23d/core | b063737a5cc4701fb507328b5940919848934ef1 | [
"Apache-2.0"
]
| null | null | null | from tests.base import TestCase, main, assets
from ocrd_models.ocrd_page import (
AlternativeImageType,
PcGtsType,
PageType,
TextRegionType,
TextLineType,
WordType,
GlyphType,
parseString,
parse,
to_xml
)
simple_page = """\
<PcGts xmlns="http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15 http://schema.primaresearch.org/PAGE/gts/pagecontent/2013-07-15/pagecontent.xsd">
<Metadata>
<Creator>OCR-D</Creator>
<Created>2016-09-20T11:09:27.041+02:00</Created>
<LastChange>2018-04-25T17:44:49.605+01:00</LastChange>
</Metadata>
<Page
imageFilename="https://github.com/OCR-D/assets/raw/master/data/kant_aufklaerung_1784/data/OCR-D-IMG/INPUT_0017.tif"
imageWidth="1457"
imageHeight="2083"
type="content">
<TextRegion type="heading" id="r_1_1" custom="readingOrder {index:0;} structure {type:heading;}">
<Coords points="113,365 919,365 919,439 113,439"/>
<TextLine id="tl_1" primaryLanguage="German" custom="readingOrder {index:0;} textStyle {offset:0; length:26;fontFamily:Arial; fontSize:17.0; bold:true;}">
<Coords points="114,366 918,366 918,438 114,438"/>
<Baseline points="114,429 918,429"/>
<Word id="w_w1aab1b1b2b1b1ab1" language="German" custom="readingOrder {index:0;} textStyle {offset:0; length:11;fontFamily:Arial; fontSize:17.0; bold:true;}">
<Coords points="114,368 442,368 442,437 114,437"/>
<TextEquiv conf="0.987654321">
<Unicode>Berliniſche</Unicode>
</TextEquiv>
</Word>
</TextLine>
</TextRegion>
</Page>
</PcGts>
"""
# pylint: disable=protected-access
class TestOcrdPage(TestCase):
def setUp(self):
with open(assets.path_to('glyph-consistency/data/OCR-D-GT-PAGE/FAULTY_GLYPHS.xml'), 'rb') as f:
self.xml_as_str = f.read()
self.pcgts = parseString(self.xml_as_str, silence=True)
def test_to_xml(self):
# with open('/tmp/test.xml', 'w') as f:
# f.write(to_xml(self.pcgts))
self.assertIn(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15 http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15/pagecontent.xsd"', to_xml(self.pcgts)[:1000])
self.assertIn('</TextRegion', to_xml(self.pcgts))
def test_issue_269(self):
"""
@conf is parsed as str but should be float
https://github.com/OCR-D/core/issues/269
"""
# GIGO
self.pcgts.get_Page().get_TextRegion()[0].get_TextEquiv()[0].set_conf(1.0)
self.assertEqual(type(self.pcgts.get_Page().get_TextRegion()[0].get_TextEquiv()[0].get_conf()), float)
self.pcgts.get_Page().get_TextRegion()[0].get_TextEquiv()[0].set_conf('1.0')
self.assertEqual(type(self.pcgts.get_Page().get_TextRegion()[0].get_TextEquiv()[0].get_conf()), str)
# test with parseString that @conf in TextEquiv won't throw an error
parseString(simple_page, silence=True)
# self.assertTrue(True)
def test_pcGtsId(self):
self.assertEqual(self.pcgts.pcGtsId, 'glyph-test')
def test_delete_region(self):
pcgts = parseString(simple_page, silence=True)
self.assertEqual(len(pcgts.get_Page().get_TextRegion()), 1)
del pcgts.get_Page().get_TextRegion()[0]
self.assertEqual(len(pcgts.get_Page().get_TextRegion()), 0)
def test_imageFileName(self):
# print(self.pcgts.export(sys.stdout, 0))
self.assertEqual(self.pcgts.get_Page().imageFilename, '00000259.sw.tif')
self.pcgts.get_Page().imageFilename = 'foo'
self.assertEqual(self.pcgts.get_Page().imageFilename, 'foo')
def test_alternativeImage(self):
pcgts = PcGtsType(pcGtsId="foo")
self.assertEqual(pcgts.pcGtsId, 'foo')
# Page/AlternativeImage
page = PageType()
pcgts.set_Page(page)
page.add_AlternativeImage(AlternativeImageType())
# TextRegion/AlternativeImage
region = TextRegionType()
page.add_TextRegion(region)
region.add_AlternativeImage(AlternativeImageType())
# TextLine/AlternativeImage
line = TextLineType()
region.add_TextLine(line)
line.add_AlternativeImage(AlternativeImageType())
# Word/AlternativeImage
word = WordType()
line.add_Word(word)
word.add_AlternativeImage(AlternativeImageType())
# Glyph/AlternativeImage
glyph = GlyphType()
word.add_Glyph(glyph)
glyph.add_AlternativeImage(AlternativeImageType())
def test_simpletypes(self):
pcgts = parseString(simple_page, silence=True)
self.assertTrue(isinstance(pcgts.get_Page().imageWidth, int))
el = pcgts.get_Page().get_TextRegion()[0].get_TextLine()[0].get_Word()[0].get_TextEquiv()[0]
self.assertTrue(isinstance(el.conf, float))
# XXX no validation on setting attributes :-(
# c.f. https://www.davekuhlman.org/generateDS.html#simpletype
# el.set_conf('2.0987')
# self.assertTrue(isinstance(el.conf, float))
with self.assertRaisesRegex(TypeError, ''):
el.set_conf('I AM NOT A FLOAT DEAL WITH IT')
parseString(to_xml(pcgts).encode('utf8'))
if __name__ == '__main__':
main()
| 43.307692 | 298 | 0.649556 | 3,655 | 0.649085 | 0 | 0 | 0 | 0 | 0 | 0 | 2,694 | 0.478423 |
90a811a1c9219aef26a6c2b2f33c1210f92378af | 643 | py | Python | athena/athena/algorithms/NetworkAnalysis/Components.py | aculich/openmappr | c9e5b4cfc974a6eda9cbc8a0ea6f8a96ce35efba | [
"MIT"
]
| 19 | 2018-04-05T23:33:33.000Z | 2022-03-24T00:18:20.000Z | athena/athena/algorithms/NetworkAnalysis/Components.py | aculich/openmappr | c9e5b4cfc974a6eda9cbc8a0ea6f8a96ce35efba | [
"MIT"
]
| 13 | 2018-01-10T23:31:11.000Z | 2018-07-20T12:55:02.000Z | athena/athena/algorithms/NetworkAnalysis/Components.py | aculich/openmappr | c9e5b4cfc974a6eda9cbc8a0ea6f8a96ce35efba | [
"MIT"
]
| 5 | 2018-02-12T05:33:19.000Z | 2019-09-21T22:43:02.000Z | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 13 15:35:50 2014
@author: rich
"""
import networkx as nx
# assign component IDs to graph components, id=0 is giant component
def componentIDs(network):
# networkx algo only works on undirected network
if isinstance(network, nx.DiGraph):
network = nx.Graph(network)
cIDs = {}
components = sorted(nx.connected_components(network), key = len, reverse=True)
# assign ids to node properties
for i in range(len(components)):
component = components[i]
cIDs.update(dict(zip(component, len(component)*[i])))
return cIDs
| 26.791667 | 82 | 0.659409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 250 | 0.388802 |
90a821eadcd600fc9ceb85786e62d6539b2c7ae3 | 9,603 | py | Python | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
]
| 5 | 2019-12-15T09:47:02.000Z | 2022-03-16T03:18:55.000Z | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
]
| null | null | null | tools/netconf.py | jpfluger/radiucal | 42666478baaa93da05fdc5ab8f3b53df68b993e6 | [
"BSD-3-Clause"
]
| 1 | 2021-03-27T08:11:53.000Z | 2021-03-27T08:11:53.000Z | #!/usr/bin/python
"""composes the config from user definitions."""
import argparse
import os
import users
import users.__config__
import importlib
import csv
# file indicators
IND_DELIM = "_"
USER_INDICATOR = "user" + IND_DELIM
VLAN_INDICATOR = "vlan" + IND_DELIM
AUTH_PHASE_ONE = "PEAP"
AUTH_PHASE_TWO = "MSCHAPV2"
class ConfigMeta(object):
"""configuration meta information."""
def __init__(self):
"""init the instance."""
self.passwords = []
self.macs = []
self.vlans = []
self.all_vlans = []
self.user_name = []
self.vlan_users = []
self.vlan_initiate = []
self.extras = []
def password(self, password):
"""password group validation(s)."""
if password in self.passwords:
print("password duplicated")
exit(-1)
self.passwords.append(password)
def extra(self, macs):
"""Limited macs."""
for mac in macs:
if mac in self.extras:
print("mac already known as extra: " + mac)
exit(-1)
self.extras.append(mac)
def user_macs(self, macs):
"""user+mac combos."""
self.macs = self.macs + macs
self.macs = list(set(self.macs))
def verify(self):
"""verify meta data."""
for mac in self.macs:
if mac in self.extras:
print("mac is flagged extra: " + mac)
exit(-1)
for mac in self.extras:
if mac in self.macs:
print("mac is user assigned: " + mac)
exit(-1)
used_vlans = set(self.vlans + self.vlan_initiate)
if len(used_vlans) != len(set(self.all_vlans)):
print("unused vlans detected")
exit(-1)
for ref in used_vlans:
if ref not in self.all_vlans:
print("reference to unknown vlan: " + ref)
exit(-1)
def vlan_user(self, vlan, user):
"""indicate a vlan was used."""
self.vlans.append(vlan)
self.vlan_users.append(vlan + "." + user)
self.user_name.append(user)
def vlan_to_vlan(self, vlan_to):
"""VLAN to VLAN mappings."""
self.vlan_initiate.append(vlan_to)
def _get_mod(name):
"""import the module dynamically."""
return importlib.import_module("users." + name)
def _load_objs(name, typed):
mod = _get_mod(name)
for key in dir(mod):
obj = getattr(mod, key)
if not isinstance(obj, typed):
continue
yield obj
def _get_by_indicator(indicator):
"""get by a file type indicator."""
return [x for x in sorted(users.__all__) if x.startswith(indicator)]
def _common_call(common, method, entity):
"""make a common mod call."""
obj = entity
if common is not None and method in dir(common):
call = getattr(common, method)
if call is not None:
obj = call(obj)
return obj
def check_object(obj):
"""Check an object."""
return obj.check()
def _process(output):
"""process the composition of users."""
common_mod = None
try:
common_mod = _get_mod("common")
print("loaded common definitions...")
except Exception as e:
print("defaults only...")
vlans = None
meta = ConfigMeta()
for v_name in _get_by_indicator(VLAN_INDICATOR):
print("loading vlan..." + v_name)
for obj in _load_objs(v_name, users.__config__.VLAN):
if vlans is None:
vlans = {}
if not check_object(obj):
exit(-1)
num_str = str(obj.num)
for vk in vlans.keys():
if num_str == vlans[vk]:
print("vlan number defined multiple times...")
exit(-1)
vlans[obj.name] = num_str
if obj.initiate is not None and len(obj.initiate) > 0:
for init_to in obj.initiate:
meta.vlan_to_vlan(init_to)
if vlans is None:
raise Exception("missing required config settings...")
meta.all_vlans = vlans.keys()
store = Store()
for f_name in _get_by_indicator(USER_INDICATOR):
print("composing..." + f_name)
for obj in _load_objs(f_name, users.__config__.Assignment):
obj = _common_call(common_mod, 'ready', obj)
key = f_name.replace(USER_INDICATOR, "")
if not key.isalnum():
print("does not meet naming requirements...")
exit(-1)
vlan = obj.vlan
if vlan not in vlans:
raise Exception("no vlan defined for " + key)
store.add_vlan(vlan, vlans[vlan])
meta.vlan_user(vlan, key)
fqdn = vlan + "." + key
if not check_object(obj):
print("did not pass check...")
exit(-1)
if obj.disabled:
print("account is disabled")
continue
macs = sorted(obj.macs)
password = obj.password
bypassed = sorted(obj.bypassed())
owned = sorted(obj.owns)
# meta checks
meta.user_macs(macs)
if not obj.inherits:
meta.password(password)
meta.extra(bypassed)
meta.extra(owned)
store.add_user(fqdn, macs, password)
if obj.mab_only:
store.set_mab(fqdn)
if len(bypassed) > 0:
for m in bypassed:
store.add_mab(m, obj.bypass_vlan(m))
user_all = []
for l in [obj.macs, obj.owns, bypassed]:
user_all += list(l)
store.add_audit(fqdn, sorted(set(user_all)))
meta.verify()
# audit outputs
with open(output + "audit.csv", 'w') as f:
csv_writer = csv.writer(f, lineterminator=os.linesep)
for a in sorted(store.get_tag(store.audit)):
p = a[0].split(".")
for m in a[1]:
csv_writer.writerow([p[1], p[0], m])
# eap_users and preauth
manifest = []
with open(output + "eap_users", 'w') as f:
for u in store.get_eap_user():
f.write('"{}" {}\n\n'.format(u[0], AUTH_PHASE_ONE))
f.write('"{}" {} hash:{} [2]\n'.format(u[0], AUTH_PHASE_TWO, u[1]))
write_vlan(f, u[2])
for u in store.get_eap_mab():
up = u[0].upper()
f.write('"{}" MD5 "{}"\n'.format(up, up))
write_vlan(f, u[1])
manifest.append((u[0], u[0]))
for u in store.get_tag(store.umac):
manifest.append((u[0], u[1]))
with open(output + "manifest", 'w') as f:
for m in sorted(manifest):
f.write("{}.{}\n".format(m[0], m[1]).lower())
def write_vlan(f, vlan_id):
"""Write vlan assignment for login."""
f.write('radius_accept_attr=64:d:13\n')
f.write('radius_accept_attr=65:d:6\n')
f.write('radius_accept_attr=81:s:{}\n\n'.format(vlan_id))
class Store(object):
"""Storage object."""
def __init__(self):
"""Init the instance."""
self._data = []
self.umac = "UMAC"
self.pwd = "PWD"
self.mac = "MAC"
self.audit = "AUDIT"
self._users = []
self._mab = []
self._macs = []
self._vlans = {}
def set_mab(self, username):
"""Set a user as MAB-only, no login set."""
self._mab.append(username)
def get_tag(self, tag):
"""Get tagged items."""
for item in self._data:
if item[0] == tag:
yield item[1:]
def add_vlan(self, vlan_name, vlan_id):
"""Add a vlan item."""
self._vlans[vlan_name] = vlan_id
def _add(self, tag, key, value):
"""Backing tagged add."""
self._data.append([tag, key, value])
def add_user(self, username, macs, password):
"""Add a user definition."""
if username in self._users:
raise Exception("{} already defined".format(username))
self._users.append(username)
for m in macs:
self._add(self.umac, username, m)
self._add(self.pwd, username, password)
def add_mab(self, mac, vlan):
"""Add a MAB."""
if mac in self._macs:
raise Exception("{} already defined".format(mac))
self._macs.append(mac)
self._add(self.mac, mac, vlan)
def add_audit(self, user, objs):
"""Add an audit entry."""
self._add(self.audit, user, objs)
def get_eap_mab(self):
"""Get eap entries for MAB."""
for m in self.get_tag(self.mac):
v = m[1]
if not isinstance(v, int):
v = self._get_vlan(v)
yield [m[0], v]
def get_eap_user(self):
"""Get eap users."""
for u in self.get_tag(self.pwd):
if u[0] in self._mab:
continue
vlan = u[0].split(".")[0]
yield [u[0], u[1], self._get_vlan(vlan)]
def _get_vlan(self, name):
"""Get vlans."""
return self._vlans[name]
def main():
"""main entry."""
success = False
try:
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, required=True)
args = parser.parse_args()
_process(args.output)
success = True
except Exception as e:
print('unable to compose')
print(str(e))
if success:
print("success")
exit(0)
else:
print("failure")
exit(1)
if __name__ == "__main__":
main()
| 30.389241 | 79 | 0.53754 | 4,029 | 0.419556 | 808 | 0.08414 | 0 | 0 | 0 | 0 | 1,653 | 0.172134 |
90a9c694ad7055aeb7e214346c75ba596c28d602 | 3,673 | py | Python | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
]
| null | null | null | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
]
| null | null | null | twitter_scrapper.py | juanlucruz/SportEventLocator | 1ac8236f9fdd60917b9a7ee6bb6ca1fa5f6fa71e | [
"Apache-2.0"
]
| null | null | null | # Import the Twython class
from twython import Twython, TwythonStreamer
import json
# import pandas as pd
import csv
import datetime
def process_tweet(tweet):
# Filter out unwanted data
d = {}
d['hashtags'] = [hashtag['text'] for hashtag in tweet['entities']['hashtags']]
try:
for key in {
'created_at', 'id', 'text', 'source', 'truncated',
'in_reply_to_status_id', 'in_reply_to_user_id',
'in_reply_to_screen_name', 'user', 'coordinates',
'place', 'quoted_status_id', 'is_quote_status', 'quoted_status',
'retweeted_status', 'quote_count', 'reply_count', 'retweet_count',
'favorite_count', 'favorited', 'retweeted', 'entities', 'extended_entities',
'possibly_sensitive', 'filter_level', 'lang', 'matching_rules'}:
if key == 'user':
pass
elif key == 'place':
pass
elif key == 'quoted_status' or key == 'retweeted_status':
pass
elif key == 'entities':
pass
elif key == 'extended_entities':
pass
else:
d[key] = tweet[key]
except KeyError as e:
pass
# d['text'] = tweet['text']
# d['user'] = tweet['user']['screen_name']
# d['user_loc'] = tweet['user']['location']
# d['date'] = tweet['created_at']
return d
# Create a class that inherits TwythonStreamer
class MyStreamer(TwythonStreamer):
# Received data
def on_success(self, data):
# # Only collect tweets in English
# if data['lang'] == 'en':
# tweet_data = process_tweet(data)
print(datetime.datetime.now())
# self.save_to_csv(tweet_data)
self.save_to_json(data)
# Problem with the API
def on_error(self, status_code, data):
print(status_code, data)
self.disconnect()
# Save each tweet to csv file
def save_to_csv(self, tweet):
# with open(r'saved_tweets.csv', 'a') as out_file:
with open(r'saved_tweets_big.csv', 'a') as out_file:
writer = csv.writer(out_file)
writer.writerow(list(tweet.values()))
def save_to_json(self, tweet):
with open('saved_tweets_big.json', 'a') as out_file:
json.dump(tweet, out_file)
def main():
# Load credentials from json file
with open("twitter_credentials.json", "r") as tw_creds:
creds = json.load(tw_creds)
# Instantiate an object
# python_tweets = Twython(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'])
# Instantiate from our streaming class
stream = MyStreamer(creds['CONSUMER_KEY'], creds['CONSUMER_SECRET'],
creds['ACCESS_TOKEN'], creds['ACCESS_SECRET'])
# Start the stream
# stream.statuses.filter(track='madrid')
stream.statuses.filter(locations='-7.876154,37.460012,3.699873,43.374723')
# # Create our query
# query = {
# 'q': 'futbol',
# 'result_type': 'mixed',
# 'lang': 'es',
# 'count': '100',
# }
#
# dict_ = {'user': [], 'date': [], 'text': [], 'favorite_count': []}
# for status in python_tweets.search(**query)['statuses']:
# print(format(status))
# dict_['user'].append(status['user']['screen_name'])
# dict_['date'].append(status['created_at'])
# dict_['text'].append(status['text'])
# dict_['favorite_count'].append(status['favorite_count'])
#
# df = pd.DataFrame(dict_)
# df.sort_values(by='favorite_count', inplace=True, ascending=False)
# print(df.values)
if __name__ == "__main__":
main()
| 33.390909 | 88 | 0.58263 | 865 | 0.235502 | 0 | 0 | 0 | 0 | 0 | 0 | 1,999 | 0.544242 |
90aa48820bf97867a9816268e697f65885c29466 | 389 | py | Python | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
]
| null | null | null | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
]
| null | null | null | tools/bin/filter_cassandra_attributes.py | fruch/scylla-tools-java | 3fdce3d357b64402799742f61d3cc33b6f8fcfbb | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python2
import sys;
from yaml import load, dump, load_all
from cassandra_attributes import *
def main():
attributes = dict()
for i in range(1, len(sys.argv)):
attributes.update(load(open(sys.argv[i], 'r')))
print dump(dict(filter(lambda (a, b): a in cassandra_attributes, attributes.items())))
if __name__ == "__main__":
main()
| 25.933333 | 109 | 0.637532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.089974 |
90aa5dbc6e140871e083e339d177b3478bf2b89d | 526 | py | Python | ci/test_filename.py | climateamante/linode.docs | 9a2d26db11ab439f354bb9027eb62eda7453ff0b | [
"CC-BY-4.0"
]
| null | null | null | ci/test_filename.py | climateamante/linode.docs | 9a2d26db11ab439f354bb9027eb62eda7453ff0b | [
"CC-BY-4.0"
]
| null | null | null | ci/test_filename.py | climateamante/linode.docs | 9a2d26db11ab439f354bb9027eb62eda7453ff0b | [
"CC-BY-4.0"
]
| null | null | null | import pytest
import itertools
# Cartesian product of file names and extensions
# e.g. README.txt, README.md, CHANGELOG.txt, CHANGELOG.md ...
file_extensions = ['txt', 'md']
names = ['README', 'CHANGELOG', 'CONTRIBUTING', 'LICENSE', 'CODE_OF_CONDUCT']
exempt_files = [('.'.join(x)) for x in itertools.product(names, file_extensions)]
def test_filename(md_filepath):
if any(e in md_filepath for e in exempt_files):
assert True
else:
assert md_filepath.islower() == True,'Filename should be lowercase'
| 35.066667 | 81 | 0.709125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.39924 |
90aa7fec2a73694bfef5aa1b7617bf2c7fb170fa | 1,570 | py | Python | test/test_sshtransport.py | stribika/sshlabs | 421e62433aab0e21456254a0b2c5feb830d0c77c | [
"Unlicense"
]
| 76 | 2015-01-24T19:18:31.000Z | 2021-03-11T11:17:14.000Z | test/test_sshtransport.py | stribika/sshlabs | 421e62433aab0e21456254a0b2c5feb830d0c77c | [
"Unlicense"
]
| 8 | 2015-01-24T18:59:57.000Z | 2017-06-07T13:07:34.000Z | test/test_sshtransport.py | stribika/sshlabs | 421e62433aab0e21456254a0b2c5feb830d0c77c | [
"Unlicense"
]
| 21 | 2015-01-24T18:56:52.000Z | 2021-03-10T14:33:14.000Z | import sys
import unittest
sys.path.append("../main")
from sshtransport import *
class FakeSocket(object):
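    # Minimal in-memory socket double: recv() is served from a preloaded buffer and
    # send() appends to send_buffer so tests can inspect what was written.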
def __init__(self):
self.recv_buffer = b""
self.send_buffer = b""
def recv(self, n):
resp = self.recv_buffer[:n]
self.recv_buffer = self.recv_buffer[n:]
return resp
def send(self, x):
self.send_buffer += x
class TestIdentificationString(unittest.TestCase):
def test_recv(self):
conn = FakeSocket()
conn.recv_buffer = b"SSH-2.00-SecureMcShellface_1.0\r\n"
idstr = IdentificationString(recvfrom=conn)
self.assertEqual(idstr.protoversion, "2.00")
self.assertEqual(idstr.softwareversion, "SecureMcShellface_1.0")
def test_send(self):
conn = FakeSocket()
idstr = IdentificationString(protoversion="2.00", softwareversion="SecureMcShellface_1.0")
idstr.send(conn)
self.assertEqual(conn.send_buffer, b"SSH-2.00-SecureMcShellface_1.0\r\n")
class TestBinaryPacket(unittest.TestCase):
def test_recv(self):
conn = FakeSocket()
conn.recv_buffer = b"\x00\x00\x00\x14\x07Hello World!\x00\x00\x00\x00\x00\x00\x00"
binpkt = BinaryPacket(recvfrom=conn)
self.assertEqual(binpkt.payload, b"Hello World!")
self.assertEqual(binpkt.mac, b"")
def test_send(self):
conn = FakeSocket()
binpkt = BinaryPacket(payload=b"Hello World!")
binpkt.send(conn)
self.assertEqual(conn.send_buffer, b"\x00\x00\x00\x14\x07Hello World!\x00\x00\x00\x00\x00\x00\x00")
| 32.708333 | 107 | 0.658599 | 1,481 | 0.943312 | 0 | 0 | 0 | 0 | 0 | 0 | 306 | 0.194904 |
90ab146abe91415bc0bc793fedf75c04fb9406e9 | 7,357 | py | Python | activity-classification/main_scenario_baseline.py | bstollnitz/grad-school-portfolio | 484e00cc4857de2eda6848f61a1e6fbf26309d42 | [
"MIT"
]
| 2 | 2019-10-24T16:40:44.000Z | 2020-06-21T03:56:18.000Z | activity-classification/main_scenario_baseline.py | bstollnitz/portfolio | 484e00cc4857de2eda6848f61a1e6fbf26309d42 | [
"MIT"
]
| null | null | null | activity-classification/main_scenario_baseline.py | bstollnitz/portfolio | 484e00cc4857de2eda6848f61a1e6fbf26309d42 | [
"MIT"
]
| null | null | null | import random
import time
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
import utils_graph
import utils_io
import utils_nn
from feed_forward import FeedForward
from hyperparameters import Hyperparameters
from signal_data import SignalData
from signal_dataset import SignalDataset
PLOTS_FOLDER = 'plots'
USE_CUDA = torch.cuda.is_available()
def _train_ff_network(hyperparameter_dict: dict,
data: SignalData) -> Tuple[FeedForward, List, List, List, List]:
"""Trains a feed-forward network using the specified hyperparameters.
"""
# Ensure reproducibility by giving PyTorch the same seed every time we train.
torch.manual_seed(1)
# Print hyperparameters.
print(f'Hyperparameters: {hyperparameter_dict}')
# Get hyperparameters.
learning_rate = hyperparameter_dict['learning_rate']
batch_size = hyperparameter_dict['batch_size']
optimizer_str = hyperparameter_dict['optimizer']
# There are 6 labels, and Pytorch expects them to go from 0 to 5.
full_train_labels = data.train_labels - 1
# Get generators.
signal_dataset = SignalDataset(data.train_signals, full_train_labels)
(training_generator, validation_generator) = utils_nn.get_trainval_generators(
signal_dataset, batch_size, num_workers=0, training_fraction=0.8)
    # Create feed forward network.
input_size = data.num_timesteps * data.num_components
feed_forward = FeedForward(input_size, input_size, data.num_activity_labels)
print(feed_forward)
# Parameters should be moved to GPU before constructing the optimizer.
device = torch.device('cuda:0' if USE_CUDA else 'cpu')
feed_forward = feed_forward.to(device)
# Get optimizer.
optimizer = None
if optimizer_str == 'adam':
optimizer = torch.optim.Adam(feed_forward.parameters(), lr=learning_rate)
elif optimizer_str == 'sgd':
optimizer = torch.optim.SGD(feed_forward.parameters(), lr=learning_rate)
else:
raise Exception(f'Specified optimizer not valid: {optimizer_str}')
training_accuracy_list = []
training_loss_list = []
validation_accuracy_list = []
validation_loss_list = []
max_epochs = 10
for epoch in range(max_epochs):
print(f'Epoch {epoch}')
# Training data.
(training_accuracy, training_loss) = utils_nn.fit(feed_forward,
training_generator, optimizer, USE_CUDA)
training_accuracy_list.append(training_accuracy)
training_loss_list.append(training_loss)
# Validation data.
(validation_accuracy, validation_loss) = utils_nn.evaluate(feed_forward,
validation_generator, 'Validation', USE_CUDA)
validation_accuracy_list.append(validation_accuracy)
validation_loss_list.append(validation_loss)
return (feed_forward, training_accuracy_list, training_loss_list,
validation_accuracy_list, validation_loss_list)
def _get_ff_hyperparameters() -> Hyperparameters:
"""Returns hyperparameters used to tune the feed-forward network.
"""
# First pass:
hyperparameter_values = Hyperparameters({
'learning_rate': [0.1, 0.01, 0.001],
'batch_size': [32, 64, 128],
'optimizer': ['adam', 'sgd']
})
# Best:
# optimizer: sgd, batch size: 64, learning rate: 0.1
# Second pass:
hyperparameter_values = Hyperparameters({
'learning_rate': [0.05, 0.1, 0.2],
'batch_size': [16, 32, 64],
'optimizer': ['sgd']
})
# Best:
# optimizer: sgd, batch size: 16, learning rate: 0.1
return hyperparameter_values
def _tune_ff_hyperparameters(data: SignalData) -> None:
"""Classifies temporal signals using a feed-forward network.
"""
print(' Tuning hyperparameters.')
start_time = time.time()
# Hyperparameters to tune.
hyperparameter_values = _get_ff_hyperparameters()
hyperparameter_combinations = hyperparameter_values.sample_combinations()
# Create Tensorboard writer.
with SummaryWriter(f'runs/signals', filename_suffix='') as writer:
# Hyperparameter loop.
for hyperparameter_dict in hyperparameter_combinations:
(_, _, _, validation_accuracy_list, _) = _train_ff_network(
hyperparameter_dict, data)
writer.add_hparams(hyperparameter_dict,
{'hparam/signals/validation_accuracy': validation_accuracy_list[-1]})
utils_io.print_elapsed_time(start_time, time.time())
def _test_ff_network(feed_forward: FeedForward, signal_data: SignalData,
hyperparameter_dict: dict) -> Tuple[float, float]:
"""Returns accuracy and loss of specified network for specified test data
and specified hyperparameters.
"""
# There are 6 labels, and Pytorch expects them to go from 0 to 5.
test_labels = signal_data.test_labels - 1
# Get test generator.
batch_size = hyperparameter_dict['batch_size']
test_data = SignalDataset(signal_data.test_signals, test_labels)
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 0}
test_generator = data.DataLoader(test_data, **params)
(test_avg_accuracy, test_avg_loss) = utils_nn.evaluate(feed_forward,
test_generator, 'Test', USE_CUDA)
return (test_avg_accuracy, test_avg_loss)
def _test_best_ff_hyperparameters(data: SignalDataset) -> None:
"""Use network with best hyperparameters to predict labels for test data.
Produces accuracy and loss graphs for training and validation data, as
well as accuracy and loss values for test data.
"""
hyperparameter_dict = {
'learning_rate': 0.1,
'batch_size': 16,
'optimizer': 'sgd',
}
(feed_forward, training_accuracy_list,
training_loss_list,
validation_accuracy_list,
validation_loss_list) = _train_ff_network(hyperparameter_dict,
data)
utils_graph.graph_nn_results(training_accuracy_list, validation_accuracy_list,
f'Training and validation accuracy of classification of temporal signals',
'Accuracy', PLOTS_FOLDER, f'signals_accuracy.html')
utils_graph.graph_nn_results(training_loss_list, validation_loss_list,
f'Training and validation loss of classification of temporal signals',
'Loss', PLOTS_FOLDER, f'signals_loss.html')
_test_ff_network(feed_forward, data, hyperparameter_dict)
with SummaryWriter(f'runs/signals', filename_suffix='') as writer:
num_epochs_train_val = len(training_accuracy_list)
for i in range(num_epochs_train_val):
writer.add_scalars(f'signals/accuracy', {
'training': training_accuracy_list[i],
'validation': validation_accuracy_list[i]
}, i)
writer.add_scalars(f'signals/loss', {
'training': training_loss_list[i],
'validation': validation_loss_list[i]
}, i)
# Test accuracy: 87.25%
# Test loss: 0.45
def scenario1(data: SignalData) -> None:
"""Uses a simple feed forward network to classify the raw signal.
"""
print('Scenario 1: feed forward network on raw signal')
# _tune_ff_hyperparameters(data)
_test_best_ff_hyperparameters(data) | 35.887805 | 85 | 0.703819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,150 | 0.292239 |
90ab4c6f6273b660fe6334ebc9b6fb8fce97ce8e | 868 | py | Python | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
]
| null | null | null | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
]
| null | null | null | 2020/day04/day4_part1.py | dstjacques/AdventOfCode | 75bfb46a01487430d552ea827f0cf8ae3368f686 | [
"MIT"
]
| null | null | null | input = """
ecl:gry pid:860033327 eyr:2020 hcl:#fffffd
byr:1937 iyr:2017 cid:147 hgt:183cm
iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884
hcl:#cfa07d byr:1929
hcl:#ae17e1 iyr:2013
eyr:2024
ecl:brn pid:760753108 byr:1931
hgt:179cm
hcl:#cfa07d eyr:2025 pid:166559648
iyr:2011 ecl:brn hgt:59in
"""
def validate(passport):
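    # A passport is valid when every required field except the optional "cid" is present.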
passport_fields = { "byr": False, "iyr": False, "eyr": False, "hgt": False, "hcl": False, "ecl": False, "pid": False }
for line in passport.split("\n"):
values = line.split(" ")
for value in values:
field = value.split(":")[0]
if field == "cid":
continue
passport_fields[field] = True
if False in passport_fields.values():
return False
return True
count = 0
for i in input.strip().split("\n\n"):
if validate(i):
count += 1
print(count) | 25.529412 | 122 | 0.615207 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 346 | 0.398618 |
90ad0d873a774414aef935d258105887a6980e80 | 3,322 | py | Python | flit_core/flit_core/tests/test_common.py | rahul-deepsource/flit | 5d5be0a9c6f77a2dbbffd3369ad8ac7813a926bf | [
"BSD-3-Clause"
]
| null | null | null | flit_core/flit_core/tests/test_common.py | rahul-deepsource/flit | 5d5be0a9c6f77a2dbbffd3369ad8ac7813a926bf | [
"BSD-3-Clause"
]
| null | null | null | flit_core/flit_core/tests/test_common.py | rahul-deepsource/flit | 5d5be0a9c6f77a2dbbffd3369ad8ac7813a926bf | [
"BSD-3-Clause"
]
| 1 | 2021-06-24T10:21:43.000Z | 2021-06-24T10:21:43.000Z | import os.path as osp
from unittest import TestCase
import pytest
from flit_core.common import (
Module, get_info_from_module, InvalidVersion, NoVersionError, check_version,
normalize_file_permissions, Metadata
)
samples_dir = osp.join(osp.dirname(__file__), 'samples')
class ModuleTests(TestCase):
def test_package_importable(self):
i = Module('package1', samples_dir)
assert i.path == osp.join(samples_dir, 'package1')
assert i.file == osp.join(samples_dir, 'package1', '__init__.py')
assert i.is_package
def test_module_importable(self):
i = Module('module1', samples_dir)
assert i.path == osp.join(samples_dir, 'module1.py')
assert not i.is_package
def test_missing_name(self):
with self.assertRaises(ValueError):
i = Module('doesnt_exist', samples_dir)
def test_get_info_from_module(self):
info = get_info_from_module(Module('module1', samples_dir))
self.assertEqual(info, {'summary': 'Example module',
'version': '0.1'}
)
info = get_info_from_module(Module('module2', samples_dir))
self.assertEqual(info, {'summary': 'Docstring formatted like this.',
'version': '7.0'}
)
info = get_info_from_module(Module('package1', samples_dir))
self.assertEqual(info, {'summary': 'A sample package',
'version': '0.1'}
)
info = get_info_from_module(Module('moduleunimportable', samples_dir))
self.assertEqual(info, {'summary': 'A sample unimportable module',
'version': '0.1'}
)
info = get_info_from_module(Module('modulewithconstructedversion', samples_dir))
self.assertEqual(info, {'summary': 'This module has a __version__ that requires runtime interpretation',
'version': '1.2.3'}
)
with self.assertRaises(InvalidVersion):
get_info_from_module(Module('invalid_version1', samples_dir))
def test_version_raise(self):
with pytest.raises(InvalidVersion):
check_version('a.1.0.beta0')
with pytest.raises(InvalidVersion):
check_version('3!')
with pytest.raises(InvalidVersion):
check_version((1, 2))
with pytest.raises(NoVersionError):
check_version(None)
assert check_version('4.1.0beta1') == '4.1.0b1'
assert check_version('v1.2') == '1.2'
def test_normalize_file_permissions():
assert normalize_file_permissions(0o100664) == 0o100644 # regular file
assert normalize_file_permissions(0o40775) == 0o40755 # directory
@pytest.mark.parametrize(
("requires_python", "expected_result"),
[
("", True),
(">2.7", True),
("3", False),
(">= 3.7", False),
("<4, > 3.2", False),
(">3.4", False),
(">=2.7, !=3.0.*, !=3.1.*, !=3.2.*", True),
],
)
def test_supports_py2(requires_python, expected_result):
metadata = object.__new__(Metadata)
metadata.requires_python = requires_python
result = metadata.supports_py2
assert result == expected_result
| 35.340426 | 112 | 0.599639 | 2,351 | 0.707706 | 0 | 0 | 500 | 0.150512 | 0 | 0 | 642 | 0.193257 |
90af463579adb14e899b746a24caf95a35d80b1b | 3,017 | py | Python | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
]
| null | null | null | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
]
| null | null | null | flumine/markets/market.py | jsphon/flumine | bd5cacf9793d53a99595fe4694aeb9b8d2962abb | [
"MIT"
]
| null | null | null | import datetime
import logging
from typing import Optional
from betfairlightweight.resources.bettingresources import MarketBook, MarketCatalogue
from .blotter import Blotter
from ..events import events
logger = logging.getLogger(__name__)
class Market:
def __init__(
self,
flumine,
market_id: str,
market_book: MarketBook,
market_catalogue: MarketCatalogue = None,
):
self.flumine = flumine
self.market_id = market_id
self.closed = False
self.date_time_closed = None
self.market_book = market_book
self.market_catalogue = market_catalogue
self.context = {"simulated": {}} # data store (raceCard / scores etc)
self.blotter = Blotter(self)
def __call__(self, market_book: MarketBook):
self.market_book = market_book
def open_market(self) -> None:
self.closed = False
def close_market(self) -> None:
self.closed = True
self.date_time_closed = datetime.datetime.utcnow()
# order
def place_order(self, order, execute: bool = True) -> None:
order.place(self.market_book.publish_time)
if order.id not in self.blotter:
self.blotter[order.id] = order
if order.trade.market_notes is None:
order.trade.update_market_notes(self.market_book)
self.flumine.log_control(events.TradeEvent(order.trade)) # todo dupes?
else:
return # retry attempt so ignore?
if execute: # handles replaceOrder
self.blotter.pending_place.append(order)
def cancel_order(self, order, size_reduction: float = None) -> None:
order.cancel(size_reduction)
self.blotter.pending_cancel.append(order)
def update_order(self, order, new_persistence_type: str) -> None:
order.update(new_persistence_type)
self.blotter.pending_update.append(order)
def replace_order(self, order, new_price: float) -> None:
order.replace(new_price)
self.blotter.pending_replace.append(order)
@property
def event_type_id(self) -> str:
if self.market_book:
return self.market_book.market_definition.event_type_id
@property
def event_id(self) -> str:
if self.market_book:
return self.market_book.market_definition.event_id
@property
def seconds_to_start(self):
return (self.market_start_datetime - datetime.datetime.utcnow()).total_seconds()
@property
def elapsed_seconds_closed(self) -> Optional[float]:
if self.closed and self.date_time_closed:
return (datetime.datetime.utcnow() - self.date_time_closed).total_seconds()
@property
def market_start_datetime(self):
if self.market_catalogue:
return self.market_catalogue.market_start_time
elif self.market_book:
return self.market_book.market_definition.market_time
else:
return datetime.datetime.utcfromtimestamp(0)
| 33.153846 | 88 | 0.670534 | 2,773 | 0.919125 | 0 | 0 | 915 | 0.303281 | 0 | 0 | 115 | 0.038117 |
90b067d91d1317f4e26b80f4ccf8b819d42bc981 | 206 | py | Python | {{cookiecutter.project_name}}/tests/conftest.py | nelsonHolic/common-fastapi-microservice | 06a995264ced42a59565f1f703bab7bfed8e7cc1 | [
"MIT"
]
| 1 | 2021-12-14T17:08:24.000Z | 2021-12-14T17:08:24.000Z | {{cookiecutter.project_name}}/tests/conftest.py | nelsonHolic/common-fastapi-microservice | 06a995264ced42a59565f1f703bab7bfed8e7cc1 | [
"MIT"
]
| null | null | null | {{cookiecutter.project_name}}/tests/conftest.py | nelsonHolic/common-fastapi-microservice | 06a995264ced42a59565f1f703bab7bfed8e7cc1 | [
"MIT"
]
| null | null | null | import pytest
from fastapi.testclient import TestClient
from {{cookiecutter.project_name}}.app import app
@pytest.fixture()
def app_client() -> TestClient:
client = TestClient(app)
return client
| 18.727273 | 49 | 0.757282 | 0 | 0 | 0 | 0 | 96 | 0.466019 | 0 | 0 | 0 | 0 |
90b264bddefd9c5d8b81c5073da1b99d48704da6 | 2,228 | py | Python | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
]
| 9 | 2020-01-02T23:07:36.000Z | 2022-01-26T10:04:04.000Z | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
]
| null | null | null | scripts/naive_search.py | simonbowly/lp-generators | 937c44074c234333b6a5408c3e18f498c2205948 | [
"MIT"
]
| 1 | 2020-01-02T23:08:26.000Z | 2020-01-02T23:08:26.000Z |
import itertools
import multiprocessing
import json
import numpy as np
from tqdm import tqdm
from lp_generators.features import coeff_features, solution_features
from lp_generators.performance import clp_simplex_performance
from search_operators import lp_column_neighbour, lp_row_neighbour
from seeds import cli_seeds
from search_common import condition, objective, start_instance
def calculate_features(instance):
return dict(
**coeff_features(instance),
**solution_features(instance))
def generate_by_search(seed):
results = []
pass_condition = 0
step_change = 0
random_state = np.random.RandomState(seed)
current_instance = start_instance(random_state)
current_features = calculate_features(current_instance)
for step in range(10001):
if (step % 100) == 0:
results.append(dict(
**coeff_features(current_instance),
**solution_features(current_instance),
**clp_simplex_performance(current_instance),
pass_condition=pass_condition,
step_change=step_change,
step=step, seed=seed))
if (step % 2) == 0:
new_instance = lp_row_neighbour(random_state, current_instance, 1)
else:
new_instance = lp_column_neighbour(random_state, current_instance, 1)
new_features = calculate_features(new_instance)
if condition(new_features):
pass_condition += 1
if objective(new_features) < objective(current_features):
step_change += 1
current_instance = new_instance
current_features = new_features
return results
@cli_seeds
def run(seed_values):
''' Generate the required number of instances and store feature results. '''
pool = multiprocessing.Pool()
mapper = pool.imap_unordered
print('Generating instances by naive search.')
features = list(tqdm(
mapper(generate_by_search, seed_values),
total=len(seed_values), smoothing=0))
features = list(itertools.chain(*features))
with open('data/naive_search.json', 'w') as outfile:
json.dump(features, outfile, indent=4, sort_keys=True)
run()
| 33.253731 | 81 | 0.685817 | 0 | 0 | 0 | 0 | 520 | 0.233393 | 0 | 0 | 142 | 0.063734 |
90b42e2cf853da75296b6d0c2d2e8e3942e4a7bb | 1,066 | py | Python | tests/test_list_.py | aefalcon/iterable_collections | 8e3b4ea84083a100413f23af30ea27dfd4b838ff | [
"MIT"
]
| 4 | 2018-06-05T14:07:56.000Z | 2021-04-17T12:09:23.000Z | tests/test_list_.py | aefalcon/iterable_collections | 8e3b4ea84083a100413f23af30ea27dfd4b838ff | [
"MIT"
]
| 1 | 2018-07-10T19:53:54.000Z | 2018-07-10T19:58:38.000Z | tests/test_list_.py | aefalcon/iterable_collections | 8e3b4ea84083a100413f23af30ea27dfd4b838ff | [
"MIT"
]
| 2 | 2020-01-29T10:51:11.000Z | 2021-11-11T21:37:24.000Z | import unittest
from iterable_collections import collect
class TestList_(unittest.TestCase):
def test_list(self):
c = collect(list(range(10))).list_()
self.assertEqual(c.iterable, list(list(range(10))))
def test_set(self):
c = collect(set(range(10))).list_()
self.assertEqual(c.iterable, list(set(range(10))))
def test_tuple(self):
c = collect(tuple(range(10))).list_()
self.assertEqual(c.iterable, list(tuple(range(10))))
def test_iterator(self):
c = collect(iter(range(10))).list_()
self.assertEqual(c.iterable, list(iter(range(10))))
def test_dict(self):
c = collect({'a': 1, 'b': 2}).list_()
self.assertEqual(c.iterable, list({'a': 1, 'b': 2}))
def test_dict_items(self):
c = collect({'a': 1, 'b': 2}.items()).list_()
self.assertEqual(c.iterable, list({'a': 1, 'b': 2}.items()))
def test_enumerate(self):
c = collect(list(range(10))).enumerate().list_()
self.assertEqual(c.iterable, list(enumerate(range(10))))
| 29.611111 | 68 | 0.605066 | 1,004 | 0.941839 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.022514 |
90b5ceb756a46b298c1cfb2d69501dea6821b502 | 8,354 | py | Python | parcels/parcels/examples/example_peninsula.py | pdnooteboom/NA_forams | 789b45d8cc14225f31242c9c648f4f36c76d2fc4 | [
"MIT"
]
| 1 | 2021-04-12T16:07:42.000Z | 2021-04-12T16:07:42.000Z | parcels/parcels/examples/example_peninsula.py | pdnooteboom/NA_forams | 789b45d8cc14225f31242c9c648f4f36c76d2fc4 | [
"MIT"
]
| null | null | null | parcels/parcels/examples/example_peninsula.py | pdnooteboom/NA_forams | 789b45d8cc14225f31242c9c648f4f36c76d2fc4 | [
"MIT"
]
| 1 | 2021-04-12T16:07:45.000Z | 2021-04-12T16:07:45.000Z | from parcels import FieldSet, ParticleSet, ScipyParticle, JITParticle, Variable
from parcels import AdvectionRK4, AdvectionEE, AdvectionRK45
from argparse import ArgumentParser
import numpy as np
import math # NOQA
import pytest
from datetime import timedelta as delta
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
method = {'RK4': AdvectionRK4, 'EE': AdvectionEE, 'RK45': AdvectionRK45}
def peninsula_fieldset(xdim, ydim, mesh='flat'):
"""Construct a fieldset encapsulating the flow field around an
idealised peninsula.
:param xdim: Horizontal dimension of the generated fieldset
    :param ydim: Vertical dimension of the generated fieldset
:param mesh: String indicating the type of mesh coordinates and
units used during velocity interpolation:
1. spherical: Lat and lon in degree, with a
correction for zonal velocity U near the poles.
2. flat (default): No conversion, lat/lon are assumed to be in m.
The original test description can be found in Fig. 2.2.3 in:
North, E. W., Gallego, A., Petitgas, P. (Eds). 2009. Manual of
recommended practices for modelling physical - biological
interactions during fish early life.
ICES Cooperative Research Report No. 295. 111 pp.
http://archimer.ifremer.fr/doc/00157/26792/24888.pdf
To avoid accuracy problems with interpolation from A-grid
to C-grid, we return NetCDF files that are on an A-grid.
"""
# Set Parcels FieldSet variables
# Generate the original test setup on A-grid in m
domainsizeX, domainsizeY = (1.e5, 5.e4)
dx, dy = domainsizeX / xdim, domainsizeY / ydim
La = np.linspace(dx, 1.e5-dx, xdim, dtype=np.float32)
Wa = np.linspace(dy, 5.e4-dy, ydim, dtype=np.float32)
u0 = 1
x0 = domainsizeX / 2
R = 0.32 * domainsizeX / 2
# Create the fields
x, y = np.meshgrid(La, Wa, sparse=True, indexing='xy')
P = (u0*R**2*y/((x-x0)**2+y**2)-u0*y) / 1e3
U = u0-u0*R**2*((x-x0)**2-y**2)/(((x-x0)**2+y**2)**2)
V = -2*u0*R**2*((x-x0)*y)/(((x-x0)**2+y**2)**2)
# Set land points to NaN
landpoints = P >= 0.
P[landpoints] = np.nan
U[landpoints] = np.nan
V[landpoints] = np.nan
# Convert from m to lat/lon for spherical meshes
lon = La / 1852. / 60. if mesh == 'spherical' else La
lat = Wa / 1852. / 60. if mesh == 'spherical' else Wa
data = {'U': U, 'V': V, 'P': P}
dimensions = {'lon': lon, 'lat': lat}
return FieldSet.from_data(data, dimensions, mesh=mesh)
def UpdateP(particle, fieldset, time):
particle.p = fieldset.P[time, particle.depth, particle.lat, particle.lon]
def pensinsula_example(fieldset, npart, mode='jit', degree=1,
verbose=False, output=True, method=AdvectionRK4):
"""Example configuration of particle flow around an idealised Peninsula
    :arg fieldset: FieldSet defining the flow around the peninsula
    :arg npart: Number of particles to initialise"""
# First, we define a custom Particle class to which we add a
# custom variable, the initial stream function value p.
# We determine the particle base class according to mode.
class MyParticle(ptype[mode]):
# JIT compilation requires a-priori knowledge of the particle
# data structure, so we define additional variables here.
p = Variable('p', dtype=np.float32, initial=0.)
p_start = Variable('p_start', dtype=np.float32, initial=fieldset.P)
# Initialise particles
if fieldset.U.grid.mesh == 'flat':
x = 3000 # 3 km offset from boundary
else:
x = 3. * (1. / 1.852 / 60) # 3 km offset from boundary
y = (fieldset.U.lat[0] + x, fieldset.U.lat[-1] - x) # latitude range, including offsets
pset = ParticleSet.from_line(fieldset, size=npart, pclass=MyParticle,
start=(x, y[0]), finish=(x, y[1]), time=0)
if verbose:
print("Initial particle positions:\n%s" % pset)
# Advect the particles for 24h
time = delta(hours=24)
dt = delta(minutes=5)
k_adv = pset.Kernel(method)
k_p = pset.Kernel(UpdateP)
out = pset.ParticleFile(name="MyParticle", outputdt=delta(hours=1)) if output else None
print("Peninsula: Advecting %d particles for %s" % (npart, str(time)))
pset.execute(k_adv + k_p, runtime=time, dt=dt, output_file=out)
if verbose:
print("Final particle positions:\n%s" % pset)
return pset
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
def test_peninsula_fieldset(mode, mesh):
"""Execute peninsula test from fieldset generated in memory"""
fieldset = peninsula_fieldset(100, 50, mesh)
pset = pensinsula_example(fieldset, 5, mode=mode, degree=1)
# Test advection accuracy by comparing streamline values
err_adv = np.array([abs(p.p_start - p.p) for p in pset])
assert(err_adv <= 1.e-3).all()
# Test Field sampling accuracy by comparing kernel against Field sampling
err_smpl = np.array([abs(p.p - pset.fieldset.P[0., p.depth, p.lat, p.lon]) for p in pset])
assert(err_smpl <= 1.e-3).all()
def fieldsetfile(mesh):
"""Generate fieldset files for peninsula test"""
filename = 'peninsula'
fieldset = peninsula_fieldset(100, 50, mesh=mesh)
fieldset.write(filename)
return filename
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('mesh', ['flat', 'spherical'])
def test_peninsula_file(mode, mesh):
"""Open fieldset files and execute"""
fieldset = FieldSet.from_parcels(fieldsetfile(mesh), extra_fields={'P': 'P'}, allow_time_extrapolation=True)
pset = pensinsula_example(fieldset, 5, mode=mode, degree=1)
# Test advection accuracy by comparing streamline values
err_adv = np.array([abs(p.p_start - p.p) for p in pset])
assert(err_adv <= 1.e-3).all()
# Test Field sampling accuracy by comparing kernel against Field sampling
err_smpl = np.array([abs(p.p - pset.fieldset.P[0., p.depth, p.lat, p.lon]) for p in pset])
assert(err_smpl <= 1.e-3).all()
if __name__ == "__main__":
p = ArgumentParser(description="""
Example of particle advection around an idealised peninsula""")
p.add_argument('mode', choices=('scipy', 'jit'), nargs='?', default='jit',
help='Execution mode for performing RK4 computation')
p.add_argument('-p', '--particles', type=int, default=20,
help='Number of particles to advect')
p.add_argument('-d', '--degree', type=int, default=1,
help='Degree of spatial interpolation')
p.add_argument('-v', '--verbose', action='store_true', default=False,
help='Print particle information before and after execution')
p.add_argument('-o', '--nooutput', action='store_true', default=False,
help='Suppress trajectory output')
p.add_argument('--profiling', action='store_true', default=False,
help='Print profiling information after run')
p.add_argument('-f', '--fieldset', type=int, nargs=2, default=None,
help='Generate fieldset file with given dimensions')
p.add_argument('-m', '--method', choices=('RK4', 'EE', 'RK45'), default='RK4',
help='Numerical method used for advection')
args = p.parse_args()
if args.fieldset is not None:
filename = 'peninsula'
fieldset = peninsula_fieldset(args.fieldset[0], args.fieldset[1], mesh='flat')
fieldset.write(filename)
# Open fieldset file set
fieldset = FieldSet.from_parcels('peninsula', extra_fields={'P': 'P'}, allow_time_extrapolation=True)
if args.profiling:
from cProfile import runctx
from pstats import Stats
runctx("pensinsula_example(fieldset, args.particles, mode=args.mode,\
degree=args.degree, verbose=args.verbose,\
output=not args.nooutput, method=method[args.method])",
globals(), locals(), "Profile.prof")
Stats("Profile.prof").strip_dirs().sort_stats("time").print_stats(10)
else:
pensinsula_example(fieldset, args.particles, mode=args.mode,
degree=args.degree, verbose=args.verbose,
output=not args.nooutput, method=method[args.method])
| 43.061856 | 112 | 0.649988 | 298 | 0.035672 | 0 | 0 | 1,421 | 0.170098 | 0 | 0 | 3,433 | 0.410941 |
90b614eb6ed41d954f776b1b26da34eda803102b | 456 | py | Python | TestBegin.py | FrankWangJQ/HttpRunner-master | f0456a5b7b9d23ddb54415b1ea5951416e9601ef | [
"MIT"
]
| null | null | null | TestBegin.py | FrankWangJQ/HttpRunner-master | f0456a5b7b9d23ddb54415b1ea5951416e9601ef | [
"MIT"
]
| null | null | null | TestBegin.py | FrankWangJQ/HttpRunner-master | f0456a5b7b9d23ddb54415b1ea5951416e9601ef | [
"MIT"
]
| null | null | null | from httprunner import HttpRunner
import time
kwargs = {
"failfast":False,
#"dot_env_path": "/path/to/.env"
}
runner = HttpRunner(**kwargs)
#入口
runner.run("/Users/wangjianqing/PycharmProjects/HttpRunner-master/tests/testcases/Release/账号管理-设置项.yml")
runner.gen_html_report(html_report_name="reportTestForBetaYunZS",html_report_template="/Users/wangjianqing/PycharmProjects/HttpRunner-master/httprunner/templates/default_report_template.html")
| 26.823529 | 192 | 0.800439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.599156 |
90b636cded4c580440a67538e3ed1bce323607f4 | 2,186 | py | Python | pyaz/synapse/sql/pool/classification/recommendation/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
]
| null | null | null | pyaz/synapse/sql/pool/classification/recommendation/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
]
| null | null | null | pyaz/synapse/sql/pool/classification/recommendation/__init__.py | py-az-cli/py-az-cli | 9a7dc44e360c096a5a2f15595353e9dad88a9792 | [
"MIT"
]
| 1 | 2022-02-03T09:12:01.000Z | 2022-02-03T09:12:01.000Z | '''
Manage sensitivity classification recommendations.
'''
from ...... pyaz_utils import _call_az
def list(name, resource_group, workspace_name, filter=None, included_disabled=None, skip_token=None):
'''
List the recommended sensitivity classifications of a given SQL pool.
Required Parameters:
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- workspace_name -- The workspace name.
Optional Parameters:
- filter -- An OData filter expression that filters elements in the collection.
- included_disabled -- Indicates whether the result should include disabled recommendations
- skip_token -- An OData query option to indicate how many elements to skip in the collection.
'''
return _call_az("az synapse sql pool classification recommendation list", locals())
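# Example usage (hypothetical resource names):
# list(name="sqlpool1", resource_group="rg1", workspace_name="ws1", included_disabled=True)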
def enable(column, name, resource_group, schema, table, workspace_name):
'''
Enable sensitivity recommendations for a given column(recommendations are enabled by default on all columns).
Required Parameters:
- column -- The name of column.
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- schema -- The name of schema.
- table -- The name of table.
- workspace_name -- The workspace name.
'''
return _call_az("az synapse sql pool classification recommendation enable", locals())
def disable(column, name, resource_group, schema, table, workspace_name):
'''
Disable sensitivity recommendations for a given column(recommendations are enabled by default on all columns).
Required Parameters:
- column -- The name of column.
- name -- The SQL pool name.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- schema -- The name of schema.
- table -- The name of table.
- workspace_name -- The workspace name.
'''
return _call_az("az synapse sql pool classification recommendation disable", locals())
| 42.038462 | 128 | 0.718664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,780 | 0.814273 |
90b6494c1b5e12c59216a90f809c6005fdb48a7e | 333 | py | Python | December Month Challenge/4KthfactorN.py | adesh-gadge/LeetCodePractice | 4b142c102e64ec93465af7f4193762e8fd2866ec | [
"MIT"
]
| null | null | null | December Month Challenge/4KthfactorN.py | adesh-gadge/LeetCodePractice | 4b142c102e64ec93465af7f4193762e8fd2866ec | [
"MIT"
]
| null | null | null | December Month Challenge/4KthfactorN.py | adesh-gadge/LeetCodePractice | 4b142c102e64ec93465af7f4193762e8fd2866ec | [
"MIT"
]
| null | null | null | class Solution:
def kthFactor(self, n: int, k: int) -> int:
s1 = set()
s2 = set()
for i in range(1,int(n**0.5)+1):
if n%i ==0:
s1.add(i)
s2.add(int(n/i))
l = list(s1|s2)
l.sort()
if k > len(l):
return -1
return l[k-1] | 25.615385 | 47 | 0.381381 | 333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
90b74a470408ddeb782e48bf20e39ffd4457275e | 1,755 | py | Python | dipy/utils/tests/test_arrfuncs.py | martcous/dipy | 6bff5655f03db19bde5aa951ffb91987983a889b | [
"MIT"
]
| null | null | null | dipy/utils/tests/test_arrfuncs.py | martcous/dipy | 6bff5655f03db19bde5aa951ffb91987983a889b | [
"MIT"
]
| null | null | null | dipy/utils/tests/test_arrfuncs.py | martcous/dipy | 6bff5655f03db19bde5aa951ffb91987983a889b | [
"MIT"
]
| null | null | null | """ Testing array utilities
"""
import sys
import numpy as np
from ..arrfuncs import as_native_array, pinv, eigh
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
NATIVE_ORDER = '<' if sys.byteorder == 'little' else '>'
SWAPPED_ORDER = '>' if sys.byteorder == 'little' else '<'
def test_as_native():
arr = np.arange(5) # native
assert_equal(arr.dtype.byteorder, '=')
narr = as_native_array(arr)
assert_true(arr is narr)
sdt = arr.dtype.newbyteorder('s')
barr = arr.astype(sdt)
assert_equal(barr.dtype.byteorder, SWAPPED_ORDER)
narr = as_native_array(barr)
assert_false(barr is narr)
assert_array_equal(barr, narr)
assert_equal(narr.dtype.byteorder, NATIVE_ORDER)
def test_pinv():
arr = np.random.randn(4, 4, 4, 3, 7)
_pinv = pinv(arr)
for i in range(4):
for j in range(4):
for k in range(4):
assert_array_almost_equal(_pinv[i, j, k],
np.linalg.pinv(arr[i, j, k]))
def test_eigh():
for i in range(10):
arr = np.random.randn(7, 7)
evals1, evecs1 = eigh(arr)
evals2, evecs2 = np.linalg.eigh(arr)
assert_array_almost_equal(evals1, evals2)
assert_array_almost_equal(evecs1, evecs2)
arr = np.random.randn(4, 4, 4, 7, 7)
evals, evecs = eigh(arr)
for i in range(4):
for j in range(4):
for k in range(4):
evals_vox, evecs_vox = np.linalg.eigh(arr[i, j, k])
assert_array_almost_equal(evals[i, j, k], evals_vox)
assert_array_almost_equal(evecs[i, j, k], evecs_vox)
| 29.745763 | 77 | 0.616524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.041595 |
90b769e3d5d7b99ed6ee9f9dfa67655328ca1e58 | 1,571 | py | Python | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
]
| 1 | 2017-09-09T13:53:09.000Z | 2017-09-09T13:53:09.000Z | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
]
| null | null | null | ProgressBar.py | ArisKots1992/Similar-World-News-Articles | 426aef1d6d9566e66ad634bc8468d554d887551c | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import time
import sys
import math
#HOMEMADE WITHOUT ONLINE CODE by Aris
#LICENCE BY ARIS
class ProgressBar:
def __init__(self,max_size=36):
ProgressBar.max_size = max_size
ProgressBar.tick = 20.0/max_size
ProgressBar.progress_counter = 0.0
ProgressBar.counter = 0
spaces = ' ' * 20
hashes = '█' * 0
sys.stdout.write("\rPercent: ┃{0}┃{1}%".format(hashes + spaces, 0))
sys.stdout.flush()
def update(self):
ProgressBar.counter += 1
if ProgressBar.counter == ProgressBar.max_size:
hashes = '█' * 20
spaces = ' ' * 0
sys.stdout.write("\rPercent: ┃{0}┃{1}%".format(hashes + spaces, 100))
print
print "Finished Successfully!"
sys.stdout.flush()
return
elif ProgressBar.counter >= ProgressBar.max_size:
return
ProgressBar.progress_counter += ProgressBar.tick
hashes = '█' * int(ProgressBar.progress_counter)
spaces = ' ' * (20 - int(ProgressBar.progress_counter))
percentage = int(round(ProgressBar.progress_counter * 5))
sys.stdout.write("\rPercent: ┃{0}┃{1}%".format(hashes + spaces, percentage))
sys.stdout.flush()
return
class SupportBar:
def __init__(self):
SupportBar.counter = 0
def increase(self):
SupportBar.counter += 1
def init(self):
SupportBar.counter = 0
def get(self):
return SupportBar.counter
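# Usage sketch (illustrative, not part of the original module): ProgressBar
# draws a 20-character bar on stdout; max_size is the number of update() calls
# expected before it reports 100%.
if __name__ == "__main__":
    demo_bar = ProgressBar(max_size=10)
    for _ in range(10):
        time.sleep(0.05)  # stand-in for one unit of real work
        demo_bar.update()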
| 29.092593 | 84 | 0.57352 | 1,472 | 0.926369 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.126495 |
90b7ba0980ae3d667866aa6f68a2acda5b4f0621 | 1,895 | py | Python | src/vtra/plot/rail_network_map.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
]
| 3 | 2018-07-09T12:15:46.000Z | 2020-12-03T07:02:23.000Z | src/vtra/plot/rail_network_map.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
]
| 1 | 2019-05-09T21:57:20.000Z | 2019-05-09T21:57:20.000Z | src/vtra/plot/rail_network_map.py | GFDRR/vietnam-transport | 71f6fc8cb7f1ca7bccb9a29d544869b442e68bfc | [
"MIT"
]
| 2 | 2018-07-23T12:49:21.000Z | 2021-06-03T11:00:44.000Z | """Rail network map
"""
import os
import sys
from collections import OrderedDict
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
from vtra.utils import *
def main():
config = load_config()
output_file = os.path.join(config['paths']['figures'], 'rail-map.png')
rail_edge_file = os.path.join(
config['paths']['data'], 'post_processed_networks', 'rail_edges.shp')
rail_node_file = os.path.join(
config['paths']['data'], 'post_processed_networks', 'rail_nodes.shp')
color_by_type = {'Rail line': '#006d2c', 'Rail stop': '#000000'}
ax = get_axes()
plot_basemap(ax, config['paths']['data'],highlight_region=[])
scale_bar(ax, location=(0.8, 0.05))
plot_basemap_labels(ax, config['paths']['data'])
proj_lat_lon = ccrs.PlateCarree()
for record in shpreader.Reader(rail_edge_file).records():
geom = record.geometry
ax.add_geometries(
geom,
crs=proj_lat_lon,
linewidth=1.5,
edgecolor='#006d2c',
facecolor='none',
zorder=3,
label='Rail line'
)
# Stations
xs = []
ys = []
for record in shpreader.Reader(rail_node_file).records():
node_type = record.attributes['name']
if node_type != '0':
geom = record.geometry
x = geom.x
y = geom.y
xs.append(x)
ys.append(y)
name = record.attributes['name']
ax.scatter(xs, ys, transform=proj_lat_lon, facecolor='#000000',
s=4, zorder=5, label='Rail station')
# Legend
legend_handles = [
mpatches.Patch(color=color, label=line)
for line, color in color_by_type.items()
]
plt.legend(handles=legend_handles, loc='lower left')
save_fig(output_file)
if __name__ == '__main__':
main()
| 28.712121 | 77 | 0.604749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 331 | 0.17467 |
90b801d343545a11009f0b5ecc8dd2af2c9f92ca | 3,189 | py | Python | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
]
| null | null | null | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
]
| null | null | null | ecommerce_project/apps/ecommerce/migrations/0001_initial.py | mlopezf2019/guadalupe_sowos_examen_3 | 813f960f2428ac5d753a02888134ac3992e9018e | [
"MIT"
]
| null | null | null | # Generated by Django 3.1.1 on 2020-09-27 20:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('is_active', models.BooleanField(default=False)),
('is_deleted', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('description', models.TextField()),
('quantity', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('category_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.category')),
],
),
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('iva', models.DecimalField(decimal_places=2, max_digits=5)),
('subtotal', models.DecimalField(decimal_places=2, max_digits=5)),
('total', models.DecimalField(decimal_places=2, max_digits=5)),
('customer_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.customer')),
],
),
migrations.CreateModel(
name='PurchaseProducts',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('quantity', models.IntegerField()),
('product_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.product')),
('purchase_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.purchase')),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('last_name', models.CharField(max_length=200)),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='users.user')),
],
),
migrations.AddField(
model_name='customer',
name='person_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to='ecommerce.person'),
),
]
| 41.415584 | 123 | 0.54688 | 3,056 | 0.958294 | 0 | 0 | 0 | 0 | 0 | 0 | 475 | 0.14895 |
90b9151bc28db99fb5989633cea86f3faad362ff | 4,471 | py | Python | pydl/pydlspec2d/tests/test_spec1d.py | jhennawi/pydl | 3926aab6fd57c27e13d571156077de41343881c1 | [
"BSD-3-Clause"
]
| null | null | null | pydl/pydlspec2d/tests/test_spec1d.py | jhennawi/pydl | 3926aab6fd57c27e13d571156077de41343881c1 | [
"BSD-3-Clause"
]
| null | null | null | pydl/pydlspec2d/tests/test_spec1d.py | jhennawi/pydl | 3926aab6fd57c27e13d571156077de41343881c1 | [
"BSD-3-Clause"
]
| null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
import os
from astropy.tests.helper import raises
from astropy.utils.data import get_pkg_data_filename
from .. import Pydlspec2dException
from ..spec1d import (HMF, findspec, spec_append, spec_path, template_metadata,
wavevector)
class TestSpec1d(object):
"""Test the functions in pydl.pydlspec2d.spec1d.
"""
def setup(self):
self.env = {'BOSS_SPECTRO_REDUX': '/boss/spectro/redux',
'SPECTRO_REDUX': '/sdss/spectro/redux',
'RUN2D': 'v1_2_3',
'RUN1D': 'v1_2_3'}
self.original_env = dict()
for key in self.env:
if key in os.environ:
self.original_env[key] = os.environ[key]
else:
self.original_env[key] = None
os.environ[key] = self.env[key]
def teardown(self):
for key in self.original_env:
if self.original_env[key] is None:
del os.environ[key]
else:
os.environ[key] = self.original_env[key]
def test_findspec(self):
"""This is just a placeholder for now.
"""
# slist = findspec(infile='file.in', sdss=True)
assert True
def test_hmf_init(self):
"""Test initialization of HMF object
"""
spec = np.random.random((20, 100))
invvar = np.random.random((20, 100))
hmf = HMF(spec, invvar)
assert hmf.K == 4
assert hmf.log.level == 20 # INFO
hmf = HMF(spec, invvar, K=6, verbose=True)
assert hmf.K == 6
assert hmf.log.level == 10 # DEBUG
def test_spec_append(self):
spec1 = np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]])
spec2 = np.array([[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])
s = spec_append(spec1, spec2)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])).all()
spec2 = np.array([[2, 2, 2, 2],
[2, 2, 2, 2]])
s = spec_append(spec1, spec2)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 0],
[2, 2, 2, 2, 0]])).all()
s = spec_append(spec1, spec2, 1)
assert (s == np.array([[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 2, 2, 2, 2],
[0, 2, 2, 2, 2]])).all()
spec1 = np.array([[1, 1, 1],
[1, 1, 1]])
spec2 = np.array([[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])
s = spec_append(spec1, spec2, -2)
assert (s == np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[2, 2, 2, 2, 2],
[2, 2, 2, 2, 2]])).all()
def test_spec_path(self):
bsr = self.env['BOSS_SPECTRO_REDUX']
run2d = self.env['RUN2D']
p = spec_path(123)
assert p[0] == os.path.join(bsr, run2d, '0123')
p = spec_path(1234)
assert p[0] == os.path.join(bsr, run2d, '1234')
p = spec_path(1234, topdir=bsr, run2d=run2d)
assert p[0] == os.path.join(bsr, run2d, '1234')
p = spec_path(np.array([1234, 5678]), topdir=bsr, run2d=run2d)
assert p[0] == os.path.join(bsr, run2d, '1234')
assert p[1] == os.path.join(bsr, run2d, '5678')
p = spec_path(1234, path=bsr)
assert p[0] == bsr
def test_template_metadata(self):
with raises(Pydlspec2dException):
slist, metadata = template_metadata('/no/such/file.par')
inputfile = get_pkg_data_filename('t/test_template_metadata.par')
slist, metadata = template_metadata(inputfile)
assert metadata['object'] == 'gal'
assert not metadata['nonnegative']
def test_wavevector(self):
l = wavevector(3, 4, binsz=0.1)
ll = np.array([3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0])
assert np.allclose(l, ll)
l = wavevector(3, 4, wavemin=3, binsz=0.1)
ll = np.array([3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0])
assert np.allclose(l, ll)
| 38.543103 | 79 | 0.47193 | 4,109 | 0.919034 | 0 | 0 | 0 | 0 | 0 | 0 | 539 | 0.120555 |
90b979db4f0ee9199884997c5ba3cb24bb11e60e | 7,800 | py | Python | final/good_evaluate.py | wuyuMk7/CSCI8980 | 9cceffcac7975ee158655f3953e27b502fc383ea | [
"MIT"
]
| null | null | null | final/good_evaluate.py | wuyuMk7/CSCI8980 | 9cceffcac7975ee158655f3953e27b502fc383ea | [
"MIT"
]
| null | null | null | final/good_evaluate.py | wuyuMk7/CSCI8980 | 9cceffcac7975ee158655f3953e27b502fc383ea | [
"MIT"
]
| null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
from absl import flags
import numpy as np
import skimage.io as io
import cv2
import matplotlib.pyplot as plt
# import tensorflow as tf
# from psbody.mesh import Mesh
from smpl_webuser.serialization import load_model
import pyrender
import trimesh
from util import renderer as vis_util
from util import image as img_util
from flame import FLAME
from flame_config import get_config
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.optim as optim
import MyRingnet
def renderMesh(vertices, faces, vertex_colors, total_lmks):
scene = pyrender.Scene()
mesh = trimesh.Trimesh(vertices, faces, vertex_colors=vertex_colors)
render_mesh = pyrender.Mesh.from_trimesh(mesh)
scene.add(render_mesh)
sm = trimesh.creation.uv_sphere(radius=0.005)
sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0]
tfs = np.tile(np.eye(4), (len(total_lmks), 1, 1))
tfs[:, :3, 3] = total_lmks
joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)
scene.add(joints_pcl)
pyrender.Viewer(scene, use_raymond_lighting=True)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
# Input size: 2048 + 159, fc1_size: 512, fc2_size: 512, out_size: 159
class Regression(nn.Module):
def __init__(
self, input_size = 2048+159, fc1_size = 512,
fc2_size = 512, out_size = 159, iter = 8):
super().__init__()
self.fc1 = nn.Linear(input_size, fc1_size, bias=True)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=0.2)
self.fc2 = nn.Linear(fc1_size, fc2_size, bias = True)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(p=0.2)
self.fc3 = nn.Linear(fc2_size, out_size, bias=True)
# init.normal_(self.fc1, 0, 1)
# init.normal_(self.fc2, 0, 1)
# init.normal_(self.fc3, 0, 1)
def forward(self, x):
#x = self.dropout1(self.relu1(self.fc1(x)))
#x = self.dropout2(self.relu2(self.fc2(x)))
x = self.relu1(self.fc1(x))
x = self.relu2(self.fc2(x))
x = self.fc3(x)
return x
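# Shape sketch for the regressor above (illustrative only): one pass maps a
# [batch, 2048 + 159] concatenation of ResNet-50 features and the previous
# estimate to a new [batch, 159] estimate, which the script below splits into
# 3 camera + 6 pose + 100 shape + 50 expression parameters.
#
#   feats = torch.randn(4, 2048)
#   estimate = torch.zeros(4, 159)
#   estimate = Regression()(torch.cat([feats, estimate], dim=1))  # -> (4, 159)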
# NOTE: dead code from an earlier entry point, kept commented out. `Mesh`
# (psbody.mesh) is not imported in this script and `main` is never defined,
# so executing these lines at import time would fail.
# if __name__ == '__main__':
#     config = get_config()
#     template_mesh = Mesh(filename='./flame_model/FLAME_sample.ply')
#     renderer = vis_util.SMPLRenderer(faces=template_mesh.f)
#     if not os.path.exists(config.out_folder):
#         os.makedirs(config.out_folder)
#     if not os.path.exists(config.out_folder + '/images'):
#         os.mkdir(config.out_folder + '/images')
#     main(config, template_mesh)
config_img_size = 244
if __name__ == '__main__':
# read images and scale
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180424_03335_TA/multiview_neutral/IMG_0101.jpg"
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180704_03355_TA/multiview_expressions/IMG_1948.jpg"
input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180427_03338_TA/multiview_expressions/IMG_0230.jpg"
#input_img_path = "./training_set/NoW_Dataset/final_release_version/iphone_pictures/FaMoS_180502_00145_TA/multiview_expressions/IMG_0407.jpg"
openpose = np.load(input_img_path.replace("iphone_pictures", "openpose").replace("jpg", "npy"), allow_pickle=True, encoding='latin1')
img = io.imread(input_img_path)
if np.max(img.shape[:2]) != config_img_size:
# print('Resizing so the max image size is %d..' % self.config_img_size)
scale = (float(config_img_size) / np.max(img.shape[:2]))
else:
scale = 1.0#scaling_factor
center = np.round(np.array(img.shape[:2]) / 2).astype(int)
# image center in (x,y)
center = center[::-1]
crop, proc_param = img_util.scale_and_crop(
img, scale, center, config_img_size)
print(proc_param)
#exit(0)
crop = torch.tensor(crop)
crop = crop.permute(2, 0, 1)
crop = crop[None, :, :, :].float().cuda()
# print(crop)
# build model
resnet50 = torch.load("./good_resnet50.pkl")
resnet50.cuda()
resnet50.fc = Identity()
# print(resnet50)
regression = torch.load("./good_model.pkl")
regression.cuda()
config = get_config()
config.batch_size = 1
flamelayer = FLAME(config)
flamelayer.requires_grad_ = False
flamelayer.cuda()
# run the model
res_output = resnet50(crop)
# Empty estimates as the initial value for concatenation
regress_estimates = torch.zeros([ res_output.shape[0], MyRingnet.regress_out_size ]).cuda()
# Regression model
for _ in range(MyRingnet.regress_iteration_cnt):
# Preprocess regression input - concatenation
regress_input = torch.cat([res_output, regress_estimates], 1)
regress_estimates = regression(regress_input)
regress_output = regress_estimates
# FLAME model
cam_params, pose_params = regress_output[0:, 0:3], regress_output[0:, 3:9]
shape_params, exp_params = regress_output[0:, 9:109], regress_output[0:, 109:159]
# pose_params[0,2] = 3.14/5
flame_vert, flame_lmk = flamelayer(shape_params, exp_params, pose_params)
# Render and display the mesh
print(flame_lmk, cam_params)
# flame_lmk[0]=cam_params[0]*-1
# a_params = cam_params[:,:]*-1
mesh_vertices, mesh_faces = flame_vert.detach().cpu().numpy().squeeze(), flamelayer.faces
mesh_vertices_colors = np.ones([mesh_vertices.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]
renderMesh(mesh_vertices, mesh_faces, mesh_vertices_colors, flame_lmk.detach().cpu().numpy().squeeze())
#renderMesh(mesh_vertices, mesh_faces, mesh_vertices_colors, cam_params[0])
# flame_lmk[:, :, 1] *= -1
# cam_params[:,1]*=-1
# cam_params[:, 0] = 2
# cam_params[:, 1] = 0.2
# print(flame_lmk)
center = torch.tensor(center.copy()).cuda()
print(cam_params)
new_cam = MyRingnet.transform_cam(cam_params, 1. / scale, config_img_size, center[None, :])
projected_lmks = MyRingnet.project_points(flame_lmk, new_cam)
#op_pts = openpose[0,:68,:]
#ground_truth_weights = ((op_pts[:,2] > 0.41).astype(float))
#print(ground_truth_weights)
#print(op_pts)
# print(projected_lmks)
# print(openpose)
plt.figure
plt.imshow(img)
count = 0
cpu_lmks = projected_lmks.cpu()
#print(img.shape)
for i in cpu_lmks[0]:
x = i[0].int()
y = i[1].int()
plt.annotate(str(count), xy=(x, y))
plt.scatter(x, y, s=50, c='red', marker='o')
count = count + 1
count = 0
#openpose[0] *= scale
for i in openpose[0]:
x = i[0]
y = i[1]
plt.annotate(str(count), xy=(x, y))
plt.scatter(x, y, s=50, c='blue', marker='o')
count = count + 1
plt.show()
renderer = vis_util.SMPLRenderer(faces=mesh_faces)
print(img.shape[:2])
cam_for_render, vert_shifted = vis_util.get_original(
#proc_param, mesh_vertices, new_cam.detach().cpu().numpy().squeeze(), img_size=img.shape[:2]
proc_param, mesh_vertices, cam_params.detach().cpu().numpy().squeeze(), img_size=img.shape[:2]
)
print(cam_params, new_cam, cam_for_render)
#exit(0)
# rend_img_overlay = renderer(
# #vert_shifted * 1.0, cam=new_cam.squeeze().detach().cpu().numpy(), img=img, do_alpha=True
# #vert_shifted * 1.0, cam=cam_for_render, img=img, do_alpha=True
# vert_shifted * 1.0, cam=cam_for_render, img=img, do_alpha=True
# )
rend_img_vp1 = renderer.rotated(
mesh_vertices, 30, cam=new_cam.squeeze().detach().cpu().numpy(), img_size=img.shape[:2]
#vert_shifted * 1.0, 30, cam=cam_for_render, img_size=img.shape[:2]
)
plt.imshow(rend_img_vp1)
plt.show()
| 34.513274 | 145 | 0.680385 | 1,328 | 0.170256 | 0 | 0 | 0 | 0 | 0 | 0 | 2,305 | 0.295513 |
90b9c9ce2f3208b12b35d5e78f9d7d9be8454378 | 92 | py | Python | quick-scan.py | B3ND1X/py-air-script | d6756cc2b5ec2a7e7950b13b09c78c776488fd6e | [
"Apache-2.0"
]
| 2 | 2021-11-19T10:40:07.000Z | 2022-02-28T16:39:49.000Z | quick-scan.py | B3ND1X/py-air-script | d6756cc2b5ec2a7e7950b13b09c78c776488fd6e | [
"Apache-2.0"
]
| null | null | null | quick-scan.py | B3ND1X/py-air-script | d6756cc2b5ec2a7e7950b13b09c78c776488fd6e | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python
import os
os.system("sudo ./scan.py")
os.system("sudo ./enable-wifi.py")
| 15.333333 | 34 | 0.673913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.608696 |
90b9ca60618e207e4f11df9555b71806b45d69af | 1,538 | py | Python | src/classifier/classifier_tuning/tune_sklearn.py | krangelie/bias-in-german-nlg | 9fbaf50fde7d41d64692ae90c41beae61bc78d44 | [
"MIT"
]
| 14 | 2021-08-24T12:36:37.000Z | 2022-03-18T12:14:36.000Z | src/classifier/classifier_tuning/tune_sklearn.py | krangelie/bias-in-german-nlg | 9fbaf50fde7d41d64692ae90c41beae61bc78d44 | [
"MIT"
]
| null | null | null | src/classifier/classifier_tuning/tune_sklearn.py | krangelie/bias-in-german-nlg | 9fbaf50fde7d41d64692ae90c41beae61bc78d44 | [
"MIT"
]
| 1 | 2021-10-21T20:22:55.000Z | 2021-10-21T20:22:55.000Z | from sklearn.ensemble import RandomForestClassifier
import xgboost
def suggest_xgb(model_params, trial, xgb=None):
n_estimators = trial.suggest_int(
model_params.n_estimators.name,
model_params.n_estimators.lower,
model_params.n_estimators.upper,
model_params.n_estimators.step,
)
lr = trial.suggest_float(
model_params.learning_rate.name,
model_params.learning_rate.lower,
model_params.learning_rate.upper,
log=True,
)
max_depth = trial.suggest_int(
model_params.max_depth.name,
model_params.max_depth.lower,
model_params.max_depth.upper,
model_params.max_depth.step,
)
classifier = xgboost.XGBClassifier(
n_estimators=n_estimators,
learning_rate=lr,
max_depth=max_depth,
random_state=42,
use_label_encoder=False,
tree_method="gpu_hist",
gpu_id=0,
)
return classifier
def suggest_rf(model_params, trial):
n_estimators = trial.suggest_int(
model_params.n_estimators.name,
model_params.n_estimators.lower,
model_params.n_estimators.upper,
model_params.n_estimators.step,
)
max_depth = trial.suggest_int(
model_params.max_depth.name,
model_params.max_depth.lower,
model_params.max_depth.upper,
model_params.max_depth.step,
)
classifier = RandomForestClassifier(
n_estimators=n_estimators, max_depth=max_depth, random_state=42
)
return classifier
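if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): wire one of
    # the suggest_* helpers into an Optuna objective. The SimpleNamespace config
    # below only mimics the .name/.lower/.upper/.step attributes the helpers
    # read; the real project presumably passes its own hyperparameter config,
    # and the dataset and scoring here are assumptions for the demo.
    import optuna
    from types import SimpleNamespace
    from sklearn.datasets import make_classification
    from sklearn.model_selection import cross_val_score
    demo_params = SimpleNamespace(
        n_estimators=SimpleNamespace(name="n_estimators", lower=50, upper=200, step=50),
        max_depth=SimpleNamespace(name="max_depth", lower=2, upper=8, step=2),
    )
    X, y = make_classification(n_samples=200, n_features=20, random_state=42)
    def objective(trial):
        classifier = suggest_rf(demo_params, trial)
        return cross_val_score(classifier, X, y, cv=3).mean()
    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=5)
    print(study.best_params)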
| 27.963636 | 71 | 0.683355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.006502 |
90ba1f62b3ac0c6dc5b223b48142b7f90d52dc27 | 4,958 | py | Python | textgenrnn/model.py | cosandr/textgenrnn | b2140c1a5704e866ff934fbfad4e14f3c827d439 | [
"MIT"
]
| null | null | null | textgenrnn/model.py | cosandr/textgenrnn | b2140c1a5704e866ff934fbfad4e14f3c827d439 | [
"MIT"
]
| null | null | null | textgenrnn/model.py | cosandr/textgenrnn | b2140c1a5704e866ff934fbfad4e14f3c827d439 | [
"MIT"
]
| null | null | null | from keras.optimizers import RMSprop
from keras.layers import Input, Embedding, Dense, LSTM, Bidirectional, GRU
from keras.layers import concatenate, Reshape, SpatialDropout1D
from keras.models import Model
from keras import backend as K
from .AttentionWeightedAverage import AttentionWeightedAverage
def textgenrnn_model(num_classes, cfg, context_size=None,
weights_path=None,
dropout=0.0,
optimizer=RMSprop(lr=4e-3, rho=0.99)):
'''
Builds the model architecture for textgenrnn and
loads the specified weights for the model.
'''
input = Input(shape=(cfg['max_length'],), name='input')
embedded = Embedding(num_classes, cfg['dim_embeddings'],
input_length=cfg['max_length'],
name='embedding')(input)
if dropout > 0.0:
embedded = SpatialDropout1D(dropout, name='dropout')(embedded)
rnn_layer_list = []
for i in range(cfg['rnn_layers']):
prev_layer = embedded if i == 0 else rnn_layer_list[-1]
if cfg.get('rnn_type') == 'gru':
rnn_layer_list.append(new_rnn_gru(cfg, i + 1)(prev_layer))
else:
rnn_layer_list.append(new_rnn(cfg, i + 1)(prev_layer))
seq_concat = concatenate([embedded] + rnn_layer_list, name='rnn_concat')
attention = AttentionWeightedAverage(name='attention')(seq_concat)
output = Dense(num_classes, name='output', activation='softmax')(attention)
if context_size is None:
model = Model(inputs=[input], outputs=[output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
else:
context_input = Input(
shape=(context_size,), name='context_input')
context_reshape = Reshape((context_size,),
name='context_reshape')(context_input)
merged = concatenate([attention, context_reshape], name='concat')
main_output = Dense(num_classes, name='context_output',
activation='softmax')(merged)
model = Model(inputs=[input, context_input],
outputs=[main_output, output])
if weights_path is not None:
model.load_weights(weights_path, by_name=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
loss_weights=[0.8, 0.2])
return model
'''
Create a new LSTM layer per parameters. Unfortunately,
each combination of parameters must be hardcoded.
The normal LSTMs use sigmoid recurrent activations
for parity with CuDNNLSTM:
https://github.com/keras-team/keras/issues/8860
'''
def new_rnn(cfg, layer_num):
use_cudnnlstm = K.backend() == 'tensorflow' and len(K.tensorflow_backend._get_available_gpus()) > 0
if use_cudnnlstm:
from keras.layers import CuDNNLSTM
if cfg['rnn_bidirectional']:
return Bidirectional(CuDNNLSTM(cfg['rnn_size'],
return_sequences=True),
name='rnn_{}'.format(layer_num))
return CuDNNLSTM(cfg['rnn_size'],
return_sequences=True,
name='rnn_{}'.format(layer_num))
else:
if cfg['rnn_bidirectional']:
return Bidirectional(LSTM(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid'),
name='rnn_{}'.format(layer_num))
return LSTM(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
name='rnn_{}'.format(layer_num))
def new_rnn_gru(cfg, layer_num):
use_cudnngru = K.backend() == 'tensorflow' and len(K.tensorflow_backend._get_available_gpus()) > 0
if use_cudnngru:
from keras.layers import CuDNNGRU
if cfg['rnn_bidirectional']:
return Bidirectional(CuDNNGRU(cfg['rnn_size'],
return_sequences=True),
name='rnn_{}'.format(layer_num))
return CuDNNGRU(cfg['rnn_size'],
return_sequences=True,
name='rnn_{}'.format(layer_num))
else:
if cfg['rnn_bidirectional']:
return Bidirectional(GRU(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
reset_after=True),
name='rnn_{}'.format(layer_num))
return GRU(cfg['rnn_size'],
return_sequences=True,
recurrent_activation='sigmoid',
reset_after=True,
name='rnn_{}'.format(layer_num))
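if __name__ == "__main__":
    # Usage sketch (illustrative): build a small character-level model with the
    # config keys the builder above reads. These values are assumptions for the
    # demo, not textgenrnn's shipped defaults.
    demo_cfg = {'max_length': 40, 'dim_embeddings': 100, 'rnn_layers': 2,
                'rnn_size': 128, 'rnn_bidirectional': False, 'rnn_type': 'lstm'}
    demo_model = textgenrnn_model(num_classes=64, cfg=demo_cfg, dropout=0.1)
    demo_model.summary()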
| 40.308943 | 103 | 0.583905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 881 | 0.177693 |
90bbeed86ea6726d8cf4682e4d77c05a1d88ab5a | 121,331 | py | Python | tests/adapters/switches/brocade_test.py | FrancoisLopez/netman | a40d3235f7ea0cdaf52daab97b0d5ad20857b00e | [
"Apache-2.0"
]
| 38 | 2015-11-30T10:11:42.000Z | 2022-02-10T18:31:44.000Z | tests/adapters/switches/brocade_test.py | FrancoisLopez/netman | a40d3235f7ea0cdaf52daab97b0d5ad20857b00e | [
"Apache-2.0"
]
| 143 | 2015-12-10T19:00:42.000Z | 2020-08-20T13:51:42.000Z | tests/adapters/switches/brocade_test.py | FrancoisLopez/netman | a40d3235f7ea0cdaf52daab97b0d5ad20857b00e | [
"Apache-2.0"
]
| 15 | 2015-12-14T23:03:30.000Z | 2019-01-15T19:35:45.000Z | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from flexmock import flexmock, flexmock_teardown
from hamcrest import assert_that, has_length, equal_to, is_, none, empty
from netaddr import IPNetwork
from netaddr.ip import IPAddress
from netman.adapters.switches import brocade_factory_ssh, brocade_factory_telnet
from netman.adapters.switches.brocade import Brocade, parse_if_ranges
from netman.adapters.switches.util import SubShell
from netman.core.objects.access_groups import IN, OUT
from netman.core.objects.exceptions import IPNotAvailable, UnknownVlan, UnknownIP, UnknownAccessGroup, BadVlanNumber, \
BadVlanName, UnknownInterface, TrunkVlanNotSet, UnknownVrf, VlanVrfNotSet, VrrpAlreadyExistsForVlan, BadVrrpPriorityNumber, BadVrrpGroupNumber, \
BadVrrpTimers, BadVrrpTracking, NoIpOnVlanForVrrp, VrrpDoesNotExistForVlan, UnknownDhcpRelayServer, DhcpRelayServerAlreadyExists, \
VlanAlreadyExist, InvalidAccessGroupName, IPAlreadySet
from netman.core.objects.interface_states import OFF, ON
from netman.core.objects.port_modes import ACCESS, TRUNK
from netman.core.objects.switch_descriptor import SwitchDescriptor
class BrocadeTest(unittest.TestCase):
def setUp(self):
self.switch = Brocade(SwitchDescriptor(model='brocade', hostname="my.hostname"), None)
SubShell.debug = True
self.shell_mock = flexmock()
self.switch.shell = self.shell_mock
def tearDown(self):
flexmock_teardown()
def test_switch_has_a_logger_configured_with_the_switch_name(self):
assert_that(self.switch.logger.name, is_(Brocade.__module__ + ".my.hostname"))
def test_ip_redirect_enable(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 999, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
"interface ve 999",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip redirect").once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()
self.switch.set_vlan_icmp_redirects_state(1234, True)
def test_ip_redirect_disable(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 999, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
"interface ve 999",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip redirect").once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()
self.switch.set_vlan_icmp_redirects_state(1234, False)
def test_set_vlan_icmp_redirects_state_without_interface_creates_it(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 999, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
"Error - ve 999 was not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("interface ve 999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip redirect").once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice()
self.switch.set_vlan_icmp_redirects_state(1234, False)
def test_set_vlan_icmp_redirects_state_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_vlan_icmp_redirects_state(1234, False)
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_get_vlans(self):
self.shell_mock.should_receive("do").with_args("show running-config vlan | begin vlan").once().ordered().and_return([
"vlan 1 name DEFAULT-VLAN",
""
" no untagged ethe 1/1 ethe 1/3 to 1/22",
"!",
"vlan 201",
" tagged ethe 1/1",
" router-interface ve 201",
"!",
"vlan 2222 name your-name-is-way-too-long-for-t",
" tagged ethe 1/1",
" untagged ethe 1/2",
"!",
"vlan 3333 name some-name",
"!",
"!"
])
self.shell_mock.should_receive("do").with_args("show running-config interface").once()\
.ordered().and_return([
'interface ve 428',
' port-name "My Awesome Port Name"',
' ip address 10.241.0.33/27',
' ip access-group ACL-IN in',
' ip access-group ACL-OUT out',
'!',
'interface ve 201',
' vrf forwarding SHIZZLE',
' ip address 1.1.1.1/24',
' ip address 2.1.1.1/27',
' ip address 1.1.1.9/24 secondary',
' ip helper-address 10.10.10.1',
' ip helper-address 10.10.10.2',
' ip vrrp-extended auth-type simple-text-auth VLAN201',
' ip vrrp-extended vrid 1',
' backup priority 110 track-priority 50',
' ip-address 1.1.1.2',
' hello-interval 5',
' dead-interval 15',
' advertise backup',
' track-port ethernet 1/1',
' activate',
' ip vrrp-extended vrid 2',
' backup priority 110 track-priority 50',
' ip-address 1.1.1.3',
' ip-address 1.1.1.4',
' hello-interval 5',
' dead-interval 15',
' advertise backup',
' track-port ethernet 1/1',
' activate',
            ' no ip redirect',
'!',
'interface ve 1203',
'!',
'interface ve 3993',
' port-name Another-port-name',
' ip address 4.4.4.0/27',
'!'])
vlan1, vlan201, vlan2222, vlan3333 = self.switch.get_vlans()
assert_that(vlan1.number, equal_to(1))
assert_that(vlan1.name, equal_to("default"))
assert_that(vlan1.ips, has_length(0))
assert_that(vlan1.vrf_forwarding, is_(none()))
assert_that(vlan201.number, equal_to(201))
assert_that(vlan201.name, equal_to(None))
assert_that(vlan201.ips, has_length(3))
assert_that(vlan201.vrf_forwarding, is_("SHIZZLE"))
assert_that(vlan201.icmp_redirects, equal_to(False))
assert_that(vlan2222.number, equal_to(2222))
assert_that(vlan2222.name, equal_to("your-name-is-way-too-long-for-t"))
assert_that(vlan2222.ips, has_length(0))
assert_that(vlan2222.icmp_redirects, equal_to(True))
assert_that(vlan3333.number, equal_to(3333))
assert_that(vlan3333.name, equal_to("some-name"))
assert_that(vlan3333.ips, has_length(0))
vrrp_group1, vrrp_group2 = vlan201.vrrp_groups
assert_that(len(vrrp_group1.ips), equal_to(1))
assert_that(vrrp_group1.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group1.hello_interval, equal_to(5))
assert_that(vrrp_group1.dead_interval, equal_to(15))
assert_that(vrrp_group1.priority, equal_to(110))
assert_that(vrrp_group1.track_id, equal_to('ethernet 1/1'))
assert_that(vrrp_group1.track_decrement, equal_to(50))
assert_that(len(vrrp_group2.ips), equal_to(2))
assert_that(vrrp_group2.ips[0], equal_to(IPAddress('1.1.1.3')))
assert_that(vrrp_group2.ips[1], equal_to(IPAddress('1.1.1.4')))
assert_that(vrrp_group2.hello_interval, equal_to(5))
assert_that(vrrp_group2.dead_interval, equal_to(15))
assert_that(vrrp_group2.priority, equal_to(110))
assert_that(vrrp_group2.track_id, equal_to('ethernet 1/1'))
assert_that(vrrp_group2.track_decrement, equal_to(50))
assert_that(len(vlan201.dhcp_relay_servers), equal_to(2))
assert_that(str(vlan201.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
assert_that(str(vlan201.dhcp_relay_servers[1]), equal_to('10.10.10.2'))
def test_get_vlan_with_no_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_display(1750)
)
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_(None))
assert_that(vlan.access_groups[IN], is_(none()))
assert_that(vlan.access_groups[OUT], is_(none()))
assert_that(vlan.vrf_forwarding, is_(none()))
assert_that(vlan.ips, is_(empty()))
assert_that(vlan.vrrp_groups, is_(empty()))
assert_that(vlan.dhcp_relay_servers, is_(empty()))
def test_get_vlan_with_an_empty_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_with_vif_display(1750, 999, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 999").once().ordered().and_return([
"interface ve 999",
"!",
])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_("Shizzle"))
assert_that(vlan.access_groups[IN], is_(none()))
assert_that(vlan.access_groups[OUT], is_(none()))
assert_that(vlan.vrf_forwarding, is_(none()))
assert_that(vlan.ips, is_(empty()))
assert_that(vlan.vrrp_groups, is_(empty()))
assert_that(vlan.dhcp_relay_servers, is_(empty()))
def test_get_vlan_with_a_full_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_with_vif_display(1750, 1750, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once().ordered().and_return([
"interface ve 1750",
" vrf forwarding SHIZZLE",
" ip address 1.1.1.1/24",
" ip address 2.1.1.1/27",
" ip address 1.1.1.9/24 secondary",
" ip access-group ACL-IN in",
" ip access-group ACL-OUT out",
" ip helper-address 10.10.10.1",
" ip helper-address 10.10.10.2",
" ip vrrp-extended auth-type simple-text-auth VLAN201",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.2",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
" ip vrrp-extended vrid 2",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.3",
" ip-address 1.1.1.4",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.name, is_("Shizzle"))
assert_that(vlan.access_groups[IN], is_("ACL-IN"))
assert_that(vlan.access_groups[OUT], is_("ACL-OUT"))
assert_that(vlan.vrf_forwarding, is_("SHIZZLE"))
assert_that(vlan.ips, has_length(3))
assert_that(vlan.icmp_redirects, equal_to(True))
vrrp_group1, vrrp_group2 = vlan.vrrp_groups
assert_that(len(vrrp_group1.ips), equal_to(1))
assert_that(vrrp_group1.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group1.hello_interval, equal_to(5))
assert_that(vrrp_group1.dead_interval, equal_to(15))
assert_that(vrrp_group1.priority, equal_to(110))
assert_that(vrrp_group1.track_id, equal_to('ethernet 1/1'))
assert_that(vrrp_group1.track_decrement, equal_to(50))
assert_that(len(vrrp_group2.ips), equal_to(2))
assert_that(vrrp_group2.ips[0], equal_to(IPAddress('1.1.1.3')))
assert_that(vrrp_group2.ips[1], equal_to(IPAddress('1.1.1.4')))
assert_that(vrrp_group2.hello_interval, equal_to(5))
assert_that(vrrp_group2.dead_interval, equal_to(15))
assert_that(vrrp_group2.priority, equal_to(110))
assert_that(vrrp_group2.track_id, equal_to('ethernet 1/1'))
assert_that(vrrp_group2.track_decrement, equal_to(50))
assert_that(len(vlan.dhcp_relay_servers), equal_to(2))
assert_that(str(vlan.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
assert_that(str(vlan.dhcp_relay_servers[1]), equal_to('10.10.10.2'))
def test_get_vlan_interface_with_untagged_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
vlan_display(1, 'DEFAULT-VLAN', tagged_port_str="ethe 1/2 ethe 1/23 to 1/24")
)
vlan_interfaces = self.switch.get_vlan_interfaces(1)
assert_that(vlan_interfaces, equal_to(["ethernet 1/2", "ethernet 1/23", "ethernet 1/24"]))
def test_get_vlan_interface_with_tagged_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
vlan_display(1, 'DEFAULT-VLAN', untagged_port_str="ethe 1/2")
)
vlan_interfaces = self.switch.get_vlan_interfaces(1)
assert_that(vlan_interfaces, equal_to(["ethernet 1/2"]))
def test_get_vlan_interface_with_untagged_and_tagged_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1").once().ordered().and_return(
vlan_display(1, 'DEFAULT-VLAN', untagged_port_str="ethe 1/1", tagged_port_str="ethe 1/2 ethe 1/23 to 1/24")
)
vlan_interfaces = self.switch.get_vlan_interfaces(1)
assert_that(vlan_interfaces, equal_to(["ethernet 1/1", "ethernet 1/2", "ethernet 1/23", "ethernet 1/24"]))
def test_get_vlan_interface_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan inexistent").once().ordered().and_return([
"Error: vlan inexistent is not configured"
])
with self.assertRaises(UnknownVlan):
self.switch.get_vlan_interfaces("inexistent")
def test_get_vlan_unknown_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return([
"Error: vlan 1750 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.get_vlan(1750)
assert_that(str(expect.exception), equal_to("Vlan 1750 not found"))
def test_get_vlan_with_both_ip_and_ipv6_vrrp_groups_ipv6_is_ignored(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_with_vif_display(1750, 1750, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once()\
.ordered().and_return([
'interface ve 1750',
'port-name vrrp-extended vrid 42',
' ip address 10.241.0.33/27',
' no ip redirect',
' ip helper-address 10.10.10.1',
' ip helper-address 10.10.10.2',
' ipv6 address 2001:47c2:19:5::2/64',
' ipv6 address 2001:47c2:19:5::3/64',
' ipv6 nd suppress-ra',
' ip vrrp-extended vrid 42',
' backup priority 130 track-priority 20',
' ip-address 1.1.1.2',
' advertise backup',
' hello-interval 4',
' track-port ethernet 1/3',
' activate',
' ipv6 vrrp-extended vrid 43',
' backup priority 110 track-priority 50',
' ipv6-address 2001:47c2:19:5::1',
' advertise backup',
' hello-interval 5',
' track-port ethernet 1/2',
' activate',
'!'])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.ips, has_length(1))
assert_that(vlan.icmp_redirects, equal_to(False))
assert_that(vlan.vrrp_groups, has_length(1))
vrrp_group1 = vlan.vrrp_groups[0]
assert_that(len(vrrp_group1.ips), equal_to(1))
assert_that(vrrp_group1.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group1.hello_interval, equal_to(4))
assert_that(vrrp_group1.priority, equal_to(130))
assert_that(vrrp_group1.track_id, equal_to('ethernet 1/3'))
assert_that(vrrp_group1.track_decrement, equal_to(20))
assert_that(len(vlan.dhcp_relay_servers), equal_to(2))
assert_that(str(vlan.dhcp_relay_servers[0]), equal_to('10.10.10.1'))
assert_that(str(vlan.dhcp_relay_servers[1]), equal_to('10.10.10.2'))
def test_get_vlan_with_both_ip_and_ipv6_in_the_same_vrrp_group(self):
self.shell_mock.should_receive("do").with_args("show vlan 1750").once().ordered().and_return(
vlan_with_vif_display(1750, 1750, name="Shizzle")
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1750").once() \
.ordered()\
.and_return(['interface ve 1750',
'port-name vrrp-extended vrid 42',
' ip address 10.241.0.33/27',
' no ip redirect',
' ip helper-address 10.10.10.1',
' ip helper-address 10.10.10.2',
' ipv6 address 2001:47c2:19:5::2/64',
' ipv6 address 2001:47c2:19:5::3/64',
' ipv6 nd suppress-ra',
' ip vrrp-extended vrid 42',
' backup priority 130 track-priority 20',
' ip-address 1.1.1.2',
' advertise backup',
' hello-interval 4',
' track-port ethernet 1/3',
' activate',
' ipv6 vrrp-extended vrid 42',
' backup priority 170 track-priority 40',
' ipv6-address 2001:47c2:19:5::1',
' advertise backup',
' hello-interval 400',
' track-port ethernet 4/6',
' activate',
'!'])
vlan = self.switch.get_vlan(1750)
assert_that(vlan.number, is_(1750))
assert_that(vlan.ips, has_length(1))
assert_that(vlan.icmp_redirects, equal_to(False))
vrrp_group = vlan.vrrp_groups[0]
assert_that(len(vrrp_group.ips), equal_to(1))
assert_that(vrrp_group.ips[0], equal_to(IPAddress('1.1.1.2')))
assert_that(vrrp_group.hello_interval, equal_to(4))
assert_that(vrrp_group.priority, equal_to(130))
assert_that(vrrp_group.track_id, equal_to('ethernet 1/3'))
assert_that(vrrp_group.track_decrement, equal_to(20))
def test_add_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999 name Gertrude").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vlan(2999, name="Gertrude")
def test_add_vlan_bad_number(self):
self.shell_mock.should_receive("do").with_args("show vlan 5000").and_return([
"Error: vlan 5000 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 5000 name Gertrude").once().ordered().and_return([
"Error: vlan id 4091 is outside of allowed max of 4090"
])
self.shell_mock.should_receive("do").with_args("exit").once().ordered()
with self.assertRaises(BadVlanNumber) as expect:
self.switch.add_vlan(5000, name="Gertrude")
assert_that(str(expect.exception), equal_to("Vlan number is invalid"))
def test_add_vlan_bad_name(self):
self.shell_mock.should_receive("do").with_args("show vlan 5000").and_return([
"Error: vlan 5000 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 5000 name Gertr ude").once().ordered().and_return([
"Invalid input -> ude"
])
self.shell_mock.should_receive("do").with_args("exit").once().ordered()
with self.assertRaises(BadVlanName) as expect:
self.switch.add_vlan(5000, name="Gertr ude")
assert_that(str(expect.exception), equal_to("Vlan name is invalid"))
def test_add_vlan_no_name(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vlan(2999)
def test_add_vlan_already_exist_fails(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return(
vlan_display(2999)
)
with self.assertRaises(VlanAlreadyExist) as expect:
self.switch.add_vlan(2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 already exists"))
def test_remove_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
self.switch.remove_vlan(2999)
def test_remove_vlan_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_vlan(2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_set_access_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_access_vlan("ethernet 1/4", vlan=2999)
def test_set_access_vlan_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_access_vlan("ethernet 1/4", vlan=2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_set_access_vlan_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("untagged ethernet 9/999").once().ordered().and_return([
            'Invalid input -> 9/999',
'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_access_vlan("ethernet 9/999", vlan=2999)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_reset_interfaces_works(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("no interface ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("exit").once().ordered()
self.switch.reset_interface("ethernet 1/4")
def test_reset_interfaces_on_invalid_input_raises_unknown_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 9/999").once().ordered().and_return([
'Invalid input -> 9/999',
'Type ? for a list'])
with self.assertRaises(UnknownInterface):
self.switch.reset_interface("ethernet 9/999")
def test_reset_interfaces_on_invalid_interface_raises_unknown_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/64").once().ordered().and_return([
'Error - invalid interface 1/64'])
with self.assertRaises(UnknownInterface):
self.switch.reset_interface("ethernet 1/64")
def test_reset_interfaces_on_invalid_slot_raises_unknown_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 2/1").once().ordered().and_return([
'Error - interface 2/1 is not an ETHERNET interface'])
with self.assertRaises(UnknownInterface):
self.switch.reset_interface("ethernet 2/1")
def test_reset_interfaces_cleans_tagged_vlans(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").and_return(['VLAN: 1200 Untagged',
'VLAN: 1201 Tagged'])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1200").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("no untagged ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1201").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("no tagged ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no interface ethernet 1/4").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
self.switch.reset_interface("ethernet 1/4")
def test_unset_interface_access_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan brief | include ethe 1/4").once().ordered().and_return([
"1202 your-name- 1202 - Untagged Ports : ethe 1/10"
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1202").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_interface_access_vlan("ethernet 1/4")
def test_unset_interface_access_vlan_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan brief | include ethe 9/999").once().ordered().and_return([])
with self.assertRaises(UnknownInterface) as expect:
self.switch.unset_interface_access_vlan("ethernet 9/999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_access_mode_does_nothing_if_nothing_is_set(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([
"VLAN: 1 Untagged"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
self.switch.set_access_mode("ethernet 1/4")
def test_set_access_mode_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 9/999").once().ordered().and_return([
            'Invalid input -> 9/999',
'Type ? for a list'
])
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_access_mode("ethernet 9/999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_access_mode_does_nothing_if_only_an_untagged_vlan_not_knowing_if_it_is_an_access_or_native(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([
"VLAN: 123 Untagged"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
self.switch.set_access_mode("ethernet 1/4")
def test_set_access_mode_removes_all_tagged_vlans_and_the_untagged_because_it_is_a_native_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([
"VLAN: 100 Tagged",
"VLAN: 300 Untagged",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 100").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no tagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 300").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_access_mode("ethernet 1/4")
def test_set_trunk_mode(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 1/4").once().ordered().and_return([
"VLAN: 1 Untagged"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
self.switch.set_trunk_mode("ethernet 1/4")
def test_set_trunk_mode_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan ethernet 9/999").once().ordered().and_return([
            'Invalid input -> 9/999',
'Type ? for a list'
])
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_trunk_mode("ethernet 9/999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_add_trunk_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("tagged ethernet 1/1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_trunk_vlan("ethernet 1/1", vlan=2999)
def test_add_trunk_vlan_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("tagged ethernet 9/999").once().ordered().and_return([
            'Invalid input -> 9/999',
'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.add_trunk_vlan("ethernet 9/999", vlan=2999)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_add_trunk_vlan_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
with self.assertRaises(UnknownVlan) as expect:
self.switch.add_trunk_vlan("ethernet 1/1", vlan=2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_remove_trunk_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no tagged ethernet 1/11").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_trunk_vlan("ethernet 1/11", vlan=2999)
def test_remove_trunk_vlan_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_trunk_vlan("ethernet 1/2", vlan=2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_remove_trunk_vlan_not_set_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no tagged ethernet 1/14").and_return([
"Error: ports ethe 1/14 are not tagged members of vlan 2999"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(TrunkVlanNotSet) as expect:
self.switch.remove_trunk_vlan("ethernet 1/14", vlan=2999)
assert_that(str(expect.exception), equal_to("Trunk Vlan is not set on interface ethernet 1/14"))
def test_remove_trunk_vlan_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no tagged ethernet 9/999").and_return([
"Invalid input -> 1/99",
"Type ? for a list",
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.remove_trunk_vlan("ethernet 9/999", vlan=2999)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_interface_state_off(self):
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("disable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_interface_state("ethernet 1/4", OFF)
def test_set_interface_state_off_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ethernet 9/999").once().ordered().and_return([
            'Invalid input -> 9/999',
            'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_interface_state("ethernet 9/999", OFF)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_interface_state_on(self):
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_interface_state("ethernet 1/4", ON)
def test_set_interface_state_on_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ethernet 9/999").once().ordered().and_return([
            'Invalid input -> 9/999',
            'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).once().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_interface_state("ethernet 9/999", ON)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_interface_native_vlan_on_trunk(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_interface_native_vlan("ethernet 1/4", vlan=2999)
def test_set_interface_native_vlan_on_trunk_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return(
vlan_display(2999)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 2999").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("untagged ethernet 9/999").once().ordered().and_return([
            'Invalid input -> 9/999',
            'Type ? for a list'
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownInterface) as expect:
self.switch.set_interface_native_vlan("ethernet 9/999", vlan=2999)
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
def test_set_interface_native_vlan_on_trunk_invalid_vlan_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").once().ordered().and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").never()
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_interface_native_vlan("ethernet 1/4", vlan=2999)
assert_that(str(expect.exception), equal_to("Vlan 2999 not found"))
def test_unset_interface_native_vlan_on_trunk(self):
self.shell_mock.should_receive("do").with_args("show vlan brief | include ethe 1/4").once().ordered().and_return([
"1202 your-name- 1202 - Untagged Ports : ethe 1/10"
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1202").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no untagged ethernet 1/4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_interface_native_vlan("ethernet 1/4")
def test_unset_interface_native_vlan_on_trunk_invalid_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan brief | include ethe 9/999").once().ordered().and_return([])
with self.assertRaises(UnknownInterface) as expect:
self.switch.unset_interface_native_vlan("ethernet 9/999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 9/999"))
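    # IP-on-VLAN tests: the driver is expected to create the virtual router
    # interface ("router-interface ve <id>") lazily when the VLAN has none,
    # reuse an existing ve otherwise, and append the "secondary" keyword when
    # the new address falls inside a subnet already configured on that ve.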
def test_add_ip_creates_router_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_display(1234)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("router-interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
    def test_add_ip_doesnt_create_router_interface_if_already_created(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 3333)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 3333").once().ordered().and_return([
"interface ve 3333",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 3333").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
def test_add_ip_contained_in_a_subnet_already_present_requires_the_keyword_secondary(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/24",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25 secondary").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
def test_add_ip_already_defined_elsewhere_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25").and_return([
"IP/Port: Errno(6) Duplicate ip address"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(IPNotAvailable) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.4/25 is not available in this vlan"))
def test_add_ip_already_a_subnet_of_another_ve(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.4/25").and_return([
"IP/Port: Errno(11) ip subnet overlap with another interface"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(IPNotAvailable) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.4/25 is not available in this vlan"))
def test_add_ip_already_in_this_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
"!",
])
with self.assertRaises(IPAlreadySet) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.4/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.4/25 is already present in this vlan as None"))
def test_add_ip_already_in_this_interface_as_a_secondary(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
"!",
])
with self.assertRaises(IPAlreadySet) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.5/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.5/25 is already present in this vlan as None"))
def test_add_ip_to_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.add_ip_to_vlan(1234, IPNetwork("1.2.3.5/25"))
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_remove_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.4/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/24"))
def test_remove_secondary_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.5/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.5/24"))
def test_remove_ip_that_has_secondary_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
" ip address 1.2.3.6/24 secondary",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.5/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.6/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip address 1.2.3.4/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.5/24").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip address 1.2.3.6/24 secondary").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/24"))
def test_remove_unknown_ip_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
" ip address 1.2.3.6/24 secondary",
"!",
])
with self.assertRaises(UnknownIP) as expect:
self.switch.remove_ip_from_vlan(1234, IPNetwork("5.5.5.5/25"))
assert_that(str(expect.exception), equal_to("IP 5.5.5.5/25 not found"))
def test_remove_known_ip_with_wrong_mask_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.4/24",
" ip address 1.2.3.5/24 secondary",
" ip address 1.2.3.6/24 secondary",
"!",
])
with self.assertRaises(UnknownIP) as expect:
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.5/25"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.5/25 not found"))
def test_remove_ip_fails_if_there_aint_even_a_router_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_display(1234)
)
with self.assertRaises(UnknownIP) as expect:
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.4/24"))
assert_that(str(expect.exception), equal_to("IP 1.2.3.4/24 not found"))
def test_remove_ip_on_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_ip_from_vlan(1234, IPNetwork("1.2.3.5/25"))
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
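    # VRF tests: applying "vrf forwarding <name>" is expected to clear the
    # interface's IP configuration, so a device response containing only the
    # address-removal warning counts as success, while the "VRF(...) does not
    # exist" error maps to UnknownVrf.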
def test_set_vlan_vrf_success(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").and_return([
"interface ve 2500",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vrf forwarding MYVRF").once().ordered().and_return([
"Warning: All IPv4 and IPv6 addresses (including link-local) on this interface have been removed"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_vrf(2500, "MYVRF")
def test_set_vlan_vrf_incorrect_name(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").and_return([
"interface ve 2500",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vrf forwarding MYVRF").once().ordered().and_return([
"Error - VRF(MYVRF) does not exist or Route-Distinguisher not specified or Address Family not configured"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(UnknownVrf) as expect:
self.switch.set_vlan_vrf(2500, "MYVRF")
assert_that(str(expect.exception), equal_to("VRF name \"MYVRF\" was not configured."))
def test_set_vlan_vrf_without_interface_creates_it(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_display(2500)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("router-interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vrf forwarding MYVRF").once().ordered().and_return([
"Warning: All IPv4 and IPv6 addresses (including link-local) on this interface have been removed"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_vrf(2500, "MYVRF")
def test_set_vlan_vrf_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return([
"Error: vlan 2500 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_vlan_vrf(2500, "MYVRF")
assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_unset_vlan_vrf_success(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").and_return([
"interface ve 2500",
" vrf forwarding MYVRF",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no vrf forwarding MYVRF").once().ordered().and_return([
"Warning: All IPv4 and IPv6 addresses (including link-local) on this interface have been removed"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_vlan_vrf(2500)
def test_unset_vlan_vrf_not_set(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").and_return([
"interface ve 2500",
"!",
])
with self.assertRaises(VlanVrfNotSet) as expect:
self.switch.unset_vlan_vrf(2500)
assert_that(str(expect.exception), equal_to("VRF is not set on vlan 2500"))
def test_unset_vlan_vrf_from_known_vlan_with_no_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_display(2500)
)
with self.assertRaises(VlanVrfNotSet) as expect:
self.switch.unset_vlan_vrf(2500)
assert_that(str(expect.exception), equal_to("VRF is not set on vlan 2500"))
def test_unset_vlan_vrf_from_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return([
"Error: vlan 2500 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.unset_vlan_vrf(2500)
assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
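    # Access-group tests: the "undefined or zero length ACL" warning is treated
    # as a success response, an "Invalid input" response maps to
    # InvalidAccessGroupName, and an access-group already bound in the same
    # direction must be removed before the new one is applied.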
def test_set_access_group_creates_router_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_display(2500)
)
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("vlan 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("router-interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip access-group TheAccessGroup in").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
    def test_set_access_group_doesnt_create_router_interface_if_already_created(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 3333)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 3333").once().ordered().and_return([
"interface ve 3333",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 3333").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip access-group TheAccessGroup out").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_access_group(2500, OUT, "TheAccessGroup")
def test_set_access_group_fails_if_switch_says_so(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 3333)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 3333").once().ordered().and_return([
"interface ve 3333",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 3333").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip access-group TheAcc essGroup out").once().ordered().and_return([
"Invalid input -> sss out",
"Type ? for a list"
])
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(InvalidAccessGroupName) as expect:
self.switch.set_vlan_access_group(2500, OUT, "TheAcc essGroup")
assert_that(str(expect.exception), equal_to("Access Group Name is invalid: TheAcc essGroup"))
def test_set_access_group_needs_to_remove_actual_access_group_to_override_it(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").once().ordered().and_return([
"interface ve 2500",
" ip access-group helloThere! in",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip access-group helloThere! in").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip access-group TheAccessGroup in").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
def test_set_access_group_to_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return([
"Error: vlan 2500 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.set_vlan_access_group(2500, IN, "TheAccessGroup")
assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
def test_remove_access_group(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").once().ordered().and_return([
"interface ve 2500",
" ip access-group helloThere! in",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip access-group helloThere! in").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_vlan_access_group(2500, IN)
def test_remove_access_group_out(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").once().ordered().and_return([
"interface ve 2500",
" ip access-group Waaaat out",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 2500").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip access-group Waaaat out").and_return([
"Warning: An undefined or zero length ACL has been applied. "
"Filtering will not occur for the specified interface VE 2500 (outbound)."
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.unset_vlan_access_group(2500, OUT)
def test_remove_access_group_unknown_access_group_raises(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_with_vif_display(2500, 2500)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 2500").once().ordered().and_return([
"interface ve 2500",
" ip access-group Waaaat out",
"!",
])
with self.assertRaises(UnknownAccessGroup) as expect:
self.switch.unset_vlan_access_group(2500, IN)
assert_that(str(expect.exception), equal_to("Inbound IP access group not found"))
def test_remove_access_group_fails_if_there_aint_even_a_router_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return(
vlan_display(2500)
)
with self.assertRaises(UnknownAccessGroup) as expect:
self.switch.unset_vlan_access_group(2500, OUT)
assert_that(str(expect.exception), equal_to("Outgoing IP access group not found"))
def test_remove_access_group_on_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 2500").once().ordered().and_return([
"Error: vlan 2500 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.unset_vlan_access_group(2500, OUT)
assert_that(str(expect.exception), equal_to("Vlan 2500 not found"))
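    # Interface parsing tests: "show interfaces" supplies the admin state and
    # untagged (access/native) membership, while "show running-config vlan"
    # supplies tagged trunk membership, including "ethe x/y to x/z" ranges.
    # Ve and Loopback entries in the output are expected to be skipped.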
def test_get_interfaces(self):
self.shell_mock.should_receive("do").with_args("show interfaces").once().ordered().and_return([
"GigabitEthernet1/1 is down, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 1999 (untagged), port is in untagged mode, port state is Disabled",
" No port name",
"GigabitEthernet1/2 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 2999 (untagged), 3 L2 VLANS (tagged), port is in dual mode, port state is Disabled",
" Port name is hello",
"GigabitEthernet1/3 is down, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 1 (untagged), port is in untagged mode, port state is Disabled",
" No port name",
"GigabitEthernet1/4 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 1 (untagged), 1 L2 VLANS (tagged), port is in dual mode (default vlan), port state is Disabled",
" No port name",
"GigabitEthernet1/5 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of 1 L2 VLAN(S) (tagged), port is in tagged mode, port state is Disabled",
" No port name",
"GigabitEthernet1/6 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of 1 L2 VLAN(S) (tagged), port is in tagged mode, port state is Disabled",
" No port name",
"Ve1000 is down, line protocol is down",
" Hardware is Virtual Ethernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Port name is Salut",
" Vlan id: 1000",
" Internet address is 0.0.0.0/0, IP MTU 1500 bytes, encapsulation ethernet",
"Ve2000 is down, line protocol is down",
" Hardware is Virtual Ethernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" No port name",
" Vlan id: 2000",
" Internet address is 1.1.1.1/24, IP MTU 1500 bytes, encapsulation ethernet",
"Loopback1 is up, line protocol is up",
" Hardware is Loopback",
" Port name is LOOPBACK",
" Internet address is 108.163.134.4/32, IP MTU 1500 bytes, encapsulation LOOPBACK"
])
self.shell_mock.should_receive("do").with_args("show running-config vlan").once().ordered().and_return([
"spanning-tree",
"!",
"vlan 1 name DEFAULT-VLAN",
" no untagged ethe 1/3",
"!",
"vlan 100",
" tagged ethe 1/2 ethe 1/4 to 1/6",
"!",
"vlan 200",
" tagged ethe 1/2",
"!",
"vlan 300",
" tagged ethe 1/2",
"!",
"vlan 1999",
" untagged ethe 1/1",
"!",
"vlan 2999",
" untagged ethe 1/2",
"!",
"!"
])
result = self.switch.get_interfaces()
if1, if2, if3, if4, if5, if6 = result
assert_that(if1.name, equal_to("ethernet 1/1"))
assert_that(if1.shutdown, equal_to(False))
assert_that(if1.port_mode, equal_to(ACCESS))
assert_that(if1.access_vlan, equal_to(1999))
assert_that(if1.trunk_native_vlan, equal_to(None))
assert_that(if1.trunk_vlans, equal_to([]))
assert_that(if2.name, equal_to("ethernet 1/2"))
assert_that(if2.shutdown, equal_to(True))
assert_that(if2.port_mode, equal_to(TRUNK))
assert_that(if2.access_vlan, equal_to(None))
assert_that(if2.trunk_native_vlan, equal_to(2999))
assert_that(if2.trunk_vlans, equal_to([100, 200, 300]))
assert_that(if3.name, equal_to("ethernet 1/3"))
assert_that(if3.port_mode, equal_to(ACCESS))
assert_that(if3.access_vlan, equal_to(None))
assert_that(if3.trunk_native_vlan, equal_to(None))
assert_that(if3.trunk_vlans, equal_to([]))
assert_that(if4.name, equal_to("ethernet 1/4"))
assert_that(if4.port_mode, equal_to(TRUNK))
assert_that(if4.access_vlan, equal_to(None))
assert_that(if4.trunk_native_vlan, equal_to(None))
assert_that(if4.trunk_vlans, equal_to([100]))
assert_that(if5.trunk_vlans, equal_to([100]))
assert_that(if6.trunk_vlans, equal_to([100]))
def test_get_interface(self):
self.shell_mock.should_receive("do").with_args("show interfaces ethernet 1/2").once().ordered().and_return([
"GigabitEthernet1/2 is disabled, line protocol is down",
" Hardware is GigabitEthernet, address is 0000.0000.0000 (bia 0000.0000.0000,",
" Member of VLAN 2999 (untagged), 3 L2 VLANS (tagged), port is in dual mode, port state is Disabled",
" Port name is hello"
])
self.shell_mock.should_receive("do").with_args("show running-config vlan").once().ordered().and_return([
"spanning-tree",
"!",
"vlan 1 name DEFAULT-VLAN",
" no untagged ethe 1/3",
"!",
"vlan 100",
" tagged ethe 1/2 ethe 1/4 to 1/6",
"!",
"vlan 200",
" tagged ethe 1/2",
"!",
"vlan 300",
" tagged ethe 1/2",
"!",
"vlan 1999",
" untagged ethe 1/1",
"!",
"vlan 2999",
" untagged ethe 1/2",
"!",
"!"
])
interface = self.switch.get_interface("ethernet 1/2")
assert_that(interface.name, equal_to("ethernet 1/2"))
assert_that(interface.shutdown, equal_to(True))
assert_that(interface.port_mode, equal_to(TRUNK))
assert_that(interface.access_vlan, equal_to(None))
assert_that(interface.trunk_native_vlan, equal_to(2999))
assert_that(interface.trunk_vlans, equal_to([100, 200, 300]))
def test_get_nonexistent_interface_raises(self):
self.shell_mock.should_receive("do").with_args("show interfaces ethernet 1/1999").once().ordered().and_return([
"Invalid input -> 1/1999",
"Type ? for a list"
])
self.shell_mock.should_receive("do").with_args("show running-config vlan").never()
with self.assertRaises(UnknownInterface) as expect:
self.switch.get_interface("ethernet 1/1999")
assert_that(str(expect.exception), equal_to("Unknown interface ethernet 1/1999"))
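    # VRRP tests: groups are configured as "ip vrrp-extended" under the ve
    # interface, with authentication seeded from the VLAN number ("VLAN<id>"),
    # and the device's range/validation errors are expected to be translated
    # into the corresponding BadVrrp* / NoIpOnVlanForVrrp exceptions.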
def test_add_vrrp_success_single_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 15").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("advertise backup").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("track-port ethernet 1/1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("activate").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
def test_add_vrrp_success_multiple_ip(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 15").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("advertise backup").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("track-port ethernet 1/1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("activate").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4"), IPAddress("1.2.3.5")], priority=110,
hello_interval=5, dead_interval=15, track_id="ethernet 1/1", track_decrement=50)
def test_add_vrrp_from_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def test_add_existing_vrrp_to_same_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.2.3.4",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
with self.assertRaises(VrrpAlreadyExistsForVlan) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("Vrrp group 1 is already in use on vlan 1234"))
def test_add_vrrp_to_vlan_with_another_vrrp(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.2.3.4",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 2").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 15").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("advertise backup").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("track-port ethernet 1/1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("activate").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_vrrp_group(1234, 2, ips=[IPAddress("1.2.3.5")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
def test_add_vrrp_with_out_of_range_group_id(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 256").and_return([
"Error - 256 not between 1 and 255"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
with self.assertRaises(BadVrrpGroupNumber) as expect:
self.switch.add_vrrp_group(1234, 256, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP group number is invalid, must be contained between 1 and 255"))
def test_add_vrrp_with_bad_hello_interval(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 100").and_return([
"Error - 100 not between 1 and 84"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTimers) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=100, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP timers values are invalid"))
def test_add_vrrp_with_bad_dead_interval(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 100").and_return([
"Error - 100 not between 1 and 84"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTimers) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=100,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP timers values are invalid"))
def test_add_vrrp_with_bad_priority(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 256 track-priority 50").and_return([
"Error - 256 not between 1 and 255"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpPriorityNumber) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=256, hello_interval=5, dead_interval=100,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP priority value is invalid, must be contained between 1 and 255"))
def test_add_vrrp_with_bad_priority_type(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority testvalue track-priority 50").and_return([
"Invalid input -> testvalue track-priority 50"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpPriorityNumber) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority='testvalue', hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP priority value is invalid, must be contained between 1 and 255"))
def test_add_vrrp_with_bad_track_decrement(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 255").and_return([
"Error - 255 not between 1 and 254"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTracking) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement=255)
assert_that(str(expect.exception), equal_to("VRRP tracking values are invalid"))
def test_add_vrrp_with_bad_track_decrement_type(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority testvalue").and_return([
"Invalid input -> testvalue"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTracking) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet 1/1", track_decrement='testvalue')
assert_that(str(expect.exception), equal_to("VRRP tracking values are invalid"))
def test_add_vrrp_with_no_ip_on_interface(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([
"error - please configure ip address before configuring vrrp-extended"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(2).ordered().ordered()
with self.assertRaises(NoIpOnVlanForVrrp) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=100,
track_id="ethernet 1/1", track_decrement=50)
assert_that(str(expect.exception), equal_to("Vlan 1234 needs an IP before configuring VRRP"))
def test_add_vrrp_with_bad_tracking_id(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type simple-text-auth VLAN1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("backup priority 110 track-priority 50").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip-address 1.2.3.4").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("hello-interval 5").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("dead-interval 15").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("advertise backup").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("track-port ethernet not_an_interface").and_return([
"Invalid input -> not_an_interface"
]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).times(3).ordered().ordered().ordered()
with self.assertRaises(BadVrrpTracking) as expect:
self.switch.add_vrrp_group(1234, 1, ips=[IPAddress("1.2.3.4")], priority=110, hello_interval=5, dead_interval=15,
track_id="ethernet not_an_interface", track_decrement=50)
assert_that(str(expect.exception), equal_to("VRRP tracking values are invalid"))
def test_remove_vrrp_success(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.1",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip vrrp-extended auth-type no-auth").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_vrrp_group(1234, 1)
def test_remove_one_of_two_vrrp_success(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.1",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
" ip vrrp-extended vrid 2",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.2",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip vrrp-extended vrid 1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_vrrp_group(1234, 1)
def test_remove_vrrp_with_invalid_group_id(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip vrrp-extended auth-type simple-text-auth ********",
" ip vrrp-extended vrid 1",
" backup priority 110 track-priority 50",
" ip-address 1.1.1.1",
" hello-interval 5",
" dead-interval 15",
" advertise backup",
" track-port ethernet 1/1",
" activate",
"!",
])
with self.assertRaises(VrrpDoesNotExistForVlan) as expect:
self.switch.remove_vrrp_group(1234, 2)
assert_that(str(expect.exception), equal_to("Vrrp group 2 does not exist for vlan 1234"))
def test_remove_vrrp_from_unknown_vlan(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return([
"Error: vlan 1234 is not configured"
])
with self.assertRaises(UnknownVlan) as expect:
self.switch.remove_vrrp_group(1234, 2)
assert_that(str(expect.exception), equal_to("Vlan 1234 not found"))
def parse_range_test(self):
result = parse_if_ranges("")
assert_that(list(result), equal_to([]))
result = parse_if_ranges("ethe 1/2")
assert_that(list(result), equal_to(["ethe 1/2"]))
result = parse_if_ranges("ethe 1/1/2 to 1/1/5")
assert_that(list(result), equal_to(["ethe 1/1/2", "ethe 1/1/3", "ethe 1/1/4", "ethe 1/1/5"]))
result = parse_if_ranges("shizzle 1/1 shizzle 1/3 to 1/5 shizzle 1/7")
assert_that(list(result), equal_to(["shizzle 1/1", "shizzle 1/3", "shizzle 1/4", "shizzle 1/5", "shizzle 1/7"]))
@mock.patch("netman.adapters.switches.brocade.SshClient")
def test_connect(self, ssh_client_class_mock):
self.switch = brocade_factory_ssh(SwitchDescriptor(
hostname="my.hostname", username="the_user", password="the_password", model="brocade", port=22), mock.Mock())
self.shell_mock = flexmock()
ssh_client_class_mock.return_value = self.shell_mock
self.shell_mock.should_receive("get_current_prompt").and_return("hostname>").once().ordered()
self.shell_mock.should_receive("do").with_args("enable", wait_for=":").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("the_password").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("skip-page-display").and_return([]).once().ordered()
self.switch.connect()
ssh_client_class_mock.assert_called_with(
host="my.hostname",
username="the_user",
password="the_password",
port=22
)
@mock.patch("netman.adapters.switches.brocade.TelnetClient")
def test_connect_without_port_uses_default(self, telnet_client_class_mock):
self.switch = brocade_factory_telnet(SwitchDescriptor(
hostname="my.hostname", username="the_user", password="the_password", model="brocade"), mock.Mock())
self.shell_mock = flexmock()
telnet_client_class_mock.return_value = self.shell_mock
self.shell_mock.should_receive("get_current_prompt").and_return("hostname>").once().ordered()
self.shell_mock.should_receive("do").with_args("enable", wait_for=":").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("the_password").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("skip-page-display").and_return([]).once().ordered()
self.switch.connect()
telnet_client_class_mock.assert_called_with(
host="my.hostname",
username="the_user",
password="the_password"
)
@mock.patch("netman.adapters.switches.brocade.SshClient")
def test_auto_enabled_switch_doesnt_require_enable(self, ssh_client_class_mock):
self.switch = brocade_factory_ssh(SwitchDescriptor(hostname="my.hostname", username="the_user", password="the_password", model="brocade", port=8000), mock.Mock())
self.shell_mock = flexmock()
ssh_client_class_mock.return_value = self.shell_mock
self.shell_mock.should_receive("get_current_prompt").and_return("hostname#").once().ordered()
self.shell_mock.should_receive("do").with_args("enable", wait_for=": ").never()
self.shell_mock.should_receive("do").with_args("skip-page-display").and_return([]).once().ordered()
self.switch.connect()
ssh_client_class_mock.assert_called_with(
host="my.hostname",
username="the_user",
password="the_password",
port=8000
)
def test_disconnect(self):
logger = flexmock()
self.switch.logger = logger
logger.should_receive("debug")
self.shell_mock.should_receive("quit").with_args("exit").once().ordered()
logger.should_receive("info").with_args("FULL TRANSACTION LOG").once()
self.switch.shell.full_log = "FULL TRANSACTION LOG"
self.switch.disconnect()
def test_transactions_commit_write_memory(self):
self.shell_mock.should_receive("do").with_args("show vlan 2999").and_return([
"Error: vlan 2999 is not configured"
])
self.shell_mock.should_receive("do").with_args("configure terminal").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("vlan 2999 name Gertrude").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("exit").once().ordered().and_return([])
self.switch.start_transaction()
self.switch.add_vlan(2999, name="Gertrude")
self.shell_mock.should_receive("do").with_args("write memory").once().ordered()
self.switch.commit_transaction()
self.switch.end_transaction()
def test_add_dhcp_relay_server(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip helper-address 10.10.10.1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
def test_add_second_dhcp_relay_server(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip helper-address 10.10.10.1",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("ip helper-address 10.10.10.2").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.2'))
def test_add_same_dhcp_relay_server_fails(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip helper-address 10.10.10.1",
"!",
])
with self.assertRaises(DhcpRelayServerAlreadyExists) as expect:
self.switch.add_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
assert_that(str(expect.exception), equal_to("DHCP relay server 10.10.10.1 already exists on VLAN 1234"))
def test_remove_dhcp_relay_server(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
" ip helper-address 10.10.10.1",
"!",
])
self.shell_mock.should_receive("do").with_args("configure terminal").once().ordered().and_return([])
self.shell_mock.should_receive("do").with_args("interface ve 1234").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("enable").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("no ip helper-address 10.10.10.1").and_return([]).once().ordered()
self.shell_mock.should_receive("do").with_args("exit").and_return([]).twice().ordered().ordered()
self.switch.remove_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
def test_remove_non_existent_dhcp_relay_server_fails(self):
self.shell_mock.should_receive("do").with_args("show vlan 1234").once().ordered().and_return(
vlan_with_vif_display(1234, 1234)
)
self.shell_mock.should_receive("do").with_args("show running-config interface ve 1234").once().ordered().and_return([
"interface ve 1234",
" ip address 1.2.3.1/27",
"!",
])
with self.assertRaises(UnknownDhcpRelayServer) as expect:
self.switch.remove_dhcp_relay_server(1234, IPAddress('10.10.10.1'))
assert_that(str(expect.exception), equal_to("DHCP relay server 10.10.10.1 not found on VLAN 1234"))
def vlan_with_vif_display(vlan_id, vif_id, name="[None]"):
return vlan_display(vlan_id, name, vif_id=vif_id)
def vlan_display(vlan_id=9, vlan_name="[None]", tagged_port_str=None, untagged_port_str=None, vif_id=None):
ret = [
"PORT-VLAN {}, Name {}, Priority Level -, Priority Force 0, Creation Type STATIC".format(vlan_id, vlan_name),
"Topo HW idx : 81 Topo SW idx: 257 Topo next vlan: 0",
"L2 protocols : STP",
]
if untagged_port_str:
ret.append("Untagged Ports : {}".format(untagged_port_str))
if tagged_port_str:
ret.append("Statically tagged Ports : {}".format(tagged_port_str))
ret.extend([
"Associated Virtual Interface Id: {}".format(vif_id or "NONE"),
"----------------------------------------------------------",
"No ports associated with VLAN",
"Arp Inspection: 0",
"DHCP Snooping: 0",
"IPv4 Multicast Snooping: Disabled",
"IPv6 Multicast Snooping: Disabled",
])
if vif_id:
ret.extend([
"Ve{} is down, line protocol is down".format(vif_id),
" Type is Vlan (Vlan Id: {})".format(vlan_id),
" Hardware is Virtual Ethernet, address is 748e.f8a7.1b01 (bia 748e.f8a7.1b01)",
" No port name",
" Vlan id: {}".format(vlan_id),
" Internet address is 0.0.0.0/0, IP MTU 1500 bytes, encapsulation ethernet",
" Configured BW 0 kbps",
])
else:
ret.append("No Virtual Interfaces configured for this vlan")
return ret
| 52.365559 | 170 | 0.645779 | 118,061 | 0.973049 | 0 | 0 | 2,991 | 0.024652 | 0 | 0 | 31,501 | 0.259629 |
90bd59aae81c9889080df91dbd28e4a9b304ffd9 | 1,384 | py | Python | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | ["MIT"] | 36 | 2019-02-22T23:07:14.000Z | 2022-02-10T13:24:27.000Z | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | ["MIT"] | 717 | 2019-02-21T22:07:55.000Z | 2022-02-26T15:17:49.000Z | eahub/base/models.py | walambert/eahub.org | 21b6111b2626e4739c249d0881d16fbc818094cb | ["MIT"] | 19 | 2019-04-14T14:37:56.000Z | 2022-02-14T22:05:16.000Z | import uuid
from authtools import models as authtools_models
from django.core.validators import URLValidator
from django.db import models
from django.utils import timezone
from solo.models import SingletonModel
class User(authtools_models.AbstractEmailUser):
# django-allauth puts Google or EA.org SSO data in those fields only, not Profile
# because they have a slightly inflexible architecture
first_name = models.CharField(max_length=256, blank=True)
last_name = models.CharField(max_length=256, blank=True)
def has_profile(self) -> bool:
return hasattr(self, "profile")
class FeedbackURLConfig(SingletonModel):
site_url = models.TextField(
default="https://feedback.eahub.org", validators=[URLValidator()]
)
def __str__(self):
return "Feedback URL"
class Meta:
verbose_name = "Feedback URL"
class MessagingLog(models.Model):
USER = "USER"
GROUP = "GROUP"
RECIPIENT_TYPE_CHOICES = [
(USER, "User"),
(GROUP, "Group"),
]
sender_email = models.EmailField(max_length=254)
recipient_email = models.EmailField(max_length=254)
recipient_type = models.CharField(
max_length=5,
choices=RECIPIENT_TYPE_CHOICES,
default=USER,
)
send_action_uuid = models.UUIDField(default=uuid.uuid4)
time = models.DateTimeField(default=timezone.now)
| 28.833333 | 85 | 0.710983 | 1,163 | 0.840318 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.163295 |
90bee561f7ee7014b2253c39a50c061487d0ec34 | 2,106 | py | Python | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | ["Apache-2.0"] | 96 | 2018-08-23T16:49:05.000Z | 2021-11-25T00:47:16.000Z | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | ["Apache-2.0"] | 1,011 | 2018-08-17T12:25:21.000Z | 2021-11-18T09:30:19.000Z | scripts/math/generate_matrix_test.py | chr15murray/ledger | 85be05221f19598de8c6c58652139a1f2d9e362f | ["Apache-2.0"] | 65 | 2018-08-20T20:05:40.000Z | 2022-02-26T23:54:35.000Z | import numpy as np
types = ["int", "float", "double"]
def randi(*args):
return np.random.randint(-10, 10, size=args)
rngs = {"int": randi, "float": np.random.randn, "double": np.random.randn}
embodiments = {
"function": "R.%s(A,B).AllClose(C)",
"op": "(A %s B).AllClose(C)",
"inline_op": "(R = A, R %s B).AllClose(C)",
"inline_function": "( R = A, R.%s(B) ).AllClose(C)"
}
tests = {
'+': ("Addition", "Add", [], []),
'*': ("Multiplication", "Multiply", [], []),
'-': ("Subtraction", "Subtract", [], []),
'/': ("Division", "Divide", ["int"], []),
'dp': ("Dot product", "Dot", [], ["op", "inline_op"])
}
for type in types:
rng = rngs[type]
for op, details in tests.iteritems():
test_title, function, exclude, ignore = details
if type in exclude:
break
iop = op + "="
ifunction = "Inline" + function
names = {
"function": function,
"op": op,
"inline_op": iop,
"inline_function": ifunction
}
n = 7
m = 7
A = rng(n, m)
B = rng(n, m)
if op == "+":
C = A + B
elif op == "/":
C = A / B
elif op == "-":
C = A - B
elif op == "*":
C = A * B
elif op == "dp":
C = np.dot(A, B)
m1 = " ;\n".join([" ".join([str(y) for y in x]) for x in A])
m2 = " ;\n".join([" ".join([str(y) for y in x]) for x in B])
m3 = " ;\n".join([" ".join([str(y) for y in x]) for x in C])
print """
SCENARIO("%s") {
_M<%s> A,B,C,R;
R.Resize( %d, %d );
A = _M<%s>(R\"(\n%s\n)\");
B = _M<%s>(R\"(\n%s\n)\");
C = _M<%s>(R\"(\n%s\n)\");
""" % (test_title + " for " + type, type, n, m, type, m1, type, m2, type, m3)
for method, emb in embodiments.iteritems():
if method in ignore:
continue
name = names[method]
tt = emb % name
print "EXPECT( %s );" % tt
print "};"
print
| 25.071429 | 85 | 0.417854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.306268 |
90c01fddb271dd8ab9c578d5f65f7244cd0b0416 | 2,824 | py | Python | Lab 2/utils/inference_utils.py | davedecoder/aws-deepcomposer-samples | 34f94a04436dc3fa0ded8c353e0f3260f1b3305e | ["MIT-0"] | 6 | 2021-10-11T12:39:01.000Z | 2022-03-27T16:01:41.000Z | notebooks/AWS DeepComposer/reinvent-labs/lab-2/utils/inference_utils.py | jesussantana/AWS-Machine-Learning-Foundations | 526eddb486fe8398cafcc30184c4ecce49df5816 | ["MIT"] | null | null | null | notebooks/AWS DeepComposer/reinvent-labs/lab-2/utils/inference_utils.py | jesussantana/AWS-Machine-Learning-Foundations | 526eddb486fe8398cafcc30184c4ecce49df5816 | ["MIT"] | 5 | 2020-05-16T13:06:52.000Z | 2020-11-14T11:56:26.000Z | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import tensorflow as tf
import numpy as np
from utils import path_utils, midi_utils, display_utils
# --- local samples------------------------------------------------------------------
def load_melody_samples(n_sample=10):
"""Load the samples used for evaluation."""
sample_source_path = './dataset/eval.npy'
data = np.load(sample_source_path)
data = np.asarray(data, dtype=np.float32) # {-1, 1}
random_idx = np.random.choice(len(data), n_sample, replace=False)
sample_x = data[random_idx]
sample_z = tf.random.truncated_normal((n_sample, 2, 8, 512))
print("Loaded {} melody samples".format(len(sample_x)))
return sample_x, sample_z
# --- Training ------------------------------------------------------------------
def generate_pianoroll(generator, conditioned_track, noise_vector=None):
    if noise_vector is None:
noise_vector = tf.random.truncated_normal((1, 2, 8, 512))
return generator((conditioned_track, noise_vector), training=False)
def generate_midi(generator, saveto_dir, input_midi_file='./Experiments/data/happy_birthday_easy.mid'):
conditioned_track = midi_utils.get_conditioned_track(midi=input_midi_file)
generated_pianoroll = generate_pianoroll(generator, conditioned_track)
destination_path = path_utils.new_temp_midi_path(saveto_dir=saveto_dir)
midi_utils.save_pianoroll_as_midi(generated_pianoroll.numpy(), destination_path=destination_path)
return destination_path
def show_generated_pianorolls(generator, eval_dir, input_midi_file='./Experiments/data/happy_birthday_easy.mid', n_pr = 4):
conditioned_track = midi_utils.get_conditioned_track(midi=input_midi_file)
for i in range(n_pr):
generated_pianoroll = generate_pianoroll(generator, conditioned_track)
display_utils.show_pianoroll(generated_pianoroll) | 46.295082 | 127 | 0.725921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,293 | 0.457861 |
90c06ceec71cc460139a2abcafcd42b40b0a56a8 | 315 | py | Python | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | ["MIT"] | null | null | null | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | ["MIT"] | null | null | null | python/aisdk/player_movement.py | THUAI-Team/thuai2022-aisdk | 84d3239f3edd13cd9ffd9ad61c12890f393d8b88 | ["MIT"] | null | null | null | from enum import Enum
from sys import stderr
class PlayerMovement(Enum):
STOPPED = 0
WALKING = 1
RUNNING = 2
SLIPPED = 3
def to_json_representation(self):
return (str(self).split('.')[1]).lower()
class MovementNotAllowedError(ValueError):
def __init__(self, message):
super().__init__(message) | 22.5 | 44 | 0.714286 | 267 | 0.847619 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.009524 |
90c0801975d3d3c99714cb7e0cfc32ffb8ce7205 | 251 | py | Python | diagnosticApp/admin.py | LASI-UFPI/diagnostic-imaging | 7afd732dd76fe92bf6a2eba48e69fa4102a978cc | ["MIT"] | null | null | null | diagnosticApp/admin.py | LASI-UFPI/diagnostic-imaging | 7afd732dd76fe92bf6a2eba48e69fa4102a978cc | ["MIT"] | 10 | 2021-04-04T19:07:41.000Z | 2022-03-12T00:54:50.000Z | diagnosticApp/admin.py | LASI-UFPI/diagnostic-imaging | 7afd732dd76fe92bf6a2eba48e69fa4102a978cc | ["MIT"] | null | null | null | from django.contrib import admin
from .models import Image
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = ('image', 'predict_covid', 'predict_no_findings', 'predict_pneumonia', 'created_at', 'updated_at', 'activated_at')
| 35.857143 | 131 | 0.776892 | 167 | 0.665339 | 0 | 0 | 190 | 0.756972 | 0 | 0 | 100 | 0.398406 |
90c1b22f3826d40d5a8ce3040c417f17a6d33b58 | 1,247 | py | Python | digraph/source/generator.py | addy1997/python-RRT | 93983e17f2e6e93ff79c8f04a86ce28718ba2779 | ["MIT"] | 11 | 2020-05-28T00:55:55.000Z | 2022-01-03T10:59:26.000Z | digraph/source/generator.py | addy1997/Internship-HTIC | 93983e17f2e6e93ff79c8f04a86ce28718ba2779 | ["MIT"] | 1 | 2020-10-19T16:20:30.000Z | 2021-03-22T19:01:14.000Z | digraph/source/generator.py | addy1997/Internship-HTIC | 93983e17f2e6e93ff79c8f04a86ce28718ba2779 | ["MIT"] | 2 | 2021-07-07T01:09:50.000Z | 2022-03-12T23:40:56.000Z | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from time import time
import networkx as nx
from source.DiGraph import DiGraph
from source.model.Edge import Edge
def text_to_dict(filename):
    in_file = open(filename, "r")
lines = in_file.read()
in_file.close()
open_bracket = lines.index("{")
close_bracket = lines.index("}")
graph = eval(lines[open_bracket:close_bracket])
return graph
def specify_vertices(graph):
vertices = []
for node in graph.keys():
vertices.append(node)
return vertices
def specify_edges(graph):
edges = []
    for node in graph.keys():
        for neighbor in graph[node]:
            edges.append(Edge(node, neighbor))
return edges
def design_graph_object(graph, G= None):
if not G:
G = DiGraph()
for node in graph.keys():
if (node not in G.get_vertices()):
G.add_node(node)
for z in graph[node]:
if (z not in G.get_vertices()):
G.add_node(z)
G.add_edge(node, z)
return G
def set_digraph_library(graph, G):
for nodes in graph.keys():
G.add_node(nodes)
for i in graph[nodes]:
G.add_edge(nodes, i)
return G
# In[ ]:
| 18.893939 | 51 | 0.572574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.056937 |
90c3c10d1bd2a3bf341092a750b3ced35a3d1342 | 451 | py | Python | [1] BEGINNER/1000 - Hello World!.py | tiago040/URI-SOLUTIONS | 519d3950252a6002e8926416b2f8217ba08fe721 | ["MIT"] | 1 | 2022-03-15T03:03:26.000Z | 2022-03-15T03:03:26.000Z | [1] BEGINNER/1000 - Hello World!.py | tiago040/URI-SOLUTIONS | 519d3950252a6002e8926416b2f8217ba08fe721 | ["MIT"] | null | null | null | [1] BEGINNER/1000 - Hello World!.py | tiago040/URI-SOLUTIONS | 519d3950252a6002e8926416b2f8217ba08fe721 | ["MIT"] | null | null | null | '''
https://resources.urionlinejudge.com.br/gallery/images/problems/UOJ_1000.png
Welcome to the URI Online Judge!
Your first program in any programming language is usually "Hello World!". In this first problem, all you need to do is print this message on the screen.
Input
This problem has no input.
Output
You must print the message "Hello World!" as shown in the example below.
'''
print('Hello World!') | 30.066667 | 180 | 0.78714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.980392 |
90c4143d466cb79c3fc483f709341546a5611433 | 615 | py | Python | py/py_0668_square_root_smooth_numbers.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | ["MIT"] | null | null | null | py/py_0668_square_root_smooth_numbers.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | ["MIT"] | null | null | null | py/py_0668_square_root_smooth_numbers.py | lcsm29/project-euler | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | ["MIT"] | null | null | null | # Solution of;
# Project Euler Problem 668: Square root smooth Numbers
# https://projecteuler.net/problem=668
#
# A positive integer is called square root smooth if all of its prime factors
# are strictly less than its square root. Including the number $1$, there are
# $29$ square root smooth numbers not exceeding $100$. How many square root
# smooth numbers are there not exceeding $10\,000\,000\,000$?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
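# --- Illustrative sketch added by the editor; NOT part of the original solution ---
# A naive brute-force count of square root smooth numbers, feasible only for small
# limits. It reproduces the count of 29 such numbers not exceeding 100 quoted in the
# problem statement above, but it is far too slow for the actual bound of 10**10.
# The helper names below (_largest_prime_factor, _count_sqrt_smooth_naive) are
# hypothetical and do not appear in the original file.
def _largest_prime_factor(n):
    largest, d = 1, 2
    while d * d <= n:
        while n % d == 0:
            largest, n = d, n // d
        d += 1
    return max(largest, n) if n > 1 else largest
def _count_sqrt_smooth_naive(limit):
    # k is square root smooth when its largest prime factor p satisfies p * p < k;
    # 1 has no prime factors and is counted by convention.
    return sum(1 for k in range(1, limit + 1)
               if k == 1 or _largest_prime_factor(k) ** 2 < k)
# _count_sqrt_smooth_naive(100) == 29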
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 668
timed.caller(dummy, n, i, prob_id)
| 26.73913 | 78 | 0.705691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 464 | 0.754472 |
90c531d029592c14df121556138eab86864faa16 | 2,927 | py | Python | user/forms.py | Zidan-Kharisma-Sakana/uts-f02 | d29652cb73829ffa63e0ca4d0e5f8d6d62500367 | ["Unlicense"] | null | null | null | user/forms.py | Zidan-Kharisma-Sakana/uts-f02 | d29652cb73829ffa63e0ca4d0e5f8d6d62500367 | ["Unlicense"] | null | null | null | user/forms.py | Zidan-Kharisma-Sakana/uts-f02 | d29652cb73829ffa63e0ca4d0e5f8d6d62500367 | ["Unlicense"] | null | null | null | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.forms import ValidationError, EmailField
from user import models
class MyAuthenticationForm(AuthenticationForm):
""""
Overide method clean from AuthenticationForm to show that a user hasn't activate their account
"""
error_messages = {
'invalid_login': (
"Please enter a correct %(username)s and password. Note that both "
"fields may be case-sensitive."
),
'inactive': ("This Account hasn't been activated yet, Please check your email :)"),
}
def confirm_login_allowed(self, user):
if not user.is_active:
raise ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
self.user_cache = authenticate(self.request, username=username, password=password)
if self.user_cache is None:
print(username)
try:
user_temp = User.objects.get(username=username)
except:
user_temp = None
print(user_temp)
if user_temp is not None:
self.confirm_login_allowed(user_temp)
else:
raise ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
return self.cleaned_data
class CreateUserForm(UserCreationForm):
""""
Override UserCreationForm to include email field
"""
email = EmailField(required=True, label='Email')
class Meta:
model = User
fields = ("username", "email", "password1", "password2")
error_messages = {
'password_mismatch': ('The two password fields didn’t match.'),
'email_taken': 'Your email has been taken'
}
def clean_email(self):
"""
Check if the email had already been taken
"""
email = self.cleaned_data.get('email')
num = User.objects.filter(email=email)
if num.count() > 0:
raise ValidationError(
self.error_messages['email_taken'],
code='email_taken',
)
return email
def save(self, commit= True):
user = super(CreateUserForm, self).save(commit=False)
email = self.cleaned_data.get('email')
user.email = email
user.is_active=False
if commit:
user.save()
return user
| 32.522222 | 98 | 0.585924 | 2,656 | 0.906794 | 0 | 0 | 0 | 0 | 0 | 0 | 699 | 0.238648 |
90c5a20a145e1728ffe9af8247c9eff4f8b98279 | 3,476 | py | Python | data-structures-and-algorithms/examples/binary_tree_recursive.py | vinnyhoward/til | 8bb0055578220c4d83ddd12ac34bbb2fd3ae000e | ["MIT"] | null | null | null | data-structures-and-algorithms/examples/binary_tree_recursive.py | vinnyhoward/til | 8bb0055578220c4d83ddd12ac34bbb2fd3ae000e | ["MIT"] | 32 | 2020-07-16T07:11:35.000Z | 2022-02-27T19:01:03.000Z | data-structures-and-algorithms/examples/binary_tree_recursive.py | vinnyhoward/til | 8bb0055578220c4d83ddd12ac34bbb2fd3ae000e | ["MIT"] | null | null | null | class Node(object): # Similar to Linked List initial set-up
def __init__(self, value): # Constructor
self.value = value
self.left = None
self.right = None
class BinaryTree(object):
def __init__(self, root):
self.root = Node(root)
def print_tree(self, traversal_type):
if traversal_type == "preorder":
return self.preorder_print(tree.root, "") # init
elif traversal_type == "inorder":
return self.in_order_print(tree.root, "") # init
elif traversal_type == "postorder":
return self.post_order_print(tree.root, "") # init
else:
print("Traversal type " + str(traversal_type) + "not valid")
return False
def preorder_print(self, start, traversal):
# Root --> Left --> Right
if start:
traversal += (str(start.value) + "--")
traversal = self.preorder_print(start.left, traversal)
traversal = self.preorder_print(start.right, traversal)
return traversal
def in_order_print(self, start, traversal):
# Very Left --> Root --> Very Right
if start:
traversal = self.in_order_print(start.left, traversal)
traversal += (str(start.value) + '--')
traversal = self.in_order_print(start.right, traversal)
return traversal
def post_order_print(self, start, traversal):
# Very Left --> Very Right --> Root
if start:
traversal = self.post_order_print(start.left, traversal)
traversal = self.post_order_print(start.right, traversal)
traversal += (str(start.value) + '--')
return traversal
"""Try doing Post-Order tomorrow"""
# Visualization of Current Tree
# Pre-Order Output: 1--2--4--9--10--11--5--3--6--7--8--
# In-Order Output: 11--10--9--4--2--5--1--6--3--7--8--
# Post-Order Output: 11--10--9--4--5--2--6--8--7--3--1--
# 1
# / \
# 2 3
# / | / |
# 4 5 6 7
# / \
# 9 8
# /
# 10
# /
# 11
# Tree Set-Up
# Another implementation
# class BinaryTree(object):
# def __init__(self, root):
# self.root = Node(root)
# def search(self, find_val):
# return self.preorder_search(tree.root, find_val)
# def print_tree(self):
# return self.preorder_print(tree.root, "")[:-1]
# def preorder_search(self, start, find_val):
# if start:
# if start.value == find_val:
# return True
# else:
# return self.preorder_search(start.left, find_val) or self.preorder_search(start.right, find_val)
# return False
# def preorder_print(self, start, traversal):
# if start:
# traversal += (str(start.value) + "-")
# traversal = self.preorder_print(start.left, traversal)
# traversal = self.preorder_print(start.right, traversal)
# return traversal
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.root.right.left = Node(6)
tree.root.right.right = Node(7)
tree.root.right.right.right = Node(8)
tree.root.left.left.left = Node(9)
tree.root.left.left.left.left = Node(10)
tree.root.left.left.left.left.left = Node(11)
# print(tree.print_tree("preorder"))
# print(tree.print_tree("inorder"))
print(tree.print_tree("postorder"))
| 31.315315 | 114 | 0.585155 | 1,702 | 0.489643 | 0 | 0 | 0 | 0 | 0 | 0 | 1,577 | 0.453682 |
90c608f7d84094a6c38930b235bc4cc2b22ca8af | 2,044 | py | Python | powerranger/files.py | clayboone/powerranger | 09315c8b37132add56ce31f1b0c1dd0b1692bd23 | ["MIT"] | null | null | null | powerranger/files.py | clayboone/powerranger | 09315c8b37132add56ce31f1b0c1dd0b1692bd23 | ["MIT"] | 8 | 2020-04-18T20:20:08.000Z | 2020-05-06T13:39:03.000Z | powerranger/files.py | clayboone/powerranger | 09315c8b37132add56ce31f1b0c1dd0b1692bd23 | ["MIT"] | null | null | null | import curses
import itertools
import os
from pathlib import Path
import stat
from typing import Optional, Union
import config
from colors import Colors
class Item:
"""An item inside of a Directory."""
def __init__(self, path: Union[Path, str]):
self._path = Path(path)
self._selected = False
@property
def name(self) -> str:
"""The name of the item, not including parents."""
return self._path.name
@property
def color(self) -> curses.color_pair:
"""An initialized ncurses color pair associated with the type of file
for this Item.
"""
if self.selected:
return Colors.black_on_white()
if self._path.is_dir():
return Colors.blue_on_black()
return Colors.default()
@property
def selected(self) -> Optional[bool]:
"""Return whether this item should appear as selected"""
return self._selected
@selected.setter
def selected(self, value: bool):
self._selected = value
def is_hidden(self) -> bool:
"""Return whether or not the file should be hidden."""
return self._has_hidden_attribute() or self._path.name.startswith(".")
def _has_hidden_attribute(self) -> bool:
return bool(os.stat(self._path.resolve()).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN)
class Directory:
"""A list of items inside of a directory."""
def __init__(self, path: Union[Path, str]):
self.path = Path(path)
def __iter__(self):
elements = self.path.iterdir()
if config.SORT_FOLDERS_ON_TOP:
element1, element2 = itertools.tee(elements)
elements = itertools.chain(
(item for item in element1 if item.is_dir()),
(item for item in element2 if not item.is_dir()),
)
for element in elements:
item = Item(element)
if item.is_hidden() and not config.SHOW_HIDDEN_FILES:
continue
yield Item(element)
| 27.253333 | 98 | 0.620841 | 1,884 | 0.921722 | 529 | 0.258806 | 697 | 0.340998 | 0 | 0 | 347 | 0.169765 |
90c62d1dcdf46b749f960d31f0195049399228f2 | 12,647 | py | Python | parlai/mturk/webapp/run_mocks/mock_turk_manager.py | lizekang/ParlAI | 9e113105d1690deda0b372f6aeaf933ab58e3a72 | ["BSD-3-Clause"] | 1 | 2019-04-19T06:39:41.000Z | 2019-04-19T06:39:41.000Z | parlai/mturk/webapp/run_mocks/mock_turk_manager.py | lizekang/ParlAI | 9e113105d1690deda0b372f6aeaf933ab58e3a72 | ["BSD-3-Clause"] | null | null | null | parlai/mturk/webapp/run_mocks/mock_turk_manager.py | lizekang/ParlAI | 9e113105d1690deda0b372f6aeaf933ab58e3a72 | ["BSD-3-Clause"] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import logging
import os
import threading
import time
import uuid
from parlai.mturk.core.agents import AssignState
from parlai.mturk.core.socket_manager import Packet
from parlai.mturk.webapp.run_mocks.mock_turk_agent import MockTurkAgent
import parlai.mturk.core.data_model as data_model
import parlai.mturk.core.shared_utils as shared_utils
parent_dir = os.path.dirname(os.path.abspath(__file__))
class MockTurkManager():
"""Manages interactions between MTurk agents as well as direct interactions
between a world and the MTurk server.
"""
current_manager = None
def __init__(self, opt, mturk_agent_ids, is_test=False, use_db=False):
"""Fake an MTurk manager that has the functionality to run a task,
but not on mturk
"""
self.opt = opt
self.mturk_agent_ids = mturk_agent_ids
self.has_run = False
self.sandbox = True
self.db_logger = None
# Required lifecycle functions below
def setup_server(self, task_directory_path=None):
"""Noop, we aren't connecting to a server"""
print('[mock] setup_server called')
def start_new_run(self):
"""Initialize expected state to not cause crashes"""
self.run_id = str(int(time.time()))
self.task_group_id = '{}_{}'.format(self.opt['task'], self.run_id)
print('[mock] start_new_run called')
def ready_to_accept_workers(self, timeout_seconds=None):
"""No threads, as there is no sustained worker pool. Instead
we instantiate x MockTurkAgents in onboarding"""
self.id_to_agent = {
agent_id: MockTurkAgent(
self.opt, self, 'hit_id_{}'.format(agent_id),
'assignment_id_{}'.format(agent_id), agent_id,
) for agent_id in self.mturk_agent_ids
}
self.agents = list(self.id_to_agent.values())
MockTurkManager.current_manager = self
print('[mock] ready_to_accept_workers called')
def set_onboard_function(self, onboard_function):
self.onboard_function = onboard_function
print('[mock] set_onboard_function called')
def start_task(self, eligibility_function, assign_role_function,
task_function):
"""Handle running a task by checking to see when enough agents are
in the pool to start an instance of the task. Continue doing this
until the desired number of conversations is had.
"""
print('[mock] start_task called')
if callable(eligibility_function):
# Convert legacy eligibility_functions to the new format
eligibility_function = {
'multiple': False,
'func': eligibility_function,
}
else:
# Ensure the eligibility function is valid
if 'func' not in eligibility_function:
shared_utils.print_and_log(
logging.CRITICAL,
"eligibility_function has no 'func'. Cancelling."
)
raise Exception(
'eligibility_function dict must contain a `func` field '
'containing the actual function.'
)
elif not callable(eligibility_function['func']):
shared_utils.print_and_log(
logging.CRITICAL,
"eligibility_function['func'] not a function. Cancelling."
)
raise Exception(
"eligibility_function['func'] must contain a function. "
"If eligibility_function['multiple'] is set, it should "
"filter through the list of workers and only return those "
"that are currently eligible to participate. If it is not "
"set, it should take in a single worker and return whether"
" or not they are eligible."
)
if 'multiple' not in eligibility_function:
eligibility_function['multiple'] = False
valid_agents = [a for a in self.agents if a.mock_status == 'waiting']
needed_agents = len(self.mturk_agent_ids)
while len(valid_agents) < needed_agents:
valid_agents = [a for a in self.agents
if a.mock_status == 'waiting']
# Add the required number of valid agents to the conv
agents = [a for a in valid_agents[:needed_agents]]
assign_role_function(agents)
# Allow task creator to filter out agents and run
# versions of the task that require fewer agents
agents = [a for a in agents if a.id is not None]
for agent in agents:
agent.mock_status = AssignState.STATUS_IN_TASK
agent.set_status(AssignState.STATUS_IN_TASK)
agent.conversation_id = 'in_task'
task_function(mturk_manager=self, opt=self.opt, workers=agents)
for agent in agents:
agent.mock_status = AssignState.STATUS_DONE
agent.set_status(AssignState.STATUS_DONE)
agent.task_done = True
def shutdown(self, force=False):
"""No servers, nothing to clean up"""
print('[mock] shutdown called')
def move_agents_to_waiting(self, agents):
"""Mock moving to a waiting world"""
for agent in agents:
agent.mock_status = AssignState.STATUS_WAITING
agent.set_status(AssignState.STATUS_WAITING)
agent.conversation_id = 'waiting'
def disconnect_agent(self, worker_id, assignment_id):
"""Set an agent to status disconnect, and all other agents to
partner disconnect. send them the correct message. Mocks
MTurkManager._handle_agent_disconnect
"""
worker = self.id_to_agent[worker_id]
worker.disconnected = True
for agent in self.agents:
if not agent.disconnected:
agent.some_agent_disconnected = True
def worker_alive(self, worker_id, hit_id, assign_id):
"""Mocks baseline worker_alive status changes for mock agents"""
agent = self.id_to_agent[worker_id]
if agent.mock_status == AssignState.STATUS_NONE:
agent.status = AssignState.STATUS_ONBOARDING
agent.set_status(AssignState.STATUS_ONBOARDING)
self.onboard_new_agent(agent)
else:
if agent.status in [AssignState.STATUS_ONBOARDING,
AssignState.STATUS_IN_TASK]:
pass
elif (agent.status == AssignState.STATUS_DISCONNECT or
agent.status == AssignState.STATUS_DONE or
agent.status == AssignState.STATUS_EXPIRED or
agent.status == AssignState.STATUS_RETURNED or
agent.status == AssignState.STATUS_PARTNER_DISCONNECT):
# reconnect is an inactive command
data = agent.get_inactive_command_data()
self.send_command(worker_id, assign_id, data)
def on_new_message(self, worker_id, msg):
agent = self.id_to_agent[worker_id]
agent.put_data(msg.id, msg.data)
agent.append_message(msg.data)
def onboard_new_agent(self, agent):
"""Creates an onboarding thread for the given agent"""
# get state variable in question
worker_id = agent.worker_id
assignment_id = agent.assignment_id
def _onboard_function(agent):
"""Onboarding wrapper to set state to onboarding properly"""
if self.onboard_function:
agent.id = 'Onboarding'
self.onboard_function(agent)
# once onboarding is done, move into a waiting world
self.move_agents_to_waiting([agent])
# Start the onboarding thread and run it
onboard_thread = threading.Thread(
target=_onboard_function,
args=(agent,),
name='onboard-{}-{}'.format(worker_id, assignment_id)
)
onboard_thread.daemon = True
onboard_thread.start()
return True
# MTurk Agent Interaction Functions #
def send_message(self, receiver_id, assignment_id, data,
blocking=True, ack_func=None):
"""'Send' a message directly by updating the queue of messages not
        yet received that the agent can pull from
"""
data = data.copy() # Ensure data packet is sent in current state
data['type'] = data_model.MESSAGE_TYPE_MESSAGE
# Force messages to have a unique ID
if 'message_id' not in data:
data['message_id'] = str(uuid.uuid4())
conversation_id = None
agent = self.id_to_agent[receiver_id]
conversation_id = agent.conversation_id
event_id = shared_utils.generate_event_id(receiver_id)
packet = Packet(
event_id,
Packet.TYPE_MESSAGE,
'world',
receiver_id,
assignment_id,
data,
conversation_id=conversation_id,
blocking=blocking,
ack_func=ack_func
)
shared_utils.print_and_log(
logging.INFO,
'Manager sending: {}'.format(packet),
should_print=self.opt['verbose']
)
# Push message to restore queue and incoming queue
agent.append_message(packet.data)
agent.unread_messages.append(packet)
return data['message_id']
def send_command(self, receiver_id, assignment_id, data, blocking=True,
ack_func=None):
"""Commands aren't actually sent this way, as state updates are read"""
return None
def timeout_all_agents(self):
"""Set all agent statuses to disconnect to kill the world"""
for agent in self.agents:
agent.disconnected = True
# BELOW ARE STUBS THAT EXIST TO HOPEFULLY MAKE RUN FILES NOT CRASH
# NONE OF THEM DO ANYTHING (though some return success values)
def mark_workers_done(self, workers):
pass
def free_workers(self, workers):
pass
def get_agent_work_status(self, assignment_id):
pass
def get_qualification_list(self, qualifications=None):
return []
def create_additional_hits(self, num_hits, qualifications=None):
return 'fake_page_url'
def create_hits(self, qualifications=None):
return 'fake_page_url'
def get_hit(self, hit_id):
pass
def get_assignment(self, assignment_id):
pass
def get_assignments_for_hit(self, hit_id):
pass
def expire_all_unassigned_hits(self):
pass
def approve_work(self, assignment_id, override_rejection=False):
print('[mock] Assignment {} approved'.format(assignment_id))
def reject_work(self, assignment_id, reason):
print('[mock] Assignment {} rejected for {}'.format(
assignment_id, reason))
def approve_assignments_for_hit(self, hit_id, override_rejection=False):
print('[mock] HIT {} approved'.format(hit_id))
def block_worker(self, worker_id, reason):
print('[mock] Worker {} blocked for reason {}'.format(
worker_id, reason))
def soft_block_worker(self, worker_id, qual='block_qualification'):
print('[mock] Worker {} given qual {}'.format(worker_id, qual))
def un_soft_block_worker(self, worker_id, qual='block_qualification'):
print('[mock] Worker {} revoked qual {}'.format(worker_id, qual))
def give_worker_qualification(self, worker_id, qual_name, qual_value=None):
print('[mock] Worker {} given qual {}'.format(worker_id, qual_name))
def remove_worker_qualification(self, worker_id, qual_name, reason=''):
print('[mock] Worker {} revoked qual {}'.format(worker_id, qual_name))
def create_qualification(self, qualification_name, description,
can_exist=True):
pass
def pay_bonus(self, worker_id, bonus_amount, assignment_id, reason,
unique_request_token):
print('[mock] Worker {} paid bonus {}'.format(worker_id, bonus_amount))
def email_worker(self, worker_id, subject, message_text):
print('[mock] Worker {} emailed {}'.format(worker_id, message_text))
return {'success': True}
| 39.276398 | 79 | 0.633826 | 11,925 | 0.942911 | 0 | 0 | 0 | 0 | 0 | 0 | 3,729 | 0.294853 |
90c6e79ecc4dd80005f6bae2d4343c17f1c96324 | 6,517 | py | Python | scripts/train_model.py | allenai/sledgehammer | 03982da9cd0c543a76832a903033c2d97dbfb113 | ["Apache-2.0"] | 47 | 2020-04-14T18:10:05.000Z | 2022-02-16T05:17:03.000Z | scripts/train_model.py | yardenTal1/sledgehammer | 842a48023451a81c83cdf29cdd7c601b1114a207 | ["Apache-2.0"] | 1 | 2020-05-04T19:35:06.000Z | 2020-06-08T02:53:53.000Z | scripts/train_model.py | yardenTal1/sledgehammer | 842a48023451a81c83cdf29cdd7c601b1114a207 | ["Apache-2.0"] | 9 | 2020-04-12T17:31:12.000Z | 2022-03-19T21:23:59.000Z | #!/usr/bin/env python
import sys
import os
import random
import copy
import subprocess
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# PYTHON_DIR="/".join(os.environ['CONDA_EXE'].split("/")[:-2])+'/envs/allennlp_0.8.4/bin/'
exit_threshold=0.9
def main():
parser = arg_parser()
args = parser.parse_args()
lrs = [2e-5, 3e-5, 5e-5]
dropout = [0.1, 0.1]
layer_indices = args.layer_indices
dont_delete = len(layer_indices.split("_")) > 1
n_test = args.n_tests
dataset = args.dataset
start = args.start
is_lowercase = args.bert_type[-7:] == 'uncased'
cwd = os.getcwd()+"/"
training_config_file = cwd+"training_config/sledgehammer_bert_classification.jsonnet"
base_path = args.data_dir+"/text_cat/"
if args.nli:
training_config_file = cwd+"training_config/sledgehammer_bert_nli.jsonnet"
base_path = args.data_dir+"/nli/"
slurm = args.slurm
extra_args = ""
if slurm is None:
os.environ["BERT_TYPE"] = args.bert_type
os.environ["IS_LOWERCASE"] = str(is_lowercase).lower()
os.environ["TRAIN_PATH"] = base_path+dataset+"/train"
os.environ["DEV_PATH"] = base_path+dataset+"/dev"
os.environ["TEST_PATH"] = base_path+dataset+"/test"
os.environ["LAYER_INDICES"] = layer_indices
# @todo change me back to 0
os.environ["CUDA_DEVICE"] = str(args.cuda_device)
os.environ["SCALING_TEMPERATURE"] = "_".join(["1" for i in range(len(layer_indices.split("_")))])
os.environ["BATCH_SIZE"] = str(args.batch_size)
os.environ["MAX_PIECES"] = str(args.max_pieces)
os.environ["TEMPERATURE_THRESHOLD"] = str(exit_threshold)
os.environ["ADD_PREVIOUS_LAYER_LOGITS"] = 'false'
os.environ["MULTITASK"] = 'false'
os.environ["NUM_EPOCHS"] = str(args.num_epochs)
else:
extra_args = "--export BERT_TYPE={},IS_LOWERCASE={},TRAIN_PATH={},DEV_PATH={},TEST_PATH={},LAYER_INDICES={},CUDA_DEVICE={},SCALING_TEMPERATURE={},BATCH_SIZE={},MAX_PIECES={},TEMPERATURE_THRESHOLD={},ADD_PREVIOUS_LAYER_LOGITS={},MULTITASK={},NUM_EPOCHS={}".format(args.bert_type,str(is_lowercase).lower(),base_path+dataset+"/train",base_path+dataset+"/dev",base_path+dataset+"/test","'"+layer_indices+"'",0,"'"+"_".join(["1" for i in range(len(layer_indices.split("_")))])+"'",args.batch_size,args.max_pieces,exit_threshold,'false','false',args.num_epochs)
for i in range(start, n_test):
#lr = str(10**random.uniform(lrs[0], lrs[1]))
lr = str(lrs[random.randint(0, len(lrs))-1])
dr = str(random.uniform(dropout[0], dropout[1]))
seed = str(random.randint(0,100000))
local_dir = args.work_dir+args.bert_type+"/"+dataset+"/experiment_{}_{}/".format(layer_indices, i)
local_extra_args = copy.copy(extra_args)
allennlp_cmd = "allennlp train {} --serialization-dir {} --include-package allennlp_overrides -f".format(training_config_file, local_dir)
if slurm is None:
os.environ["SEED"] = seed
os.environ["PYTORCH_SEED"] = seed
os.environ["NUMPY_SEED"] = seed
os.environ["DROPOUT"] = dr
os.environ["LEARNING_RATE"] = lr
cmd = allennlp_cmd
else:
local_extra_args += ",SEED={},PYTORCH_SEED={},NUMPY_SEED={},DROPOUT={},LEARNING_RATE={}".format(seed,seed,seed,dr,lr)
cmd = "srun -p allennlp_hipri -w {} --gpus=1 {} {}".format(slurm, local_extra_args, allennlp_cmd)
print(cmd)
return_value = subprocess.call(cmd, shell=True)
if return_value != 0:
for j in range(200):
if not dont_delete:
f = "{}/model_state_epoch_{}.th".format(local_dir, j)
rm_if_exists(f)
f = "{}/training_state_epoch_{}.th".format(local_dir, j)
rm_if_exists(f)
f = local_dir+"/best.th"
rm_if_exists(f)
# If we are not deleting intermediate models, we don't need the final model.tar.gz file
if dont_delete:
f = local_dir+"/model.tar.gz"
rm_if_exists(f)
return 0
def rm_if_exists(f):
if os.path.exists(f):
os.remove(f)
return 1
else:
return 0
def arg_parser():
"""Extracting CLI arguments"""
p = ArgumentParser(add_help=False)
p.add_argument("-b", "--batch_size", help="Batch size", type=int, default=72)
p.add_argument("-s", "--start",
help="First experiment index to run",
type=int, default=0)
p.add_argument("-t", "--bert_type", help="Bert type (bert-{base,large}-{cased,uncased})", type=str,
default='bert-large-uncased')
p.add_argument("-n", "--n_tests", help="Number of grid search experiments to run", type=int, default=1)
p.add_argument("-x", "--max_pieces", help="Maximum number of word pieces for BERT", type=int, default=512)
p.add_argument("-c", "--num_epochs", help="Number of epochs to run", type=int, default=2)
p.add_argument("-l", "--layer_indices", help="Indices of layers to train classifiers for", type=str, default="23")
p.add_argument("-d", "--dataset", help="Dataset to work with", required=True)
p.add_argument("-i", "--nli", help="Is this an NLI experiment? (if not, it's text_cat)", action='store_true')
p.add_argument("-r", "--slurm", help="Run jobs on SLURM using this server", type=str)
p.add_argument('-w', '--work_dir', help="Working directory. Should contain a directory for the bert_type, which contains another directory for the dataset", type=str, default="")
p.add_argument('--data_dir', help="Dataset directory. Should contain 'text_cat' and/or 'nli' folders, containing a directory for the dataset, which contains three files: train, dev and test", type=str, required=True)
p.add_argument("-u", "--cuda_device", help="CUDA device (or -1 for CPU)", type=int, default=0)
return ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter,
parents=[p])
if __name__ == '__main__':
sys.exit(main())
| 45.894366 | 570 | 0.603038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,251 | 0.345404 |
90c797e8f34b8974fc670dfa1133ddb2a05d7a20 | 918 | py | Python | utils/gather_files.py | letsgo247/KFG | dd055fc8349a9ea95497d04ecc4b52c04e09debc | ["MIT"] | null | null | null | utils/gather_files.py | letsgo247/KFG | dd055fc8349a9ea95497d04ecc4b52c04e09debc | ["MIT"] | null | null | null | utils/gather_files.py | letsgo247/KFG | dd055fc8349a9ea95497d04ecc4b52c04e09debc | ["MIT"] | null | null | null | import os
import shutil
import time
def read_all_file(path):
output = os.listdir(path)
file_list = []
for i in output:
if os.path.isdir(path+"/"+i):
file_list.extend(read_all_file(path+"/"+i))
elif os.path.isfile(path+"/"+i):
file_list.append(path+"/"+i)
return file_list
def copy_all_file(file_list, new_path):
for src_path in file_list:
file = src_path.split("/")[-1]
shutil.copyfile(src_path, new_path+"/"+file)
# print("파일 {} 작업 완료".format(file)) # 작업한 파일명 출력
src_path = "C:\dev\KFG\Data/Korean/AFAD/AFAD-Lite" # 기존 폴더 경로
new_path = "C:\dev\KFG\Data/Korean/AFAD/AFAD_gathered" # 옮길 폴더 경로
start_time = time.time() # 작업 시작 시간
file_list = read_all_file(src_path)
copy_all_file(file_list, new_path)
print("=" * 40)
print("러닝 타임 : {}".format(time.time() - start_time)) # 총 소요시간 계산 | 27 | 65 | 0.604575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.291833 |
90c806da308d72b961e15453707e27dd9643ad2b | 1,897 | py | Python | code/diva_evaluation_cli/bin/commands/actev_get_system_subcommands/git_command.py | wenhel/Argus | 39768a8d1671eb80f86bbd67e58478a4cbdcdeca | ["MIT"] | 4 | 2019-06-28T23:27:43.000Z | 2021-09-27T03:17:58.000Z | code/diva_evaluation_cli/bin/commands/actev_get_system_subcommands/git_command.py | wenhel/Argus | 39768a8d1671eb80f86bbd67e58478a4cbdcdeca | ["MIT"] | 2 | 2020-01-16T19:39:44.000Z | 2021-02-24T22:45:37.000Z | code/diva_evaluation_cli/bin/commands/actev_get_system_subcommands/git_command.py | wenhel/Argus | 39768a8d1671eb80f86bbd67e58478a4cbdcdeca | ["MIT"] | 1 | 2019-09-09T07:40:45.000Z | 2019-09-09T07:40:45.000Z | """Actev module: get-system git
Actev modules are used to parse actev commands in order to get arguments
before calling associated entry point methods to execute systems.
Warning: this file should not be modified: see src/entry_points to add your source code.
"""
from diva_evaluation_cli.bin.commands.actev_command import ActevCommand
class ActevGetSystemGit(ActevCommand):
"""Clones a git repository
Command Args:
* location or l: path to store the system
* user or U: url to get the system
* password or p: password to access the url
* token or t: token to access the url
* install-cli or i: install the cli to use it
"""
def __init__(self):
super(ActevGetSystemGit, self).__init__('git', "get_git.sh")
def cli_parser(self, arg_parser):
"""Configure the description and the arguments (positional and optional) to parse.
Args:
arg_parser(:obj:`ArgParser`): Python arg parser to describe how to parse the command
"""
arg_parser.description= "Downloads a git repository"
required_named = arg_parser.add_argument_group('required named arguments')
arg_parser.add_argument("-U", "--user", help="username to access the url")
arg_parser.add_argument("-p", "--password", help="password to access the url"
"Warning: if password starts with \'-\', use this: --password=<your password>")
arg_parser.add_argument("-l", "--location", help="path to store the system")
arg_parser.add_argument("-t", "--token", help="token to access the url"
"Warning: if token starts with \'-\', use this: --token=<your token>",
type=str)
arg_parser.add_argument("-i", "--install-cli", help="install the cli to use it", action='store_true')
| 43.113636 | 111 | 0.641012 | 1,554 | 0.819188 | 0 | 0 | 0 | 0 | 0 | 0 | 1,231 | 0.648919 |
90c8c7759fc79bbde613e0b6f813d1b176c1ecaf | 902 | py | Python | 1.8.first-promise.py | senpl/course-promises | df4b64a02d0a7ab4bb372c0fae5e92c98830cd44 | ["MIT"] | 3 | 2017-02-05T03:17:13.000Z | 2019-03-11T19:44:34.000Z | 1.8.first-promise.py | senpl/course-promises | df4b64a02d0a7ab4bb372c0fae5e92c98830cd44 | ["MIT"] | null | null | null | 1.8.first-promise.py | senpl/course-promises | df4b64a02d0a7ab4bb372c0fae5e92c98830cd44 | ["MIT"] | 7 | 2016-06-19T02:24:09.000Z | 2018-09-11T01:32:02.000Z | import re
textinput = widget_inputs["text1"]
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
is_correct = False
result = re.match(".*window.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = True
commentizer("You're right, but there's a little more to it than that. Make sure you watch the solution video.")
result = re.match(".*global.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = True
commentizer("Right! It's the global object.")
result = re.match(".*promise.*", textinput, flags=re.IGNORECASE)
if result:
is_correct = False
commentizer("It's not the Promise. Take another look!")
if not is_correct and len(comments) == 0:
commentizer("Not quite. Just log `this` somewhere in the Promise to see what happens.")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct | 29.096774 | 115 | 0.701774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.348115 |
90c8e1c41e29404a0d0d0511d6b46db43890fb89 | 4,106 | py | Python | test/test_markdown_parser.py | Asana/SGTM | 0e9e236980ed68e80e021470da6374945bbac501 | [
"MIT"
]
| 8 | 2020-12-05T00:13:03.000Z | 2022-01-11T11:35:51.000Z | test/test_markdown_parser.py | Asana/SGTM | 0e9e236980ed68e80e021470da6374945bbac501 | [
"MIT"
]
| 12 | 2020-12-14T18:21:21.000Z | 2022-03-29T17:06:20.000Z | test/test_markdown_parser.py | Asana/SGTM | 0e9e236980ed68e80e021470da6374945bbac501 | [
"MIT"
]
| 2 | 2021-06-27T09:32:55.000Z | 2022-02-27T23:17:36.000Z | import unittest
from html import escape
from src.markdown_parser import convert_github_markdown_to_asana_xml
class TestConvertGithubMarkdownToAsanaXml(unittest.TestCase):
def test_basic_markdown(self):
md = """~~strike~~ **bold** _italic_ `code` [link](asana.com)"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
'<s>strike</s> <strong>bold</strong> <em>italic</em> <code>code</code> <a href="asana.com">link</a>\n',
)
def test_ul_tag(self):
md = """* bullet one\n* bullet two"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml, """<ul>\n<li>bullet one</li>\n<li>bullet two</li>\n</ul>\n""",
)
def test_ol_tag(self):
md = """1. bullet one\n2. bullet two"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml, """<ol>\n<li>bullet one</li>\n<li>bullet two</li>\n</ol>\n""",
)
def test_paragraph(self):
md = "we don't wrap random text in p tags"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(md + "\n", xml)
def test_block_quote(self):
md = "> block quote"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<em>> block quote\n</em>")
def test_horizontal_rule(self):
# Asana doesn't support <hr /> tags, so we just ignore them
md = "hello\n\n---\nworld\n"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, md) # unchanged
def test_auto_linking(self):
md = "https://asana.com/ [still works](www.test.com)"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
'<a href="https://asana.com/">https://asana.com/</a> <a href="www.test.com">still works</a>\n',
)
def test_converts_headings_to_bold(self):
md = "## heading"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "\n<b>heading</b>\n")
def test_nested_code_within_block_quote(self):
md = "> abc `123`"
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<em>> abc <code>123</code>\n</em>")
def test_removes_pre_tags_inline(self):
md = """```test```"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "<code>test</code>\n")
def test_removes_pre_tags_block(self):
md = """see:
```
function foo = () => null;
```
"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, "see:\n<code>function foo = () => null;\n</code>\n")
def test_escapes_raw_html_mixed_with_markdown(self):
md = """## <img href="link" />still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
"\n<b>"
+ escape('<img href="link" />')
+ "still here "
+ escape("<h3>header</h3>")
+ "</b>\n",
)
def test_escapes_raw_html_on_own_lines(self):
md = """## blah blah blah
<img href="link">
still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
"\n<b>blah blah blah</b>\n"
+ escape('<img href="link">\n')
+ "still here "
+ escape("<h3>header</h3>"),
)
def test_escapes_raw_html(self):
md = """<img href="link" />still here <h3>header</h3>"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(
xml,
escape('<img href="link" />') + "still here " + escape("<h3>header</h3>\n"),
)
def test_removes_images(self):
        md = """![image](https://image.com)"""
xml = convert_github_markdown_to_asana_xml(md)
self.assertEqual(xml, '<a href="https://image.com">image</a>\n')
if __name__ == "__main__":
from unittest import main as run_tests
run_tests()
| 33.933884 | 115 | 0.585485 | 3,904 | 0.950804 | 0 | 0 | 0 | 0 | 0 | 0 | 1,343 | 0.327082 |
90cb7a749d3451fbf2614d7b4cc6d0e278b4fb6b | 3,724 | py | Python | ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/ftd/ftd_file_upload.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
]
| 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/ftd/ftd_file_upload.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
]
| 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/modules/network/ftd/ftd_file_upload.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
]
| null | null | null | #!/usr/bin/python
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ftd_file_upload
short_description: Uploads files to Cisco FTD devices over HTTP(S)
description:
- Uploads files to Cisco FTD devices including disk files, backups, and upgrades.
version_added: "2.7"
author: "Cisco Systems, Inc."
options:
operation:
description:
- The name of the operation to execute.
      - Only operations that upload a file can be used in this module.
required: true
type: str
file_to_upload:
description:
- Absolute path to the file that should be uploaded.
required: true
type: path
version_added: "2.8"
register_as:
description:
      - Specifies the Ansible fact name used to register the response received from the FTD device.
type: str
"""
EXAMPLES = """
- name: Upload disk file
ftd_file_upload:
operation: 'postuploaddiskfile'
file_to_upload: /tmp/test1.txt
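
# Illustrative variant (not from the original module docs): the same upload, but
# also storing the server response as an Ansible fact via the register_as option
# documented above.
- name: Upload disk file and register the response
  ftd_file_upload:
    operation: 'postuploaddiskfile'
    file_to_upload: /tmp/test1.txt
    register_as: upload_status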
"""
RETURN = """
msg:
description: The error message describing why the module failed.
returned: error
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.ftd.common import construct_ansible_facts, FtdServerError, HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField
def is_upload_operation(op_spec):
return op_spec[OperationField.METHOD] == HTTPMethod.POST or 'UploadStatus' in op_spec[OperationField.MODEL_NAME]
def main():
fields = dict(
operation=dict(type='str', required=True),
file_to_upload=dict(type='path', required=True),
register_as=dict(type='str'),
)
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
params = module.params
connection = Connection(module._socket_path)
op_spec = connection.get_operation_spec(params['operation'])
if op_spec is None:
module.fail_json(msg='Operation with specified name is not found: %s' % params['operation'])
if not is_upload_operation(op_spec):
module.fail_json(
msg='Invalid upload operation: %s. The operation must make POST request and return UploadStatus model.' %
params['operation'])
try:
if module.check_mode:
module.exit_json()
resp = connection.upload_file(params['file_to_upload'], op_spec[OperationField.URL])
module.exit_json(changed=True, response=resp, ansible_facts=construct_ansible_facts(resp, module.params))
except FtdServerError as e:
module.fail_json(msg='Upload request for %s operation failed. Status code: %s. '
'Server response: %s' % (params['operation'], e.code, e.response))
if __name__ == '__main__':
main()
| 33.854545 | 117 | 0.705961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,072 | 0.556391 |
90cbba8be9d213494b028b083d0b0f3629b1868d | 1,244 | py | Python | agent/indy_catalyst_agent/messaging/trustping/routes.py | nairobi222/indy-catalyst | dcbd80524ace7747ecfecd716ff932e9b571d69a | [
"Apache-2.0"
]
| 2 | 2019-08-16T11:21:10.000Z | 2022-01-03T21:10:59.000Z | agent/indy_catalyst_agent/messaging/trustping/routes.py | nairobi222/indy-catalyst | dcbd80524ace7747ecfecd716ff932e9b571d69a | [
"Apache-2.0"
]
| null | null | null | agent/indy_catalyst_agent/messaging/trustping/routes.py | nairobi222/indy-catalyst | dcbd80524ace7747ecfecd716ff932e9b571d69a | [
"Apache-2.0"
]
| null | null | null | """Trust ping admin routes."""
from aiohttp import web
from aiohttp_apispec import docs
from ..connections.models.connection_record import ConnectionRecord
from .messages.ping import Ping
from ...storage.error import StorageNotFoundError
@docs(tags=["trustping"], summary="Send a trust ping to a connection")
async def connections_send_ping(request: web.BaseRequest):
"""
Request handler for sending a trust ping to a connection.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
connection_id = request.match_info["id"]
outbound_handler = request.app["outbound_message_router"]
try:
connection = await ConnectionRecord.retrieve_by_id(context, connection_id)
except StorageNotFoundError:
return web.HTTPNotFound()
if connection.is_active or connection.state == connection.STATE_RESPONSE:
msg = Ping()
await outbound_handler(msg, connection_id=connection_id)
await connection.log_activity(context, "ping", connection.DIRECTION_SENT)
return web.HTTPOk()
async def register(app: web.Application):
"""Register routes."""
app.add_routes([web.post("/connections/{id}/send-ping", connections_send_ping)])
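# Illustrative wiring sketch (hypothetical setup, not part of the original file):
# the surrounding agent normally populates "request_context" and
# "outbound_message_router" on the app before this route is used.
#
#   app = web.Application()
#   await register(app)  # adds POST /connections/{id}/send-ping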
| 29.619048 | 84 | 0.730707 | 0 | 0 | 0 | 0 | 844 | 0.678457 | 927 | 0.745177 | 304 | 0.244373 |
90cceb9760edd56688a99f6ee73a68bdf983a84e | 619 | py | Python | python/wotdbg.py | wanyancan/wot-debugserver | 4c6dd5c511659885abb355bef7c9318ed3e42937 | [
"MIT"
]
| 32 | 2015-01-23T16:13:20.000Z | 2021-05-29T21:11:42.000Z | python/wotdbg.py | wanyancan/wot-debugserver | 4c6dd5c511659885abb355bef7c9318ed3e42937 | [
"MIT"
]
| null | null | null | python/wotdbg.py | wanyancan/wot-debugserver | 4c6dd5c511659885abb355bef7c9318ed3e42937 | [
"MIT"
]
| 16 | 2015-08-25T08:02:52.000Z | 2022-01-19T19:16:16.000Z | import os.path
import tcprepl
import BigWorld
def echo(s):
'''Send string to client'''
if tcprepl.write_client is not None:
tcprepl.write_client(s)
def exec_file(filename, exec_globals=None):
'''
Execute file
Try to find file named `filename` and execute it. If `exec_globals` is
specified it is used as globals-dict in exec context.
'''
if exec_globals is None:
exec_globals = {}
if not os.path.isfile(filename):
filename = BigWorld.wg_resolveFileName(filename)
with open(filename, 'r') as f:
code = f.read()
        exec(code, exec_globals)
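# Illustrative usage sketch (the file name below is hypothetical, not part of the
# original module):
#
#   echo("loading debug helpers...")
#   exec_file("scripts/debug_helpers.py", {"BigWorld": BigWorld})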
| 22.107143 | 74 | 0.660743 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.310178 |
90cd458888a31c41557f6a303abf3a9a1b516bae | 40 | py | Python | quicken/_internal/__init__.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
]
| 3 | 2019-11-12T17:56:08.000Z | 2022-03-12T03:43:10.000Z | quicken/_internal/__init__.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
]
| 47 | 2018-12-10T04:08:58.000Z | 2022-03-20T14:54:36.000Z | quicken/_internal/__init__.py | chrahunt/quicken | 2dd00a5f024d7b114b211aad8a2618ec8f101956 | [
"MIT"
]
| 1 | 2019-11-12T17:55:17.000Z | 2019-11-12T17:55:17.000Z | class QuickenError(Exception):
pass
| 13.333333 | 30 | 0.75 | 39 | 0.975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
90d0a5da5e0a56dfea4ae7b895d09b65486f6dc4 | 1,861 | py | Python | folderikon/exceptions.py | demberto/FolderIkon | 62088b3e46266a6a53632a1e85bdb3d0a46e8cab | [
"MIT"
]
| 1 | 2021-10-30T17:49:28.000Z | 2021-10-30T17:49:28.000Z | folderikon/exceptions.py | demberto/FolderIkon | 62088b3e46266a6a53632a1e85bdb3d0a46e8cab | [
"MIT"
]
| null | null | null | folderikon/exceptions.py | demberto/FolderIkon | 62088b3e46266a6a53632a1e85bdb3d0a46e8cab | [
"MIT"
]
| null | null | null | """Exception which are not actually thrown, only their docstrings are used."""
import colorama
import sys
__all__ = [
"Error",
"ParentIsNotAFolderError",
"InvalidURLError",
"ImageFormatNotSupportedError",
"ImageNotSpecifiedError",
"FolderIconAlreadyExistsError",
"DesktopIniError",
"exception_exit",
]
class Error(Exception):
"""Base class for all FolderIkon errors."""
color = False
def __repr__(self):
return self.red(self.__doc__)
@staticmethod
def red(string):
if Error.color:
return colorama.Fore.RED + string
return string
class ParentIsNotAFolderError(Error):
"""Argument passed to --parent is not a folder."""
class InvalidURLError(Error):
"""Invalid image URL"""
def __init__(self, url):
self.__url = url
super().__init__()
def __repr__(self):
return super().__repr__() + " " + self.__url
class ImageFormatNotSupportedError(Error):
def __init__(self, fmt):
self.__fmt = fmt
super().__init__()
def __repr__(self):
return f"Image format {self.red(self.__fmt)} is not supported. Only ICO, JPG and PNG are supported."
class ImageNotSpecifiedError(Error):
"""An image with a supported format could not be found in this directory."""
class FolderIconAlreadyExistsError(Error):
"""Folder icon already exists."""
class DesktopIniError(Error):
"""The 'desktop.ini' file could not be parsed. Delete it and try again."""
def __init__(self, exc):
self.__exc = exc
super().__init__()
def __repr__(self):
exc_name = self.__exc.__class__.__name__
        exc_info = f"An exception of {exc_name} occurred when parsing it."
return super().__repr__() + " " + exc_info
def exception_exit(exc):
print(repr(exc()))
sys.exit(-1)
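# Illustrative usage sketch (not part of the original module): callers can enable
# colored output by flipping Error.color, then report a failure and exit.
#
#   colorama.init()
#   Error.color = True
#   exception_exit(ImageNotSpecifiedError)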
| 22.975309 | 108 | 0.652875 | 1,437 | 0.772166 | 0 | 0 | 126 | 0.067706 | 0 | 0 | 696 | 0.373992 |
90d0ff663bec28d2147cf9908b2baeccc98682a7 | 1,224 | py | Python | quickbooks/objects/companycurrency.py | varunbheemaiah/python-quickbooks | f5459a07619ae220e24099c6e0c8e8db890bb66b | [
"MIT"
]
| 234 | 2015-08-25T02:41:33.000Z | 2020-03-30T15:30:23.000Z | quickbooks/objects/companycurrency.py | varunbheemaiah/python-quickbooks | f5459a07619ae220e24099c6e0c8e8db890bb66b | [
"MIT"
]
| 170 | 2015-09-12T07:02:32.000Z | 2020-03-20T13:34:34.000Z | quickbooks/objects/companycurrency.py | varunbheemaiah/python-quickbooks | f5459a07619ae220e24099c6e0c8e8db890bb66b | [
"MIT"
]
| 142 | 2015-08-26T07:08:56.000Z | 2020-03-20T11:59:52.000Z | from six import python_2_unicode_compatible
from .base import QuickbooksManagedObject, QuickbooksTransactionEntity, Ref, CustomField, MetaData
@python_2_unicode_compatible
class CompanyCurrency(QuickbooksManagedObject, QuickbooksTransactionEntity):
"""
QBO definition: Applicable only for those companies that enable multicurrency, a companycurrency object
defines a currency that is active in the QuickBooks Online company. One or more companycurrency objects
are active based on the company's multicurrency business requirements and correspond to the list
displayed by the Currency Center in the QuickBooks Online UI
"""
class_dict = {
"CustomField": CustomField,
"MetaData": MetaData,
}
qbo_object_name = "CompanyCurrency"
def __init__(self):
super(CompanyCurrency, self).__init__()
self.Id = None
self.Code = ""
self.Name = ""
self.Active = True
self.CustomField = None
self.MetaData = None
def __str__(self):
return self.Name
def to_ref(self):
ref = Ref()
ref.name = self.Name
ref.type = self.qbo_object_name
ref.value = self.Id
return ref
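# Illustrative sketch (values are made up, not part of the library source): a
# CompanyCurrency can be converted to a Ref for linking from other QBO objects.
#
#   currency = CompanyCurrency()
#   currency.Id = "1"
#   currency.Name = "United States Dollar"
#   ref = currency.to_ref()  # ref.type == "CompanyCurrency", ref.value == "1"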
| 28.465116 | 107 | 0.688725 | 1,049 | 0.857026 | 0 | 0 | 1,078 | 0.880719 | 0 | 0 | 437 | 0.357026 |
90d1622a98abf59a298f4b58a00b76f812b4c744 | 604 | py | Python | books/migrations/0004_alter_book_category.py | MwinyiMoha/books-service | 31a980a8505c5d5c2acad698bb493fad8c0ce8fe | [
"MIT"
]
| null | null | null | books/migrations/0004_alter_book_category.py | MwinyiMoha/books-service | 31a980a8505c5d5c2acad698bb493fad8c0ce8fe | [
"MIT"
]
| 3 | 2021-04-08T17:44:07.000Z | 2021-04-12T09:38:26.000Z | books/migrations/0004_alter_book_category.py | MwinyiMoha/books-service | 31a980a8505c5d5c2acad698bb493fad8c0ce8fe | [
"MIT"
]
| null | null | null | # Generated by Django 3.2 on 2021-04-10 12:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("books", "0003_auto_20210410_1231")]
operations = [
migrations.AlterField(
model_name="book",
name="category",
field=models.CharField(
choices=[
("fiction", "Fiction"),
("regular", "Regular"),
("novel", "Novel"),
],
default="regular",
max_length=7,
),
)
]
| 24.16 | 57 | 0.47351 | 513 | 0.849338 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.251656 |
90d18046bd7a28075eedb453f3f4d1aabe4f7e65 | 16,575 | py | Python | syft/execution/placeholder.py | juharris/PySyft | dbb70f24cc55a7dca032fb06f1a8662cb15092a9 | [
"Apache-2.0"
]
| null | null | null | syft/execution/placeholder.py | juharris/PySyft | dbb70f24cc55a7dca032fb06f1a8662cb15092a9 | [
"Apache-2.0"
]
| null | null | null | syft/execution/placeholder.py | juharris/PySyft | dbb70f24cc55a7dca032fb06f1a8662cb15092a9 | [
"Apache-2.0"
]
| null | null | null | from itertools import zip_longest
import syft
from syft.generic.frameworks.hook import hook_args
from syft.generic.abstract.tensor import AbstractTensor
from syft.workers.abstract import AbstractWorker
from syft_proto.execution.v1.placeholder_pb2 import Placeholder as PlaceholderPB
class PlaceHolder(AbstractTensor):
def __init__(
self,
role=None,
tracing=False,
id=None,
tags: set = None,
description: str = None,
shape=None,
expected_dtype=None,
):
"""A PlaceHolder acts as a tensor but does nothing special. It can get
"instantiated" when a real tensor is appended as a child attribute. It
will send forward all the commands it receives to its child tensor.
        When you send a PlaceHolder, you don't send the instantiated tensors.
Args:
id: An optional string or integer id of the PlaceHolder.
"""
super().__init__(id=id, tags=tags, description=description)
if not isinstance(self.id, syft.execution.placeholder_id.PlaceholderId):
self.id = syft.execution.placeholder_id.PlaceholderId(self.id)
self.expected_shape = tuple(shape) if shape is not None else None
self.expected_dtype = expected_dtype
self.child = None
self.role = role
self.tracing = tracing
def get_class_attributes(self):
"""
Specify all the attributes need to build a wrapper correctly when returning a response.
"""
return {"role": self.role, "tracing": self.tracing}
@classmethod
def handle_func_command(cls, command):
""" Receive an instruction for a function to be applied on a Placeholder,
Replace in the args with their child attribute, forward the command
instruction to the handle_function_command of the type of the child attributes,
get the response and wrap it in a Placeholder.
We use this method to perform the tracing.
Args:
command: instruction of a function command: (command name,
<no self>, arguments[, kwargs])
Returns:
the response of the function command
"""
cmd, _, args, kwargs = command
# Replace all PlaceHolders with their child attribute
new_args, new_kwargs, new_type = hook_args.unwrap_args_from_function(cmd, args, kwargs)
# build the new command
new_command = (cmd, None, new_args, new_kwargs)
# Send it to the appropriate class and get the response
response = new_type.handle_func_command(new_command)
# Find first placeholder in args
template_placeholder = None
for arg in args:
if isinstance(arg, PlaceHolder):
                template_placeholder = arg
                break
placeholders = PlaceHolder.convert_to_placeholders(response, template_placeholder)
if template_placeholder.tracing:
template_placeholder.role.register_action(
(command, placeholders), syft.execution.computation.ComputationAction
)
return placeholders
@staticmethod
    def convert_to_placeholders(response, template_placeholder):
        """ Turn the response back into PlaceHolders. """
if isinstance(response, (tuple, list)):
placeholders = tuple(
PlaceHolder.create_from(
r, role=template_placeholder.role, tracing=template_placeholder.tracing
)
for r in response
)
else:
placeholders = PlaceHolder.create_from(
response, role=template_placeholder.role, tracing=template_placeholder.tracing
)
return placeholders
def __getattribute__(self, name):
"""Try to find the attribute in the current object
and in case we can not then we forward it to the child
"""
try:
response = object.__getattribute__(self, name)
except AttributeError:
child = object.__getattribute__(self, "child")
response = getattr(child, name)
return response
def instantiate(self, tensor):
"""
        Add a tensor as a child attribute. All operations on the placeholder will also be
        executed on this child tensor.
        We remove PlaceHolders if there are any.
"""
if isinstance(tensor, PlaceHolder):
self.child = tensor.child
else:
self.child = tensor
if hasattr(self.child, "shape"):
self.expected_shape = tuple(self.child.shape)
if hasattr(self.child, "dtype"):
self.expected_dtype = self.child.dtype
return self
def __str__(self) -> str:
"""
Compact representation of a Placeholder, including tags and optional child
"""
tags = " ".join(list(self.tags or []))
out = f"{type(self).__name__ }[Id:{self.id.value}]"
if hasattr(self, "child") and self.child is not None:
out += f">{self.child}"
return out
__repr__ = __str__
def send(self, *args, **kwargs):
"""
calls move on child & register_action to role
"""
response = self.child.send(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("send", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def move(self, *args, **kwargs):
"""
calls move on a pointer tensor & register_action to role
"""
response = self.child.move(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("move", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def share(self, *args, **kwargs):
"""
Send a command to remote worker to additively share a tensor via pointer tensor
"""
response = self.child.share(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("share", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def fix_prec(self, *args, **kwargs):
"""
sends command to remote worker to transform a tensor to fix_precision via pointer tensor
"""
response = self.child.fix_prec(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("fix_prec", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.computation.ComputationAction
)
return placeholder
def mid_get(self, *args, **kwargs):
response = self.child.mid_get(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(self.child, self)
command = ("mid_get", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def remote_get(self, *args, **kwargs):
"""
calls remote_get on child & register_action to role
"""
response = self.child.remote_get(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("remote_get", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def remote_send(self, *args, **kwargs):
"""
calls remote_send on child & register_action to role
"""
response = self.child.remote_send(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("remote_send", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def share_(self, *args, **kwargs):
"""
calls share_ on child & register_action to role
"""
response = self.child.share_(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("share_", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def get(self, *args, **kwargs):
"""Requests the tensor/chain being pointed to, be serialized and return via child"""
response = self.child.get(*args, **kwargs)
placeholder = PlaceHolder.convert_to_placeholders(response, self)
command = ("get", self, args, kwargs)
self.role.register_action(
(command, placeholder), syft.execution.communication.CommunicationAction
)
return placeholder
def copy(self):
"""
Copying a placeholder doesn't duplicate the child attribute, because all
copy operations happen locally where we want to keep reference to the same
instantiated object. As the child doesn't get sent, this is not an issue.
"""
placeholder = PlaceHolder(
role=self.role,
tracing=self.tracing,
tags=self.tags,
shape=self.expected_shape,
expected_dtype=self.expected_dtype,
)
placeholder.child = self.child
if self.tracing:
command = ("copy", self, (), {}), placeholder
self.role.register_action(command, syft.execution.computation.ComputationAction)
return placeholder
@staticmethod
def create_from(tensor, role=None, tracing=False):
""" Helper method to create a placeholder already
instantiated with tensor.
"""
return PlaceHolder(role=role, tracing=tracing).instantiate(tensor)
@staticmethod
def insert(tensor, after, role=None, tracing=False):
""" Helper method to add a placeholder in the specific place of tensor chain. """
current_level = tensor
while not isinstance(current_level, after) and current_level is not None:
current_level = getattr(current_level, "child", None)
if current_level is None:
raise RuntimeError(
f"Cannot insert Placeholder, chain does not contain {after.__name__} tensor type."
)
child = getattr(current_level, "child", None)
if child is None:
raise RuntimeError(
f"Cannot insert Placeholder, {after.__name__} does not wrap anything."
)
placeholder = PlaceHolder.create_from(child, role, tracing)
current_level.child = placeholder
return placeholder
@staticmethod
def extract(tensor):
""" Helper method to find and return placeholder in the tensor chain. """
current_level = tensor
while not isinstance(current_level, PlaceHolder) and current_level is not None:
current_level = getattr(current_level, "child", None)
return current_level
@staticmethod
def create_placeholders(args_shape, args_dtypes=()):
""" Helper method to create a list of placeholders with shapes
in args_shape.
"""
# In order to support -1 value in shape to indicate any dimension
# we map -1 to 1 for shape dimensions.
# TODO: A more complex strategy could be used
mapped_shapes = []
for shape in args_shape:
if list(filter(lambda x: x < -1, shape)):
raise ValueError(f"Invalid shape {shape}")
mapped_shapes.append(tuple(map(lambda y: 1 if y == -1 else y, shape)))
return [
syft.framework.hook.create_zeros(shape, dtype=dtype, requires_grad=False)
for shape, dtype in zip_longest(mapped_shapes, args_dtypes)
]
@staticmethod
def instantiate_placeholders(obj, response):
"""
Utility function to instantiate recursively an object containing placeholders
with a similar object but containing tensors
"""
if obj is not None:
if isinstance(obj, PlaceHolder):
obj.instantiate(response)
elif isinstance(obj, (list, tuple)):
for ph, rep in zip(obj, response):
PlaceHolder.instantiate_placeholders(ph, rep)
else:
raise ValueError(
f"Response of type {type(response)} is not supported in "
"Placeholder.instantiate."
)
@staticmethod
def simplify(worker: AbstractWorker, placeholder: "PlaceHolder") -> tuple:
"""Takes the attributes of a PlaceHolder and saves them in a tuple.
Args:
worker: the worker doing the serialization
placeholder: a PlaceHolder.
Returns:
tuple: a tuple holding the unique attributes of the PlaceHolder.
"""
return (
syft.serde.msgpack.serde._simplify(worker, placeholder.id),
syft.serde.msgpack.serde._simplify(worker, placeholder.tags),
syft.serde.msgpack.serde._simplify(worker, placeholder.description),
syft.serde.msgpack.serde._simplify(worker, placeholder.expected_shape),
)
@staticmethod
def detail(worker: AbstractWorker, tensor_tuple: tuple) -> "PlaceHolder":
"""
        This function reconstructs a PlaceHolder given its attributes in the form of a tuple.
Args:
worker: the worker doing the deserialization
tensor_tuple: a tuple holding the attributes of the PlaceHolder
Returns:
PlaceHolder: a PlaceHolder
"""
tensor_id, tags, description, shape = tensor_tuple
tensor_id = syft.serde.msgpack.serde._detail(worker, tensor_id)
tags = syft.serde.msgpack.serde._detail(worker, tags)
description = syft.serde.msgpack.serde._detail(worker, description)
shape = syft.serde.msgpack.serde._detail(worker, shape)
return PlaceHolder(id=tensor_id, tags=tags, description=description, shape=shape)
@staticmethod
def bufferize(worker: AbstractWorker, placeholder: "PlaceHolder") -> PlaceholderPB:
"""Takes the attributes of a PlaceHolder and saves them in a Protobuf message.
Args:
worker: the worker doing the serialization
placeholder: a PlaceHolder.
Returns:
PlaceholderPB: a Protobuf message holding the unique attributes of the PlaceHolder.
"""
protobuf_placeholder = PlaceholderPB()
syft.serde.protobuf.proto.set_protobuf_id(protobuf_placeholder.id, placeholder.id.value)
protobuf_placeholder.tags.extend(placeholder.tags)
if placeholder.description:
protobuf_placeholder.description = placeholder.description
if placeholder.expected_shape:
protobuf_placeholder.expected_shape.dims.extend(placeholder.expected_shape)
return protobuf_placeholder
@staticmethod
def unbufferize(worker: AbstractWorker, protobuf_placeholder: PlaceholderPB) -> "PlaceHolder":
"""
        This function reconstructs a PlaceHolder given its attributes in the form of a
Protobuf message.
Args:
worker: the worker doing the deserialization
protobuf_placeholder: a Protobuf message holding the attributes of the PlaceHolder
Returns:
PlaceHolder: a PlaceHolder
"""
tensor_id = syft.serde.protobuf.proto.get_protobuf_id(protobuf_placeholder.id)
tags = set(protobuf_placeholder.tags)
description = None
if bool(protobuf_placeholder.description):
description = protobuf_placeholder.description
expected_shape = tuple(protobuf_placeholder.expected_shape.dims) or None
return PlaceHolder(id=tensor_id, tags=tags, description=description, shape=expected_shape)
@staticmethod
def get_protobuf_schema() -> PlaceholderPB:
return PlaceholderPB
### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(PlaceHolder)
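# Illustrative sketch (assumes torch is installed and `some_role` is an existing
# Role instance; not part of the original module):
#
#   import torch
#   ph = PlaceHolder.create_from(torch.tensor([1.0, 2.0]), role=some_role, tracing=False)
#   ph.expected_shape  # (2,)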
| 37.247191 | 98 | 0.640422 | 16,193 | 0.976953 | 0 | 0 | 8,575 | 0.517345 | 0 | 0 | 5,092 | 0.30721 |
90d1a9736c8c20db26fd37eb2b021b1ef2da5b41 | 21,547 | py | Python | creativeflow/blender/render_main.py | idaho777/creativeflow | adf7a9e1cf70005560cfbf8064137fb1236bc574 | [
"MIT"
]
| 53 | 2019-10-02T17:10:49.000Z | 2022-03-04T17:35:26.000Z | creativeflow/blender/render_main.py | idaho777/creativeflow | adf7a9e1cf70005560cfbf8064137fb1236bc574 | [
"MIT"
]
| 1 | 2021-09-24T01:00:54.000Z | 2021-09-24T03:06:43.000Z | creativeflow/blender/render_main.py | idaho777/creativeflow | adf7a9e1cf70005560cfbf8064137fb1236bc574 | [
"MIT"
]
| 6 | 2019-11-26T11:44:45.000Z | 2021-11-11T22:23:50.000Z | """
MAIN STYLING AND RENDERING FILE
Requirements:
------------------------------------------------------------------------------
IMPORTANT! This has only been tested with Blender 2.79 API. We have run this
on Linux and MacOS.
Execution:
------------------------------------------------------------------------------
This script is intended to run inside blender launched in background mode.
Sample invocation is:
blender --background --factory-startup --python-exit-code 1 PATH_TO_MY_BLEND.blend \
--python blender/render_main.py -- \
--width=500 <ANY OTHER PYTHON FLAGS FROM render_main.py>
'--factory-startup' is used to prevent custom settings from interfering.
'--python-exit-code 1' makes blender exit with code 1 if this script throws an error
'--' causes blender to ignore all following arguments so python can use them.
See blender --help for details. See pipeline.sh for sample usage.
Capabilities:
------------------------------------------------------------------------------
It is assumed that blender is invoked with a single blend. This script is a
jack-of-all-trades for setting up camera, lighting, styling, and rendering for
a custom stylized animation benchmark. We found it easier to run the script
separately for each phase of data processing (see pipeline.sh),
as this way the output can be easily examined for problems after every stage.
However, one-shot execution should also be possible.
See flags below for full capabilities. The trickiest bit is: different metadata
only works with particular render engine option. The script will raise errors
if incorrect engine is specified:
- Vertex paint for correspondences - blender render (no gamma correction!)
- Normals in camera space - blender render (no gamma correction!)
- Flow vector pass - cycles (blender render is buggy)
- Red stylit reference material - cycles
- Env lighting for mixamo models - blender render only
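Example invocation for the metadata/flow pass (illustrative; the output paths are
placeholders). Per the notes above, the flow vector pass needs cycles, so
--use_cycles is combined with --render_metadata_exr:
blender --background --factory-startup --python-exit-code 1 PATH_TO_MY_BLEND.blend \
   --python blender/render_main.py -- \
   --use_cycles --render_metadata_exr --rendered_frames=-1 \
   --frame_output_prefix=/tmp/flow/frame_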
"""
import bpy
import argparse
import logging
import os
import random
import sys
import time
import traceback
# Add to path to make sure we can import modules while running inside Blender.
__sdir = os.path.dirname(os.path.realpath(__file__))
if __sdir not in sys.path:
sys.path.append(__sdir)
import color_util
import geo_util
import io_util
import render_util
import stylit_util
LOG = logging.getLogger(__name__)
if __name__ == "__main__":
try:
# FLAGS
# --------------------------------------------------------------------------
parser = argparse.ArgumentParser(
description='Configurable utility to modify blend and/or render images/flow/metadata.')
parser.add_argument(
'--random_seed', action='store', type=int, default=-1,
help='Integer seed for random number generator; used if > 0.')
# Rendering ----------------------------------------------------------------
parser.add_argument(
'--width', action='store', type=int, default=1500,
help='Width to render at.')
parser.add_argument(
'--height', action='store', type=int, default=1500,
help='Height to render at.')
parser.add_argument(
'--quality_samples', action='store', type=int, default=-1,
help='If positive and using cycles, will use this many samples per pixel; ' +
'e.g. 128 is slow, 10 is comparatively fast.')
parser.add_argument(
'--start_frame', action='store', type=int, default=0,
help='Frame to start rendering at (relative to first frame).')
parser.add_argument(
'--rendered_frames', action='store', type=int, default=0,
help='Maximum frames to render; 0 for none; -1 for all.')
parser.add_argument(
'--skip_existing_frames', action='store_true', default=False,
help='If true, skips existing frames matching --frame_output_prefix.')
parser.add_argument(
'--use_cycles', action='store_true', default=False,
help='If true, sets Cycles as the rendering engine, else leaves unchanged.')
parser.add_argument(
'--use_blender_render', action='store_true', default=False,
help='If true, sets Blender Render as the rendering engine, else leaves unchanged.')
# Outputs ------------------------------------------------------------------
parser.add_argument(
'--frame_output_prefix', action='store', type=str, default='',
help='If set, will set image output to <frame_output_prefix><frame#>.PNG; ' +
'should include full path.')
parser.add_argument(
'--render_metadata_exr', action='store_true', default=False,
help='If true, renders all metadata passes as a multilayer EXR file.')
parser.add_argument(
'--objectids_key_file', action='store', type=str, default='',
help='Directory to write objectids to, as images.')
parser.add_argument(
'--world_normals_output_dir', action='store', type=str, default='',
help='Directory to write world space normals to, as images ' +
'(only compatible with --use_cycles.')
parser.add_argument(
'--camera_normals_output_dir', action='store', type=str, default='',
help='Directory to write camera space normals to, as images ' +
'(only compatible with --use_blender_render.')
parser.add_argument(
'--enable_gamma_correction', action='store_true', default=False,
help='We disable gamma correction by default, as it corrupts the ' +
'metadata rendering; set this on to enable.')
parser.add_argument(
'--bg_name', action='store', type=str, default="STYMO_BG",
help='If any object name matches this substring, it will be treated as ' +
'background for the purpose of id labeling and stylit rendering.')
parser.add_argument(
'--output_blend', action='store', type=str, default='',
help='If set, will output modified blend here (must be absolute path); ' +
'if setting linestyle and/or material, will replace special substrings ' +
'<M> and <L> with material and linestyle.')
parser.add_argument(
'--info_file', action='store', type=str, default='',
help='If set, may output auxiliary information into this file.')
# Camera -------------------------------------------------------------------
parser.add_argument(
'--set_camera', action='store', type=int, default=0,
help='If >= 0, selects ith camera and deletes all other cameras; ' +
'if i > num cameras, generates a random one instead.')
parser.add_argument(
'--keep_extra_cameras', action='store_true',
help='If --set_camera, will not delete extra cameras.')
parser.add_argument(
'--add_random_camera_motion', action='store_true',
help='If generating a random camera and this is true, creates zoom/flyaround/pan; '
'WARNING: parameters are tuned for mixamo character blends.')
# Animation range ----------------------------------------------------------
parser.add_argument(
'--offset_scene_start_frame_by', action='store', type=int, default=0,
help='Unlike --start_frame, which just controls the rendering range, this ' +
'flag offsets the current scene start frame in the timeline by the ' +
'specified amount. Relevant to blends that do not begin at frame 0.')
parser.add_argument(
'--offset_scene_end_frame_by', action='store', type=int, default=0,
help='Unlike --rendered_frames, which just controls the rendering range, this ' +
'flag offsets the current scene end frame in the timeline by the ' +
'specified amount. Relevant to blends that do not begin at frame 0.')
# Lighting -----------------------------------------------------------------
parser.add_argument(
'--set_env_lighting_image', action='store', type=str, default='',
help='Set to image path or directory of environment map images to set ' +
'environment lighting; only works with --use_blender_render.')
parser.add_argument(
'--set_stylit_lighting', action='store_true',
help='If true, sets consistent lighting to render input for stylit.')
# Styles -------------------------------------------------------------------
parser.add_argument(
'--set_stylit_style', action='store_true',
help='If true, sets red material style used for stylit style transfer.')
parser.add_argument(
'--set_corresp_style', action='store_true',
help='If true, will set per-vertex materials to render correspondences.')
parser.add_argument(
'--set_objectids_style', action='store_true',
help='If true, will set objectids to render using flat materials.')
parser.add_argument(
'--deterministic_objectid_colors', action='store_true',
help='If true, objectid colors will not be shuffled; use for testing.')
parser.add_argument(
'--linestyles_blend', action='store', type=str, default='',
help='Path to blend containing all the line styles.')
parser.add_argument(
'--set_linestyle_matching', action='store', type=str, default='',
help='Regex matching linestyle(s) in --line_styles_blend; '
'if more than one match, picks random one; '
'"" for none; ".*" for all; "hi|bye" to match either.')
parser.add_argument(
'--randomize_line_color', action='store_true',
help='If true, randomizes line color if line is set.')
parser.add_argument(
'--materials_blend', action='store', type=str, default='',
help='Path to blend containing all the material styles (e.g. textured blender styles).')
parser.add_argument(
'--set_materials_matching', action='store', type=str, default='',
help='Regex matching materials(s) in --materials_blend; '
'if more than one match, picks random one; '
'"" for none; ".*" for all; "hi|bye" to match either.')
parser.add_argument(
'--randomize_material_color', action='store_true',
help='If true, randomizes material color if material is set.')
# Custom color control
parser.add_argument(
'--material_color_choices', action='store', type=str, default='',
help='String of format R,G,B R2,G2,B2 ... of colors to choose from if ' +
'randomizing material colors.')
parser.add_argument(
'--line_hue_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Hue in this range (two numbers,csv).')
parser.add_argument(
'--line_sat_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Saturation in this range (two numbers,csv).')
parser.add_argument(
'--line_value_range', action='store', type=str, default='0,1.0',
help='If --randomize_line_color, will keep HSV Value in this range (two numbers,csv).')
# Parse only arguments after --
# --------------------------------------------------------------------------
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1:]
args = parser.parse_args(argv)
if args.random_seed > 0:
print('Using --random_seed=%d as random seed.' % args.random_seed)
random.seed(args.random_seed)
else:
print('Using time as random seed.')
random.seed(time.time())
render_util.print_blend_diagnostics()
# Handle camera ------------------------------------------------------------
if args.set_camera >= 0:
cam = None
if args.keep_extra_cameras:
cam = geo_util.get_camera_by_number(args.set_camera)
else:
cam = geo_util.delete_all_but_one_camera(args.set_camera)
if cam is None:
print('Generating a random camera.')
bbox = geo_util.get_scene_bbox()
cam = geo_util.create_random_camera(bbox, 2.5, 2.5, 2.5)
if args.add_random_camera_motion:
print('Adding motion to camera.')
geo_util.mixamo_add_random_camera_motion(cam)
geo_util.disable_camera_depth_of_field(cam)
else:
cam = geo_util.get_single_camera_or_die()
# Set active camera
bpy.context.scene.camera = cam
# Handle frame bounds ------------------------------------------------------
orig_start = bpy.context.scene.frame_start
bpy.context.scene.frame_start = orig_start + args.offset_scene_start_frame_by
if args.offset_scene_end_frame_by > 0:
bpy.context.scene.frame_end = orig_start + args.offset_scene_end_frame_by
# Handle lighting ----------------------------------------------------------
info_file = None
if args.info_file:
info_file = open(args.info_file, 'w')
if len(args.set_env_lighting_image) > 0:
if not args.use_blender_render:
raise RuntimeError(
'Error: --set_env_lighting_image="img" only works with --use_blender_render')
render_util.setup_realistic_lighting(args.set_env_lighting_image, 10.0, False)
if args.set_stylit_lighting:
if not args.use_cycles:
raise RuntimeError(
'Error: --set_stylit_lighting only works with --use_cycles')
stylit_util.setup_stylit_lighting()
# Handle styles ------------------------------------------------------------
nstyles = len([x for x in [args.set_stylit_lighting,
args.set_corresp_style, args.set_objectids_style,
(args.set_linestyle_matching or args.set_materials_matching)]
if x])
if nstyles > 1:
raise RuntimeError(
'Error: incompatible rendering styles specified; only one of these can be true: ' +
'--set_stylit_lighting OR ' +
'--set_corresp_style OR --set_objectids_style OR ' +
'(--set_linestyle_matching and/or --set_materials_matching)')
linestyle_name = 'default'
material_name = 'default'
if args.set_stylit_style: # Red material used for stylit rendering
if not args.use_cycles:
raise RuntimeError(
'Error: --set_stylit_style only works with --use_cycles')
render_util.clear_unnecessary_settings()
stylit_util.setup_stylit_materials(bg_name=args.bg_name)
elif args.set_corresp_style: # Per-vertex correspondence rendering
if not args.use_blender_render:
raise RuntimeError(
'Correspondence rendering (--set_corresp_style) only implemented for ' +
'--use_blender_render')
render_util.clear_unnecessary_settings()
render_util.set_correspondence_style()
elif args.set_objectids_style: # Object Ids rendered in flat color
if not args.use_blender_render:
raise RuntimeError(
'Correspondence rendering (--set_objectids_style) only implemented for ' +
'--use_blender_render')
render_util.clear_unnecessary_settings()
idsinfo = render_util.set_objectids_style(
bg_name=args.bg_name, deterministic=args.deterministic_objectid_colors)
if idsinfo and args.objectids_key_file:
with open(os.path.join(args.objectids_key_file), 'w') as f:
for i in range(len(idsinfo)):
f.write('%s %d %d %d\n' %
(idsinfo[i][0], idsinfo[i][1][0],
idsinfo[i][1][1], idsinfo[i][1][2]))
elif args.set_linestyle_matching or args.set_materials_matching: # Freestyle / toon shading
if not args.use_blender_render:
raise RuntimeError(
'Linestyles and materials only implemented for --use_blender_render')
render_util.clear_unnecessary_settings()
if len(args.set_linestyle_matching) > 0:
if len(args.linestyles_blend) == 0:
raise RuntimeError(
'Error: Must set --linestyles_blend with line exemplars ' +
'if requesting --set_linestyle_matching.')
line_color = None
if args.randomize_line_color:
line_color = color_util.get_random_color(
prob_dark=0.8,
bounds=color_util.parse_hsv_bounds(args.line_hue_range,
args.line_sat_range,
args.line_value_range))
linestyle_name = render_util.set_linestyle(
args.linestyles_blend, args.set_linestyle_matching,
color=line_color)
if info_file:
info_file.write('LINESTYLE %s\n' % io_util.strip_blender_name(linestyle_name))
if len(args.set_materials_matching) > 0:
if len(args.materials_blend) == 0:
raise RuntimeError(
'Error: Must set --materials_blend with material ' +
'exemplars if requesting --set_materials_matching.')
mat_color_randomizer = None
if args.randomize_material_color:
if args.material_color_choices:
mat_color_randomizer = color_util.make_color_getter(
args.material_color_choices)
else:
mat_color_randomizer = color_util.make_random_color_getter()
material_name = render_util.set_materials(
args.materials_blend, args.set_materials_matching,
color_randomizer=mat_color_randomizer)
if info_file:
info_file.write('MATSTYLE %s\n' % io_util.strip_blender_name(material_name))
# Handle rendering settings ------------------------------------------------
if args.use_cycles and args.use_blender_render:
raise RuntimeError('Can specify only one of --use_cycles and --use_blender_render')
if args.use_cycles or args.use_blender_render:
nsamples = (args.quality_samples if args.quality_samples > 0 else None)
render_util.set_render_settings(args.use_cycles, nsamples=nsamples,
enable_gamma=args.enable_gamma_correction)
if args.width > 0 and args.height > 0:
render_util.set_width_height(args.width, args.height)
if args.world_normals_output_dir or args.camera_normals_output_dir:
if args.world_normals_output_dir and args.camera_normals_output_dir:
raise RuntimeError('Only one type of normals can be output at once.')
if args.world_normals_output_dir and not args.use_cycles:
raise RuntimeError('World normals can only be output with --use_cycles.')
elif args.camera_normals_output_dir and not args.use_blender_render:
raise RuntimeError('Camera space normals can only be output with --use_blender_render.')
render_util.init_normals_render_nodes(
(args.world_normals_output_dir or args.camera_normals_output_dir),
use_cycles=args.use_cycles)
# Handle saving -------------------------------------------------------
if len(args.output_blend) > 0:
bpy.ops.file.pack_all()
args.output_blend = args.output_blend.replace('<M>', io_util.strip_blender_name(material_name))
args.output_blend = args.output_blend.replace('<L>', io_util.strip_blender_name(linestyle_name))
print('Saving blend to %s' % args.output_blend)
geo_util.save_blend(args.output_blend)
if args.rendered_frames != 0:
if args.render_metadata_exr and not args.use_cycles:
raise RuntimeError('Must set --use_cycles=True to render out flow with ' +
'--render_metadata_exr')
print('Rendering frames')
render_util.render_animation(
args.frame_output_prefix, args.rendered_frames,
start_frame_offset=args.start_frame,
render_exr=args.render_metadata_exr,
skip_existing=args.skip_existing_frames)
except Exception as e:
tb = traceback.format_exc()
LOG.critical(tb)
LOG.critical('Script failed')
raise e
LOG.critical('Script completed')
| 49.194064 | 108 | 0.58616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,526 | 0.442103 |
90d3d04333f570bee6151ef38e0d8057f563ad15 | 5,004 | py | Python | atlas/foundations_contrib/src/test/archiving/test_artifact_downloader.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
]
| 296 | 2020-03-16T19:55:00.000Z | 2022-01-10T19:46:05.000Z | atlas/foundations_contrib/src/test/archiving/test_artifact_downloader.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
]
| 57 | 2020-03-17T11:15:57.000Z | 2021-07-10T14:42:27.000Z | atlas/foundations_contrib/src/test/archiving/test_artifact_downloader.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
]
| 38 | 2020-03-17T21:06:05.000Z | 2022-02-08T03:19:34.000Z |
from foundations_spec import *
from unittest.mock import call
class TestArtifactDownloader(Spec):
mock_archiver = let_mock()
make_directory_mock = let_patch_mock('os.makedirs')
@let
def source_directory(self):
return self.faker.uri_path()
@let
def download_directory(self):
return self.faker.uri_path()
@let
def artifact_downloader(self):
from foundations_contrib.archiving.artifact_downloader import ArtifactDownloader
return ArtifactDownloader(self.mock_archiver)
@let
def mock_foundations_files(self):
return [
'foundations/a',
'foundations/b',
'foundations_contrib/c',
'foundations_contrib/d',
'foundations_events/e',
'foundations_events/f',
'foundations_internal/g',
'foundations_internal/h',
'jobs/i',
'jobs/j',
'model_serving/k',
'model_serving/l',
'venv/m',
'venv/n',
'docker_image_version.sh',
'download_gui_images.sh',
'foundations_gui.sh',
'foundations_package_manifest.yaml',
'foundations_requirements.txt',
'job.tgz',
'run.env',
'run.sh',
'p.bin',
'q.bin',
'template/t',
'template/u',
]
def test_downloads_single_file_to_specified_directory(self):
self._mock_file_list(['path/to/my/file'])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('path/to/my/file', self.download_directory + '/path/to/my/file')
def test_downloads_multiple_files_to_specified_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('', self.download_directory)
first_file_download = call('different/file', self.download_directory + '/different/file')
second_file_download = call('other/different/file', self.download_directory + '/other/different/file')
self.mock_archiver.fetch_persisted_file.assert_has_calls([first_file_download, second_file_download])
def test_ensures_target_directory_exists(self):
self._mock_file_list(['path/to/my/file'])
self.artifact_downloader.download_files('', self.download_directory)
self.make_directory_mock.assert_called_with(self.download_directory + '/path/to/my', exist_ok=True)
    def test_creates_target_directories_for_multiple_files(self):
        self._mock_file_list(['different/file', 'other/different/file'])
        self.artifact_downloader.download_files('', self.download_directory)
        first_directory_creation = call(self.download_directory + '/different', exist_ok=True)
        second_directory_creation = call(self.download_directory + '/other/different', exist_ok=True)
        self.make_directory_mock.assert_has_calls([first_directory_creation, second_directory_creation])
def test_downloads_only_files_with_specified_source_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('other/', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_once_with('other/different/file', self.download_directory + '/other/different/file')
def test_downloads_only_files_with_specified_source_directory_with_different_source_directory(self):
self._mock_file_list(['different/file', 'other/different/file'])
self.artifact_downloader.download_files('different/', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_once_with('different/file', self.download_directory + '/different/file')
def test_download_does_not_include_foundations_files(self):
for foundations_file in self.mock_foundations_files:
self._mock_file_list(['path/to/some/file', foundations_file])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('path/to/some/file', self.download_directory + '/path/to/some/file')
def test_download_includes_config_yamls(self):
for foundations_file in self.mock_foundations_files:
self._mock_file_list(['a.config.yaml', foundations_file])
self.artifact_downloader.download_files('', self.download_directory)
self.mock_archiver.fetch_persisted_file.assert_called_with('a.config.yaml', self.download_directory + '/a.config.yaml')
def _mock_file_list(self, file_list):
self.mock_archiver.fetch_miscellaneous = ConditionalReturn()
self.mock_archiver.fetch_miscellaneous.return_when(file_list, 'job_artifact_listing.pkl') | 42.769231 | 146 | 0.689249 | 4,932 | 0.985612 | 0 | 0 | 1,198 | 0.239408 | 0 | 0 | 1,027 | 0.205236 |
90d3d4041471f93cb0f82d38f927e393e1e21818 | 34,846 | py | Python | lib/python3.5/functional/test/test_functional.py | mklan/NX-Rom-Market | 33613d2177b63df9e0568038ffdf1dd91ad334d8 | [
"MIT"
]
| 21 | 2021-01-10T16:44:55.000Z | 2022-03-03T13:15:07.000Z | lib/python3.5/functional/test/test_functional.py | mklan/NX-Rom-Market | 33613d2177b63df9e0568038ffdf1dd91ad334d8 | [
"MIT"
]
| 3 | 2021-01-10T15:38:50.000Z | 2021-04-29T09:45:47.000Z | lib/python3.5/functional/test/test_functional.py | mklan/NX-Rom-Market | 33613d2177b63df9e0568038ffdf1dd91ad334d8 | [
"MIT"
]
| 1 | 2021-01-10T15:07:38.000Z | 2021-01-10T15:07:38.000Z | # pylint: skip-file
import unittest
import array
from collections import namedtuple
from itertools import product
from functional.pipeline import Sequence, is_iterable, _wrap, extend
from functional.transformations import name
from functional import seq, pseq
Data = namedtuple("Data", "x y")
def pandas_is_installed():
try:
global pandas
import pandas
return True
except ImportError:
return False
class TestPipeline(unittest.TestCase):
def setUp(self):
self.seq = seq
def assert_type(self, s):
self.assertTrue(isinstance(s, Sequence))
def assert_not_type(self, s):
self.assertFalse(isinstance(s, Sequence))
def assertIteratorEqual(self, iter_0, iter_1):
seq_0 = list(iter_0)
seq_1 = list(iter_1)
self.assertListEqual(seq_0, seq_1)
def test_is_iterable(self):
self.assertFalse(is_iterable([]))
self.assertTrue(is_iterable(iter([1, 2])))
def test_constructor(self):
self.assertRaises(TypeError, lambda: Sequence(1))
def test_base_sequence(self):
l = []
self.assert_type(self.seq(l))
self.assert_not_type(self.seq(l).sequence)
self.assert_type(self.seq(self.seq(l)))
self.assert_not_type(self.seq(self.seq(l)).sequence)
self.assert_not_type(self.seq(l)._base_sequence)
def test_eq(self):
l = [1, 2, 3]
self.assertIteratorEqual(self.seq(l).map(lambda x: x), self.seq(l))
def test_ne(self):
a = [1, 2, 3]
b = [1]
self.assertNotEqual(self.seq(a), self.seq(b))
def test_repr(self):
l = [1, 2, 3]
self.assertEqual(repr(l), repr(self.seq(l)))
def test_lineage_name(self):
f = lambda x: x
self.assertEqual(f.__name__, name(f))
f = "test"
self.assertEqual("test", name(f))
def test_str(self):
l = [1, 2, 3]
self.assertEqual(str(l), str(self.seq(l)))
def test_hash(self):
self.assertRaises(TypeError, lambda: hash(self.seq([1])))
def test_len(self):
l = [1, 2, 3]
s = self.seq(l)
self.assertEqual(len(l), s.size())
self.assertEqual(len(l), s.len())
def test_count(self):
l = self.seq([-1, -1, 1, 1, 1])
self.assertEqual(l.count(lambda x: x > 0), 3)
self.assertEqual(l.count(lambda x: x < 0), 2)
def test_getitem(self):
l = [1, 2, [3, 4, 5]]
s = self.seq(l).map(lambda x: x)
self.assertEqual(s[1], 2)
self.assertEqual(s[2], [3, 4, 5])
self.assert_type(s[2])
self.assertEqual(s[1:], [2, [3, 4, 5]])
self.assert_type(s[1:])
l = [{1, 2}, {2, 3}, {4, 5}]
s = self.seq(l)
self.assertIsInstance(s[0], set)
self.assertEqual(s[0], l[0])
def test_iter(self):
l = list(enumerate(self.seq([1, 2, 3])))
e = list(enumerate([1, 2, 3]))
self.assertEqual(l, e)
l = self.seq([1, 2, 3])
e = [1, 2, 3]
result = []
for n in l:
result.append(n)
self.assertEqual(result, e)
self.assert_type(l)
def test_contains(self):
string = "abcdef"
s = self.seq(iter(string)).map(lambda x: x)
self.assertTrue("c" in s)
def test_add(self):
l0 = self.seq([1, 2, 3]).map(lambda x: x)
l1 = self.seq([4, 5, 6])
l2 = [4, 5, 6]
expect = [1, 2, 3, 4, 5, 6]
self.assertEqual(l0 + l1, expect)
self.assertEqual(l0 + l2, expect)
def test_head(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.head(), 1)
l = self.seq([[1, 2], 3, 4])
self.assertEqual(l.head(), [1, 2])
self.assert_type(l.head())
l = self.seq([])
with self.assertRaises(IndexError):
l.head()
def test_first(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.first(), 1)
l = self.seq([[1, 2], 3, 4]).map(lambda x: x)
self.assertEqual(l.first(), [1, 2])
self.assert_type(l.first())
l = self.seq([])
with self.assertRaises(IndexError):
l.head()
def test_head_option(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.head_option(), 1)
l = self.seq([[1, 2], 3, 4]).map(lambda x: x)
self.assertEqual(l.head_option(), [1, 2])
self.assert_type(l.head_option())
l = self.seq([])
self.assertIsNone(l.head_option())
def test_last(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.last(), 3)
l = self.seq([1, 2, [3, 4]]).map(lambda x: x)
self.assertEqual(l.last(), [3, 4])
self.assert_type(l.last())
def test_last_option(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
self.assertEqual(l.last_option(), 3)
l = self.seq([1, 2, [3, 4]]).map(lambda x: x)
self.assertEqual(l.last_option(), [3, 4])
self.assert_type(l.last_option())
l = self.seq([])
self.assertIsNone(l.last_option())
def test_init(self):
result = self.seq([1, 2, 3, 4]).map(lambda x: x).init()
expect = [1, 2, 3]
self.assertIteratorEqual(result, expect)
def test_tail(self):
l = self.seq([1, 2, 3, 4]).map(lambda x: x)
expect = [2, 3, 4]
self.assertIteratorEqual(l.tail(), expect)
def test_inits(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
expect = [[1, 2, 3], [1, 2], [1], []]
self.assertIteratorEqual(l.inits(), expect)
self.assertIteratorEqual(l.inits().map(lambda s: s.sum()), [6, 3, 1, 0])
def test_tails(self):
l = self.seq([1, 2, 3]).map(lambda x: x)
expect = [[1, 2, 3], [2, 3], [3], []]
self.assertIteratorEqual(l.tails(), expect)
self.assertIteratorEqual(l.tails().map(lambda s: s.sum()), [6, 5, 3, 0])
def test_drop(self):
s = self.seq([1, 2, 3, 4, 5, 6])
expect = [5, 6]
result = s.drop(4)
self.assertIteratorEqual(result, expect)
self.assert_type(result)
self.assertIteratorEqual(s.drop(0), s)
self.assertIteratorEqual(s.drop(-1), s)
def test_drop_right(self):
s = self.seq([1, 2, 3, 4, 5]).map(lambda x: x)
expect = [1, 2, 3]
result = s.drop_right(2)
self.assert_type(result)
self.assertIteratorEqual(result, expect)
self.assertIteratorEqual(s.drop_right(0), s)
self.assertIteratorEqual(s.drop_right(-1), s)
def test_drop_while(self):
l = [1, 2, 3, 4, 5, 6, 7, 8]
f = lambda x: x < 4
expect = [4, 5, 6, 7, 8]
result = self.seq(l).drop_while(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_take(self):
s = self.seq([1, 2, 3, 4, 5, 6])
expect = [1, 2, 3, 4]
result = s.take(4)
self.assertIteratorEqual(result, expect)
self.assert_type(result)
self.assertIteratorEqual(s.take(0), self.seq([]))
self.assertIteratorEqual(s.take(-1), self.seq([]))
def test_take_while(self):
l = [1, 2, 3, 4, 5, 6, 7, 8]
f = lambda x: x < 4
expect = [1, 2, 3]
result = self.seq(l).take_while(f)
self.assertIteratorEqual(result, expect)
self.assert_type(result)
def test_union(self):
result = self.seq([1, 1, 2, 3, 3]).union([1, 4, 5])
expect = [1, 2, 3, 4, 5]
self.assert_type(result)
self.assertSetEqual(result.set(), set(expect))
def test_intersection(self):
result = self.seq([1, 2, 2, 3]).intersection([2, 3, 4, 5])
expect = [2, 3]
self.assert_type(result)
self.assertSetEqual(result.set(), set(expect))
def test_difference(self):
result = self.seq([1, 2, 3]).difference([2, 3, 4])
expect = [1]
self.assert_type(result)
self.assertSetEqual(result.set(), set(expect))
def test_symmetric_difference(self):
result = self.seq([1, 2, 3, 3]).symmetric_difference([2, 4, 5])
expect = [1, 3, 4, 5]
self.assert_type(result)
self.assertSetEqual(result.set(), set(expect))
def test_map(self):
f = lambda x: x * 2
l = [1, 2, 0, 5]
expect = [2, 4, 0, 10]
result = self.seq(l).map(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_select(self):
f = lambda x: x * 2
l = [1, 2, 0, 5]
expect = [2, 4, 0, 10]
result = self.seq(l).select(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_starmap(self):
f = lambda x, y: x * y
l = [(1, 1), (0, 3), (-3, 3), (4, 2)]
expect = [1, 0, -9, 8]
result = self.seq(l).starmap(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
result = self.seq(l).smap(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_filter(self):
f = lambda x: x > 0
l = [0, -1, 5, 10]
expect = [5, 10]
s = self.seq(l)
result = s.filter(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_where(self):
f = lambda x: x > 0
l = [0, -1, 5, 10]
expect = [5, 10]
s = self.seq(l)
result = s.where(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_filter_not(self):
f = lambda x: x > 0
l = [0, -1, 5, 10]
expect = [0, -1]
result = self.seq(l).filter_not(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_map_filter(self):
f = lambda x: x > 0
g = lambda x: x * 2
l = [0, -1, 5]
s = self.seq(l)
expect = [10]
result = s.filter(f).map(g)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_reduce(self):
f = lambda x, y: x + y
l = ["a", "b", "c"]
expect = "abc"
s = self.seq(l)
self.assertEqual(expect, s.reduce(f))
with self.assertRaises(TypeError):
seq([]).reduce(f)
with self.assertRaises(ValueError):
seq([]).reduce(f, 0, 0)
self.assertEqual(seq([]).reduce(f, 1), 1)
self.assertEqual(seq([0, 2]).reduce(f, 1), 3)
def test_accumulate(self):
f = lambda x, y: x + y
l_char = ["a", "b", "c"]
expect_char = ["a", "ab", "abc"]
l_num = [1, 2, 3]
expect_num = [1, 3, 6]
self.assertEqual(seq(l_char).accumulate(), expect_char)
self.assertEqual(seq(l_num).accumulate(), expect_num)
def test_aggregate(self):
f = lambda current, next_element: current + next_element
l = self.seq([1, 2, 3, 4])
self.assertEqual(l.aggregate(f), 10)
self.assertEqual(l.aggregate(0, f), 10)
self.assertEqual(l.aggregate(0, f, lambda x: 2 * x), 20)
l = self.seq(["a", "b", "c"])
self.assertEqual(l.aggregate(f), "abc")
self.assertEqual(l.aggregate("", f), "abc")
self.assertEqual(l.aggregate("", f, lambda x: x.upper()), "ABC")
self.assertEqual(l.aggregate(f), "abc")
self.assertEqual(l.aggregate("z", f), "zabc")
self.assertEqual(l.aggregate("z", f, lambda x: x.upper()), "ZABC")
with self.assertRaises(ValueError):
l.aggregate()
with self.assertRaises(ValueError):
l.aggregate(None, None, None, None)
def test_fold_left(self):
f = lambda current, next_element: current + next_element
l = self.seq([1, 2, 3, 4])
self.assertEqual(l.fold_left(0, f), 10)
self.assertEqual(l.fold_left(-10, f), 0)
l = self.seq(["a", "b", "c"])
self.assertEqual(l.fold_left("", f), "abc")
self.assertEqual(l.fold_left("z", f), "zabc")
f = lambda x, y: x + [y]
self.assertEqual(l.fold_left([], f), ["a", "b", "c"])
self.assertEqual(l.fold_left(["start"], f), ["start", "a", "b", "c"])
def test_fold_right(self):
f = lambda next_element, current: current + next_element
l = self.seq([1, 2, 3, 4])
self.assertEqual(l.fold_right(0, f), 10)
self.assertEqual(l.fold_right(-10, f), 0)
l = self.seq(["a", "b", "c"])
self.assertEqual(l.fold_right("", f), "cba")
self.assertEqual(l.fold_right("z", f), "zcba")
f = lambda next_element, current: current + [next_element]
self.assertEqual(l.fold_right([], f), ["c", "b", "a"])
self.assertEqual(l.fold_right(["start"], f), ["start", "c", "b", "a"])
def test_sorted(self):
s = self.seq([1, 3, 2, 5, 4])
r = s.sorted()
self.assertIteratorEqual([1, 2, 3, 4, 5], r)
self.assert_type(r)
def test_order_by(self):
s = self.seq([(2, "a"), (1, "b"), (4, "c"), (3, "d")])
r = s.order_by(lambda x: x[0])
self.assertIteratorEqual([(1, "b"), (2, "a"), (3, "d"), (4, "c")], r)
self.assert_type(r)
def test_reverse(self):
l = [1, 2, 3]
expect = [4, 3, 2]
s = self.seq(l).map(lambda x: x + 1)
result = s.reverse()
self.assertIteratorEqual(expect, result)
self.assert_type(result)
result = s.__reversed__()
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_distinct(self):
l = [1, 1, 2, 3, 2, 3]
expect = [1, 2, 3]
s = self.seq(l)
result = s.distinct()
for e in result:
self.assertTrue(e in expect)
result = s.distinct()
self.assertEqual(result.size(), len(expect))
self.assert_type(result)
def test_distinct_by(self):
s = self.seq(Data(1, 2), Data(1, 3), Data(2, 0), Data(3, -1), Data(1, 5))
expect = {Data(1, 2), Data(2, 0), Data(3, -1)}
result = s.distinct_by(lambda data: data.x)
self.assertSetEqual(set(result), expect)
self.assert_type(result)
def test_slice(self):
s = self.seq([1, 2, 3, 4])
result = s.slice(1, 2)
self.assertIteratorEqual(result, [2])
self.assert_type(result)
result = s.slice(1, 3)
self.assertIteratorEqual(result, [2, 3])
self.assert_type(result)
def test_any(self):
l = [True, False]
self.assertTrue(self.seq(l).any())
def test_all(self):
l = [True, False]
self.assertFalse(self.seq(l).all())
l = [True, True]
self.assertTrue(self.seq(l).all())
def test_enumerate(self):
l = [2, 3, 4]
e = [(0, 2), (1, 3), (2, 4)]
result = self.seq(l).enumerate()
self.assertIteratorEqual(result, e)
self.assert_type(result)
def test_inner_join(self):
l0 = [("a", 1), ("b", 2), ("c", 3)]
l1 = [("a", 2), ("c", 4), ("d", 5)]
result0 = self.seq(l0).inner_join(l1)
result1 = self.seq(l0).join(l1, "inner")
e = [("a", (1, 2)), ("c", (3, 4))]
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(e))
self.assertDictEqual(dict(result1), dict(e))
result0 = self.seq(l0).inner_join(self.seq(l1))
result1 = self.seq(l0).join(self.seq(l1), "inner")
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(e))
self.assertDictEqual(dict(result1), dict(e))
def test_left_join(self):
left = [("a", 1), ("b", 2)]
right = [("a", 2), ("c", 3)]
result0 = self.seq(left).left_join(right)
result1 = self.seq(left).join(right, "left")
expect = [("a", (1, 2)), ("b", (2, None))]
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
result0 = self.seq(left).left_join(self.seq(right))
result1 = self.seq(left).join(self.seq(right), "left")
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
def test_right_join(self):
left = [("a", 1), ("b", 2)]
right = [("a", 2), ("c", 3)]
result0 = self.seq(left).right_join(right)
result1 = self.seq(left).join(right, "right")
expect = [("a", (1, 2)), ("c", (None, 3))]
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
result0 = self.seq(left).right_join(self.seq(right))
result1 = self.seq(left).join(self.seq(right), "right")
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
def test_outer_join(self):
left = [("a", 1), ("b", 2)]
right = [("a", 2), ("c", 3)]
result0 = self.seq(left).outer_join(right)
result1 = self.seq(left).join(right, "outer")
expect = [("a", (1, 2)), ("b", (2, None)), ("c", (None, 3))]
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
result0 = self.seq(left).outer_join(self.seq(right))
result1 = self.seq(left).join(self.seq(right), "outer")
self.assert_type(result0)
self.assert_type(result1)
self.assertDictEqual(dict(result0), dict(expect))
self.assertDictEqual(dict(result1), dict(expect))
def test_join(self):
with self.assertRaises(TypeError):
self.seq([(1, 2)]).join([(2, 3)], "").to_list()
def test_max(self):
l = [1, 2, 3]
self.assertEqual(3, self.seq(l).max())
def test_min(self):
l = [1, 2, 3]
self.assertEqual(1, self.seq(l).min())
def test_max_by(self):
l = ["aa", "bbbb", "c", "dd"]
self.assertEqual("bbbb", self.seq(l).max_by(len))
def test_min_by(self):
l = ["aa", "bbbb", "c", "dd"]
self.assertEqual("c", self.seq(l).min_by(len))
def test_find(self):
l = [1, 2, 3]
f = lambda x: x == 3
g = lambda x: False
self.assertEqual(3, self.seq(l).find(f))
self.assertIsNone(self.seq(l).find(g))
def test_flatten(self):
l = [[1, 1, 1], [2, 2, 2], [[3, 3], [4, 4]]]
expect = [1, 1, 1, 2, 2, 2, [3, 3], [4, 4]]
result = self.seq(l).flatten()
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_flat_map(self):
l = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
f = lambda x: x
expect = [1, 1, 1, 2, 2, 2, 3, 3, 3]
result = self.seq(l).flat_map(f)
self.assertIteratorEqual(expect, result)
self.assert_type(result)
def test_group_by(self):
l = [(1, 1), (1, 2), (1, 3), (2, 2)]
f = lambda x: x[0]
expect = {1: [(1, 1), (1, 2), (1, 3)], 2: [(2, 2)]}
result = self.seq(l).group_by(f)
result_comparison = {}
for kv in result:
result_comparison[kv[0]] = kv[1]
self.assertIteratorEqual(expect, result_comparison)
self.assert_type(result)
def test_group_by_key(self):
l = [("a", 1), ("a", 2), ("a", 3), ("b", -1), ("b", 1), ("c", 10), ("c", 5)]
e = {"a": [1, 2, 3], "b": [-1, 1], "c": [10, 5]}.items()
result = self.seq(l).group_by_key()
self.assertEqual(result.len(), len(e))
for e0, e1 in zip(result, e):
self.assertIteratorEqual(e0, e1)
self.assert_type(result)
def test_grouped(self):
l = self.seq([1, 2, 3, 4, 5, 6, 7, 8])
expect = [[1, 2], [3, 4], [5, 6], [7, 8]]
self.assertIteratorEqual(map(list, l.grouped(2)), expect)
expect = [[1, 2, 3], [4, 5, 6], [7, 8]]
self.assertIteratorEqual(map(list, l.grouped(3)), expect)
def test_grouped_returns_list(self):
l = self.seq([1, 2, 3, 4, 5, 6, 7, 8])
self.assertTrue(is_iterable(l.grouped(2)))
self.assertTrue(is_iterable(l.grouped(3)))
def test_sliding(self):
l = self.seq([1, 2, 3, 4, 5, 6, 7])
expect = [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]]
self.assertIteratorEqual(l.sliding(2), expect)
l = self.seq([1, 2, 3])
expect = [[1, 2], [3]]
self.assertIteratorEqual(l.sliding(2, 2), expect)
expect = [[1, 2]]
self.assertIteratorEqual(l.sliding(2, 3), expect)
def test_empty(self):
self.assertTrue(self.seq([]).empty())
def test_non_empty(self):
self.assertTrue(self.seq([1]).non_empty())
def test_non_zero_bool(self):
self.assertTrue(bool(self.seq([1])))
self.assertFalse(bool(self.seq([])))
def test_make_string(self):
l = [1, 2, 3]
expect1 = "123"
expect2 = "1:2:3"
s = self.seq(l)
self.assertEqual(expect1, s.make_string(""))
self.assertEqual(expect2, s.make_string(":"))
s = self.seq([])
self.assertEqual("", s.make_string(""))
self.assertEqual("", s.make_string(":"))
def test_partition(self):
l = [-1, -2, -3, 1, 2, 3]
e2 = [-1, -2, -3]
e1 = [1, 2, 3]
f = lambda x: x > 0
s = self.seq(l)
p1, p2 = s.partition(f)
self.assertIteratorEqual(e1, list(p1))
self.assertIteratorEqual(e2, list(p2))
self.assert_type(p1)
self.assert_type(p2)
result = self.seq([[1, 2, 3], [4, 5, 6]]).flatten().partition(lambda x: x > 2)
expect = [[3, 4, 5, 6], [1, 2]]
self.assertIteratorEqual(expect, list(result))
self.assert_type(result)
def test_cartesian(self):
result = seq.range(3).cartesian(range(3)).list()
self.assertListEqual(result, list(product(range(3), range(3))))
result = seq.range(3).cartesian(range(3), range(2)).list()
self.assertListEqual(result, list(product(range(3), range(3), range(2))))
result = seq.range(3).cartesian(range(3), range(2), repeat=2).list()
self.assertListEqual(
result, list(product(range(3), range(3), range(2), repeat=2))
)
def test_product(self):
l = [2, 2, 3]
self.assertEqual(12, self.seq(l).product())
self.assertEqual(96, self.seq(l).product(lambda x: x * 2))
s = self.seq([])
self.assertEqual(1, s.product())
self.assertEqual(2, s.product(lambda x: x * 2))
s = self.seq([5])
self.assertEqual(5, s.product())
self.assertEqual(10, s.product(lambda x: x * 2))
def test_sum(self):
l = [1, 2, 3]
self.assertEqual(6, self.seq(l).sum())
self.assertEqual(12, self.seq(l).sum(lambda x: x * 2))
def test_average(self):
l = [1, 2]
self.assertEqual(1.5, self.seq(l).average())
self.assertEqual(4.5, self.seq(l).average(lambda x: x * 3))
def test_set(self):
l = [1, 1, 2, 2, 3]
ls = set(l)
self.assertIteratorEqual(ls, self.seq(l).set())
def test_zip(self):
l1 = [1, 2, 3]
l2 = [-1, -2, -3]
e = [(1, -1), (2, -2), (3, -3)]
result = self.seq(l1).zip(l2)
self.assertIteratorEqual(e, result)
self.assert_type(result)
def test_zip_with_index(self):
l = [2, 3, 4]
e = [(2, 0), (3, 1), (4, 2)]
result = self.seq(l).zip_with_index()
self.assertIteratorEqual(result, e)
self.assert_type(result)
e = [(2, 5), (3, 6), (4, 7)]
result = self.seq(l).zip_with_index(5)
self.assertIteratorEqual(result, e)
self.assert_type(result)
def test_to_list(self):
l = [1, 2, 3, "abc", {1: 2}, {1, 2, 3}]
result = self.seq(l).to_list()
self.assertIteratorEqual(result, l)
self.assertTrue(isinstance(result, list))
result = self.seq(iter([0, 1, 2])).to_list()
self.assertIsInstance(result, list)
result = self.seq(l).list(n=2)
self.assertEqual(result, [1, 2])
def test_list(self):
l = [1, 2, 3, "abc", {1: 2}, {1, 2, 3}]
result = self.seq(l).list()
self.assertEqual(result, l)
self.assertTrue(isinstance(result, list))
result = self.seq(iter([0, 1, 2])).to_list()
self.assertIsInstance(result, list)
result = self.seq(l).list(n=2)
self.assertEqual(result, [1, 2])
def test_for_each(self):
l = [1, 2, 3, "abc", {1: 2}, {1, 2, 3}]
result = []
def f(e):
result.append(e)
self.seq(l).for_each(f)
self.assertEqual(result, l)
def test_exists(self):
l = ["aaa", "BBB", "ccc"]
self.assertTrue(self.seq(l).exists(str.islower))
self.assertTrue(self.seq(l).exists(str.isupper))
self.assertFalse(self.seq(l).exists(lambda s: "d" in s))
def test_for_all(self):
l = ["aaa", "bbb", "ccc"]
self.assertTrue(self.seq(l).for_all(str.islower))
self.assertFalse(self.seq(l).for_all(str.isupper))
def test_to_dict(self):
l = [(1, 2), (2, 10), (7, 2)]
d = {1: 2, 2: 10, 7: 2}
result = self.seq(l).to_dict()
self.assertDictEqual(result, d)
self.assertTrue(isinstance(result, dict))
result = self.seq(l).to_dict(default=lambda: 100)
self.assertTrue(1 in result)
self.assertFalse(3 in result)
self.assertEqual(result[4], 100)
result = self.seq(l).dict(default=100)
self.assertTrue(1 in result)
self.assertFalse(3 in result)
self.assertEqual(result[4], 100)
def test_dict(self):
l = [(1, 2), (2, 10), (7, 2)]
d = {1: 2, 2: 10, 7: 2}
result = self.seq(l).dict()
self.assertDictEqual(result, d)
self.assertTrue(isinstance(result, dict))
result = self.seq(l).dict(default=lambda: 100)
self.assertTrue(1 in result)
self.assertFalse(3 in result)
self.assertEqual(result[4], 100)
result = self.seq(l).dict(default=100)
self.assertTrue(1 in result)
self.assertFalse(3 in result)
self.assertEqual(result[4], 100)
def test_reduce_by_key(self):
l = [("a", 1), ("a", 2), ("a", 3), ("b", -1), ("b", 1), ("c", 10), ("c", 5)]
e = {"a": 6, "b": 0, "c": 15}.items()
result = self.seq(l).reduce_by_key(lambda x, y: x + y)
self.assertEqual(result.len(), len(e))
for e0, e1 in zip(result, e):
self.assertEqual(e0, e1)
self.assert_type(result)
def test_count_by_key(self):
l = [
("a", 1),
("a", 2),
("a", 3),
("b", -1),
("b", 1),
("c", 10),
("c", 5),
("d", 1),
]
e = {"a": 3, "b": 2, "c": 2, "d": 1}.items()
result = self.seq(l).count_by_key()
self.assertEqual(result.len(), len(e))
for e0, e1 in zip(result, e):
self.assertEqual(e0, e1)
self.assert_type(result)
def test_count_by_value(self):
l = ["a", "a", "a", "b", "b", "c", "d"]
e = {"a": 3, "b": 2, "c": 1, "d": 1}.items()
result = self.seq(l).count_by_value()
self.assertEqual(result.len(), len(e))
for e0, e1 in zip(result, e):
self.assertEqual(e0, e1)
self.assert_type(result)
def test_wrap(self):
self.assert_type(_wrap([1, 2]))
self.assert_type(_wrap((1, 2)))
self.assert_not_type(_wrap(1))
self.assert_not_type(_wrap(1.0))
self.assert_not_type(_wrap("test"))
self.assert_not_type(_wrap(True))
self.assert_not_type(_wrap(Data(1, 2)))
def test_wrap_objects(self):
class A(object):
a = 1
l = [A(), A(), A()]
self.assertIsInstance(_wrap(A()), A)
self.assert_type(self.seq(l))
@unittest.skipUnless(
pandas_is_installed(), "Skip pandas tests if pandas is not installed"
)
def test_wrap_pandas(self):
df1 = pandas.DataFrame({"name": ["name1", "name2"], "value": [1, 2]})
df2 = pandas.DataFrame({"name": ["name1", "name2"], "value": [3, 4]})
result = seq([df1, df2]).reduce(lambda x, y: x.append(y))
self.assertEqual(result.len(), 4)
self.assertEqual(result[0].to_list(), ["name1", 1])
self.assertEqual(result[1].to_list(), ["name2", 2])
self.assertEqual(result[2].to_list(), ["name1", 3])
self.assertEqual(result[3].to_list(), ["name2", 4])
def test_iterator_consumption(self):
sequence = self.seq([1, 2, 3])
first_transform = sequence.map(lambda x: x)
second_transform = first_transform.map(lambda x: x)
first_list = list(first_transform)
second_list = list(second_transform)
expect = [1, 2, 3]
self.assertIteratorEqual(first_list, expect)
self.assertIteratorEqual(second_list, expect)
def test_single_call(self):
if self.seq is pseq:
raise self.skipTest("pseq doesn't support functions with side-effects")
counter = []
def counter_func(x):
counter.append(1)
return x
list(self.seq([1, 2, 3, 4]).map(counter_func))
self.assertEqual(len(counter), 4)
def test_seq(self):
self.assertIteratorEqual(self.seq([1, 2, 3]), [1, 2, 3])
self.assertIteratorEqual(self.seq(1, 2, 3), [1, 2, 3])
self.assertIteratorEqual(self.seq(1), [1])
self.assertIteratorEqual(self.seq(iter([1, 2, 3])), [1, 2, 3])
with self.assertRaises(TypeError):
self.seq()
def test_lineage_repr(self):
s = self.seq(1).map(lambda x: x).filter(lambda x: True)
self.assertEqual(
repr(s._lineage), "Lineage: sequence -> map(<lambda>) -> filter(<lambda>)"
)
def test_cache(self):
if self.seq is pseq:
raise self.skipTest("pseq doesn't support functions with side-effects")
calls = []
func = lambda x: calls.append(x)
result = self.seq(1, 2, 3).map(func).cache().map(lambda x: x).to_list()
self.assertEqual(len(calls), 3)
self.assertEqual(result, [None, None, None])
result = self.seq(1, 2, 3).map(lambda x: x).cache()
self.assertEqual(
repr(result._lineage), "Lineage: sequence -> map(<lambda>) -> cache"
)
result = self.seq(1, 2, 3).map(lambda x: x).cache(delete_lineage=True)
self.assertEqual(repr(result._lineage), "Lineage: sequence")
def test_tabulate(self):
sequence = seq([[1, 2, 3], [4, 5, 6]])
self.assertEqual(sequence.show(), None)
self.assertNotEqual(sequence._repr_html_(), None)
result = sequence.tabulate()
self.assertEqual(result, "- - -\n1 2 3\n4 5 6\n- - -")
sequence = seq(1, 2, 3)
self.assertEqual(sequence.tabulate(), None)
class NotTabulatable(object):
pass
sequence = seq(NotTabulatable(), NotTabulatable(), NotTabulatable())
self.assertEqual(sequence.tabulate(), None)
long_data = seq([(i, i + 1) for i in range(30)])
self.assertTrue("Showing 10 of 30 rows" in long_data.tabulate(n=10))
self.assertTrue("Showing 10 of 30 rows" in long_data._repr_html_())
self.assertTrue(
"Showing 10 of 30 rows" not in long_data.tabulate(n=10, tablefmt="plain")
)
def test_tabulate_namedtuple(self):
sequence_tabulated = seq([Data(1, 2), Data(6, 7)]).tabulate()
self.assertEqual(sequence_tabulated, " x y\n--- ---\n 1 2\n 6 7")
def test_repr_max_lines(self):
sequence = seq.range(200)
self.assertEqual(len(repr(sequence)), 395)
sequence._max_repr_items = None
self.assertEqual(len(repr(sequence)), 890)
class TestExtend(unittest.TestCase):
def test_custom_functions(self):
@extend(aslist=True)
def my_zip(it):
return zip(it, it)
result = seq.range(3).my_zip().list()
expected = list(zip(range(3), range(3)))
self.assertEqual(result, expected)
result = seq.range(3).my_zip().my_zip().list()
expected = list(zip(expected, expected))
self.assertEqual(result, expected)
@extend
def square(it):
return [i ** 2 for i in it]
result = seq.range(100).square().list()
expected = [i ** 2 for i in range(100)]
self.assertEqual(result, expected)
name = "PARALLEL_SQUARE"
@extend(parallel=True, name=name)
def square_parallel(it):
return [i ** 2 for i in it]
result = seq.range(100).square_parallel()
self.assertEqual(result.sum(), sum(expected))
self.assertEqual(
repr(result._lineage), "Lineage: sequence -> extended[%s]" % name
)
@extend
def my_filter(it, n=10):
return (i for i in it if i > n)
# test keyword args
result = seq.range(20).my_filter(n=10).list()
expected = list(filter(lambda x: x > 10, range(20)))
self.assertEqual(result, expected)
# test args
result = seq.range(20).my_filter(10).list()
self.assertEqual(result, expected)
# test final
@extend(final=True)
def toarray(it):
return array.array("f", it)
result = seq.range(10).toarray()
expected = array.array("f", range(10))
self.assertEqual(result, expected)
result = seq.range(10).map(lambda x: x ** 2).toarray()
expected = array.array("f", [i ** 2 for i in range(10)])
self.assertEqual(result, expected)
# a more complex example combining all above
@extend()
def sum_pair(it):
return (i[0] + i[1] for i in it)
result = (
seq.range(100).my_filter(85).my_zip().sum_pair().square_parallel().toarray()
)
expected = array.array(
"f",
list(
map(
lambda x: (x[0] + x[1]) ** 2,
map(lambda x: (x, x), filter(lambda x: x > 85, range(100))),
)
),
)
self.assertEqual(result, expected)
class TestParallelPipeline(TestPipeline):
def setUp(self):
self.seq = pseq
| 34.229862 | 88 | 0.544711 | 34,394 | 0.987029 | 0 | 0 | 1,141 | 0.032744 | 0 | 0 | 1,316 | 0.037766 |
90d42edecd2171b0bdded78b6fa56be73310676a | 9,412 | py | Python | tests/test_git_commit_one_file.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | ["BSD-3-Clause"] | 1,582 | 2015-01-05T02:41:44.000Z | 2022-03-30T20:03:22.000Z | tests/test_git_commit_one_file.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | ["BSD-3-Clause"] | 66 | 2015-01-23T07:58:04.000Z | 2021-11-12T02:23:27.000Z | tests/test_git_commit_one_file.py | mubashshirjamal/code | d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382 | ["BSD-3-Clause"] | 347 | 2015-01-05T07:47:07.000Z | 2021-09-20T21:22:32.000Z |
# -*- coding: utf-8 -*-
import os
from vilya.models.project import CodeDoubanProject
from vilya.models import git
from tests.base import TestCase
from tests.utils import mkdtemp
from vilya.libs import gyt
from vilya.libs.permdir import get_repo_root
class TestGit(TestCase):
@property
def u(self):
return self.addUser()
def _path(self, name):
return os.path.join(get_repo_root(), '%s.git' % name)
def _path_work_tree(self, name):
return os.path.join(get_repo_root(), '%s.work_tree' % name)
def _repo(self, name, bare=True):
git_path = self._path(name)
if bare:
work_tree_path = None
else:
work_tree_path = self._path_work_tree(name)
if not os.path.exists(work_tree_path):
os.mkdir(work_tree_path)
try:
CodeDoubanProject.create_git_repo(git_path)
except:
pass
repo = git.GitRepo(git_path, work_tree=work_tree_path)
return repo
def _commit(self, repo, filename, content='testcontent',
message='testmessage'):
# TODO allow commiting more than one file
assert os.path.exists(repo.work_tree), \
"repo.work_tree must exist, check if repo has been created with bare=False" # noqa
path = os.path.join(repo.work_tree, filename)
dir_ = os.path.dirname(path)
if not os.path.exists(dir_):
os.makedirs(os.path.dirname(path))
f = open(path, 'w')
f.write(content)
f.close()
rep2 = gyt.repo(repo.path, repo.work_tree, bare=False)
rep2.call(['add', filename])
rep2.call(['commit', filename, '-m', message], _env=self.env_for_git)
return gyt.repo(repo.path).sha()
def test_simple_commit(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
src = repo.get_src('testfile1')
assert src == ('blob', u'content1')
repo.commit_one_file('testfile1', 'content1 modified',
'change1', self.u, orig_hash=hash('content1'))
src = repo.get_src('testfile1')
assert src == ('blob', u'content1 modified')
def test_simple_commit_do_not_delete_other_files(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
self._commit(repo, 'testfile2', 'content2', 'msg2')
repo.commit_one_file('testfile1', 'content1 modified',
'change1', self.u, orig_hash=hash('content1'))
src = repo.get_src('testfile1')
assert src == ('blob', u'content1 modified')
type_, files = repo.get_src('')
assert any(d['path'] == 'testfile2' for d in files), \
"testfile2 should exists in root tree"
src = repo.get_src('testfile2')
assert src == ('blob', u'content2')
def test_commit_in_inner_directory(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'test/file1', 'content1', 'msg1')
src = repo.get_src('test/file1')
assert src == ('blob', u'content1')
repo.commit_one_file('test/file1', 'content1 modified',
'change1', self.u, orig_hash=hash('content1'))
src = repo.get_src('test/file1')
assert src == ('blob', u'content1 modified')
def test_create_file(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'file1', 'content1', 'msg1')
repo.commit_one_file(
'file2', 'content2 created', 'create1', self.u)
assert repo.cat('HEAD:file1') == 'content1'
assert repo.cat('HEAD:file2') == 'content2 created'
def test_create_first_file(self):
repo = self._repo('test', bare=False)
repo.commit_one_file(
'file1', 'content1 created', 'create1', self.u)
assert repo.cat('HEAD:file1') == 'content1 created'
def test_create_first_file_and_more(self):
repo = self._repo('test', bare=False)
repo.commit_one_file(
'file1', 'content1 created', 'create1', self.u)
repo.commit_one_file(
'file2', 'content2 created', 'create2', self.u)
repo.commit_one_file(
'file3', 'content3 created', 'create3', self.u)
repo.commit_one_file(
'file4', 'content4 created', 'create4', self.u)
assert repo.cat('HEAD:file1') == 'content1 created'
assert repo.cat('HEAD:file2') == 'content2 created'
assert repo.cat('HEAD:file3') == 'content3 created'
assert repo.cat('HEAD:file4') == 'content4 created'
def test_commit_file_on_dirty_index(self):
repo = self._repo('test', bare=False)
repo.commit_one_file(
'file1', 'content1 created', 'create1', self.u)
repo.commit_one_file(
'file2', 'content2 created', 'create2', self.u)
repo.commit_one_file(
'file1', 'content1 modified', 'modify1', self.u)
# Now artificially rewind the index tree state
repo.call('read-tree HEAD^')
repo.commit_one_file(
'file2', 'content2 modified', 'modify2', self.u)
# the latest commit should not have anything related to file1
assert 'file1' not in repo.call('log -p -n1')
def test_create_file_in_dir(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'test/file1', 'content1', 'msg1')
repo.commit_one_file(
'test/file2', 'content2 created', 'create1', self.u)
assert repo.cat('HEAD:test/file1') == 'content1'
assert repo.cat('HEAD:test/file2') == 'content2 created'
def test_simple_commit_in_branch(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
tmp_branch = repo.temp_branch_name()
repo.commit_one_file('testfile1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'),
branch=tmp_branch)
with mkdtemp() as tmpdir:
gyt.call(['git', 'clone', repo.path, tmpdir])
repo_check = gyt.repo(tmpdir, bare=False)
src = repo_check.call('show HEAD:testfile1')
assert src == u'content1'
repo_check.call('checkout master')
src = repo_check.call('show HEAD:testfile1')
assert src == u'content1'
repo_check.call('checkout %s' % tmp_branch)
src = repo_check.call('show HEAD:testfile1')
assert src == u'content1 modified'
repo_check.call('checkout master')
src = repo_check.call('show HEAD:testfile1')
assert src == u'content1'
def test_simple_commit_in_branch_in_subdir(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'test/file1', 'content1', 'msg1')
tmp_branch = repo.temp_branch_name()
repo.commit_one_file('test/file1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'),
branch=tmp_branch)
with mkdtemp() as tmpdir:
gyt.call(['git', 'clone', repo.path, tmpdir])
repo_check = gyt.repo(tmpdir, bare=False)
src = repo_check.call('show HEAD:test/file1')
assert src == u'content1'
repo_check.call('checkout master')
src = repo_check.call('show HEAD:test/file1')
assert src == u'content1'
repo_check.call('checkout %s' % tmp_branch)
src = repo_check.call('show HEAD:test/file1')
assert src == u'content1 modified'
repo_check.call('checkout master')
src = repo_check.call('show HEAD:test/file1')
assert src == u'content1'
def test_simple_commit_in_branch_creates_branch(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
assert repo.get_branches() == ['master']
tmp_branch = repo.temp_branch_name()
repo.commit_one_file('testfile1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'),
branch=tmp_branch)
assert repo.get_branches() == ['master', tmp_branch]
def test_simple_commit_in_branch_and_delete_branch(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
tmp_branch = repo.temp_branch_name()
repo.commit_one_file('testfile1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'),
branch=tmp_branch)
assert tmp_branch in repo.get_branches()
repo.remove_temp_branch(tmp_branch)
assert tmp_branch not in repo.get_branches()
assert repo.get_branches() == ['master']
def test_simple_commit_in_another_branch(self):
repo = self._repo('test', bare=False)
self._commit(repo, 'testfile1', 'content1', 'msg1')
branch = 'mybranch'
repo.commit_one_file('testfile1', 'content1 modified', 'change1',
self.u, orig_hash=hash('content1'), branch=branch)
assert branch in repo.get_branches()
assert set(repo.get_branches()) == set(['master', branch])
| 42.977169 | 95 | 0.595516 | 9,155 | 0.972694 | 0 | 0 | 56 | 0.00595 | 0 | 0 | 2,461 | 0.261475 |
90d5aacb48382d69453f984a43223f2aa79a4272 | 11,704 | py | Python | py/desitarget/train/data_preparation/PredCountsFromQLF_ClassModule.py | echaussidon/desitarget | 1206380dac5155b9e7bf238c7cb187bc797d78a3 | [
"BSD-3-Clause"
]
| 13 | 2016-02-02T00:26:21.000Z | 2022-01-14T07:31:59.000Z | py/desitarget/train/data_preparation/PredCountsFromQLF_ClassModule.py | echaussidon/desitarget | 1206380dac5155b9e7bf238c7cb187bc797d78a3 | [
"BSD-3-Clause"
]
| 674 | 2015-09-15T15:02:06.000Z | 2022-02-23T18:39:02.000Z | py/desitarget/train/data_preparation/PredCountsFromQLF_ClassModule.py | echaussidon/desitarget | 1206380dac5155b9e7bf238c7cb187bc797d78a3 | [
"BSD-3-Clause"
]
| 29 | 2015-06-09T13:51:48.000Z | 2021-06-05T06:03:18.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
import re
import numpy as np
from scipy.interpolate import interp2d
from scipy.interpolate import interp1d
class PredCountsFromQLF_Class():
    """Predicted counts and completeness from a tabulated quasar luminosity function (QLF).
    The class loads a binned dN/dz/dmag grid (``LoadQLF_Data``) and a selection-efficiency
    grid (``LoadEffData``), interpolates both onto a common redshift/magnitude grid, and
    evaluates the completeness (efficiency-weighted counts divided by QLF counts) per
    redshift bin, per magnitude bin or on the 2-D grid, together with variance estimates.
    """
def __init__(self):
self.QLF_OK = False
self.EFF_OK = False
self.QLF_EFF_OK = False
# QLF
self.QLF_nz = 0
self.QLF_stepz = 0
# self.QLF_tabz = None
self.QLF_zlimit = None
self.QLF_nmag = 0
self.QLF_stepmag = 0
self.QLF_tabmag = None
self.QLF_maglimit = None
self.QLF_dNdzdmag = None
self.QLF_Ndzdmag = None
# EFF
self.EFF_zlimit = None
self.EFF_maglimit = None
self.EFF_dzdmag = None
# QLF_EFF
self.QLF_EFF_zlimit = None
self.QLF_EFF_maglimit = None
self.interpEFF_dzdmag = None
self.interpQLF_dNdzdmag = None
self.interpQLF_EFF_dNdzdmag = None
self.QLF_EFF_dNdz = None
self.QLF4Compl_dNdz = None
self.Compl_dz = None
self.QLF_EFF_dNdmag = None
self.QLF4Compl_dNdmag = None
self.Compl_dmag = None
self.QLF_EFF_dNdzdmag = None
self.QLF4Compl_dNdzdmag = None
self.Compl_dzdmag = None
def LoadQLF_Data(self, fpn_QLF_Data, mMzred=np.array([0., 6.]), skyArea=10000.):
# Data loading in "dataStr"
dataStr = np.loadtxt(fpn_QLF_Data, dtype=str, delimiter='\n')
self.QLF_nz = len(re.findall(r'\d+(?:\.\d+)?', dataStr[0])) - 1
self.QLF_nmag = len(dataStr)
# ZRED
self.QLF_zlimit = np.linspace(mMzred[0], mMzred[1], self.QLF_nz + 1, endpoint=True)
self.QLF_stepz = self.QLF_zlimit[1] - self.QLF_zlimit[0]
# self.QLF_tabz = self.QLF_zlimit[0:-1] + self.QLF_stepz / 2.
self.QLF_tabmag = np.zeros(self.QLF_nmag)
self.QLF_dNdzdmag = np.zeros((self.QLF_nmag + 1, self.QLF_nz + 1))
for nL, line in enumerate(dataStr):
dNdzdmag = re.findall(r'\d+(?:\.\d+)?', line)
            dNdzdmag = np.asarray(dNdzdmag).astype(float)
self.QLF_tabmag[nL] = dNdzdmag[0]
self.QLF_dNdzdmag[nL + 1, 1:] = dNdzdmag[1:]
self.QLF_stepmag = self.QLF_tabmag[1] - self.QLF_tabmag[0]
# MAG
self.QLF_maglimit = np.zeros(self.QLF_nmag + 1)
self.QLF_maglimit[0:-1] = self.QLF_tabmag - self.QLF_stepmag / 2.
self.QLF_maglimit[-1] = self.QLF_maglimit[-2] + self.QLF_stepmag
self.QLF_dNdzdmag /= skyArea
self.QLF_Ndzdmag = np.cumsum(np.cumsum(
self.QLF_dNdzdmag, axis=0), axis=1)
self.QLF_OK = True
self.QLF_EFF_OK = False
def LoadEffData(self, EFFdata, EFFzlimit, EFFmaglimit):
self.EFF_dzdmag = np.copy(EFFdata)
self.EFF_zlimit = np.copy(EFFzlimit)
self.EFF_maglimit = np.copy(EFFmaglimit)
self.EFF_OK = True
self.QLF_EFF_OK = False
def PrelOpFunc(self):
if self.QLF_OK & self.EFF_OK & (not self.QLF_EFF_OK):
# QLF_EFF_zlimit
self.QLF_EFF_zlimit = np.unique(np.hstack((self.QLF_zlimit, self.EFF_zlimit)))
maxQLF_EFF_zlimit = min(float(np.max(self.QLF_zlimit)),
float(np.max(self.EFF_zlimit)))
minQLF_EFF_zlimit = max(float(np.min(self.QLF_zlimit)),
float(np.min(self.EFF_zlimit)))
test = (self.QLF_EFF_zlimit >= minQLF_EFF_zlimit) & \
(self.QLF_EFF_zlimit <= maxQLF_EFF_zlimit)
self.QLF_EFF_zlimit = self.QLF_EFF_zlimit[test]
# QLF_EFFmaglimit
self.QLF_EFF_maglimit = np.unique(
np.hstack((self.QLF_maglimit,
self.EFF_maglimit)))
maxQLF_EFF_maglimit = min(float(np.max(self.QLF_maglimit)),
float(np.max(self.EFF_maglimit)))
minQLF_EFF_maglimit = max(float(np.min(self.QLF_maglimit)),
float(np.min(self.EFF_maglimit)))
test = (self.QLF_EFF_maglimit >= minQLF_EFF_maglimit) & \
(self.QLF_EFF_maglimit <= maxQLF_EFF_maglimit)
self.QLF_EFF_maglimit = self.QLF_EFF_maglimit[test]
xnew = self.QLF_EFF_zlimit
ynew = self.QLF_EFF_maglimit
# EFF
x = self.EFF_zlimit.flatten()
y = self.EFF_maglimit.flatten()
z = self.EFF_dzdmag
# ==============================================================================
# f2d_EFF = interp2d(x, y, z, kind = 'linear',
# copy = True, bounds_error = True)
# interpEFF_dzdmag = f2d_EFF(xnew, ynew)
# ==============================================================================
interpXinds = np.digitize(xnew, x, right=True) - 1
interpXinds = np.maximum(interpXinds, 0)
interpYinds = np.digitize(ynew, y, right=True) - 1
interpYinds = np.maximum(interpYinds, 0)
interpXYgridInds = np.meshgrid(interpXinds, interpYinds)
self.interpEFF_dzdmag = z[interpXYgridInds[1],
interpXYgridInds[0]]
# QLF
x = self.QLF_zlimit.flatten()
y = self.QLF_maglimit.flatten()
z = self.QLF_Ndzdmag
f2d_QLF = interp2d(x, y, z, kind='linear', copy=True, bounds_error=True)
interpQLF_Ndzdmag = f2d_QLF(xnew, ynew)
interpQLF_dNdzdmag = np.copy(interpQLF_Ndzdmag)
interpQLF_dNdzdmag[:, 1:] -= np.copy(interpQLF_dNdzdmag[:, :-1])
interpQLF_dNdzdmag[1:, :] -= np.copy(interpQLF_dNdzdmag[:-1, :])
self.interpQLF_dNdzdmag = interpQLF_dNdzdmag
interpQLF_EFF_dNdzdmag = np.zeros(self.interpQLF_dNdzdmag.shape)
interpQLF_EFF_dNdzdmag = self.interpEFF_dzdmag * self.interpQLF_dNdzdmag
self.interpQLF_EFF_dNdzdmag = interpQLF_EFF_dNdzdmag
self.QLF_EFF_OK = True
def ZREDComplEvalFunc(self, zlimit):
if self.QLF_EFF_OK:
xnew = self.QLF_EFF_zlimit
assert(np.min(zlimit) >= np.min(xnew))
assert(np.max(zlimit) <= np.max(xnew))
interpQLF_dNdz = np.sum(self.interpQLF_dNdzdmag, axis=0)
interpQLF_Ndz = np.cumsum(interpQLF_dNdz)
# QLF_EFF dNdz
interpQLF_EFF_dNdz = np.sum(self.interpQLF_EFF_dNdzdmag, axis=0)
interpQLF_EFF_Ndz = np.cumsum(interpQLF_EFF_dNdz)
f1d_QLF_EFF = interp1d(xnew, interpQLF_EFF_Ndz, kind='linear', copy=True, bounds_error=True)
f1d_QLF = interp1d(xnew, interpQLF_Ndz, kind='linear', copy=True, bounds_error=True)
self.QLF_EFF_dNdz = f1d_QLF_EFF(zlimit)
self.QLF_EFF_dNdz[1:] -= np.copy(self.QLF_EFF_dNdz[:-1])
self.QLF4Compl_dNdz = f1d_QLF(zlimit)
self.QLF4Compl_dNdz[1:] -= np.copy(self.QLF4Compl_dNdz[:-1])
self.Compl_dz = self.QLF_EFF_dNdz[1:] / self.QLF4Compl_dNdz[1:]
self.Compl_dz[np.isnan(self.Compl_dz)] = 0.
return self.Compl_dz
def RComplEvalFunc(self, maglimit):
if self.QLF_EFF_OK:
ynew = self.QLF_EFF_maglimit
assert(np.min(maglimit) >= np.min(ynew))
assert(np.max(maglimit) <= np.max(ynew))
interpQLF_dNdmag = np.sum(self.interpQLF_dNdzdmag, axis=1)
interpQLF_Ndmag = np.cumsum(interpQLF_dNdmag)
# QLF_EFF dNdmag
interpQLF_EFF_dNdmag = np.sum(self.interpQLF_EFF_dNdzdmag, axis=1)
interpQLF_EFF_Ndmag = np.cumsum(interpQLF_EFF_dNdmag)
f1d_QLF_EFF = interp1d(ynew, interpQLF_EFF_Ndmag, kind='linear', copy=True, bounds_error=True)
f1d_QLF = interp1d(ynew, interpQLF_Ndmag, kind='linear', copy=True, bounds_error=True)
self.QLF_EFF_dNdmag = f1d_QLF_EFF(maglimit)
self.QLF_EFF_dNdmag[1:] -= np.copy(self.QLF_EFF_dNdmag[:-1])
self.QLF4Compl_dNdmag = f1d_QLF(maglimit)
self.QLF4Compl_dNdmag[1:] -= np.copy(self.QLF4Compl_dNdmag[:-1])
self.Compl_dmag = self.QLF_EFF_dNdmag[1:] / self.QLF4Compl_dNdmag[1:]
self.Compl_dmag[np.isnan(self.Compl_dmag)] = 0.
return self.Compl_dmag
def R_ZREDComplEvalFunc(self, zlimit, maglimit):
if self.QLF_EFF_OK:
xnew = self.QLF_EFF_zlimit
assert(np.min(zlimit) >= np.min(xnew))
assert(np.max(zlimit) <= np.max(xnew))
ynew = self.QLF_EFF_maglimit
assert(np.min(maglimit) >= np.min(ynew))
assert(np.max(maglimit) <= np.max(ynew))
interpQLF_EFF_Ndzdmag = np.cumsum(np.cumsum(self.interpQLF_EFF_dNdzdmag, axis=0), axis=1)
f2d_QLF_EFF = interp2d(xnew, ynew, interpQLF_EFF_Ndzdmag, kind='linear', copy=True, bounds_error=True)
QLF_EFF4Compl_Ndzdmag = f2d_QLF_EFF(zlimit, maglimit)
QLF_EFF4Compl_dNdzdmag = np.copy(QLF_EFF4Compl_Ndzdmag)
QLF_EFF4Compl_dNdzdmag[:, 1:] -= np.copy(QLF_EFF4Compl_dNdzdmag[:, :-1])
QLF_EFF4Compl_dNdzdmag[1:, :] -= np.copy(QLF_EFF4Compl_dNdzdmag[:-1, :])
self.QLF_EFF4Compl_dNdzdmag = QLF_EFF4Compl_dNdzdmag
# QLF
interpQLF_Ndzdmag = np.cumsum(np.cumsum(self.interpQLF_dNdzdmag, axis=0), axis=1)
f2d_QLF = interp2d(xnew, ynew, interpQLF_Ndzdmag, kind='linear', copy=True, bounds_error=True)
QLF4Compl_Ndzdmag = f2d_QLF(zlimit, maglimit)
QLF4Compl_dNdzdmag = np.copy(QLF4Compl_Ndzdmag)
QLF4Compl_dNdzdmag[:, 1:] -= np.copy(QLF4Compl_dNdzdmag[:, :-1])
QLF4Compl_dNdzdmag[1:, :] -= np.copy(QLF4Compl_dNdzdmag[:-1, :])
self.QLF4Compl_dNdzdmag = QLF4Compl_dNdzdmag
self.Compl_dzdmag = self.QLF_EFF4Compl_dNdzdmag[1:, 1:] / self.QLF4Compl_dNdzdmag[1:, 1:]
self.Compl_dzdmag[np.isnan(self.Compl_dzdmag)] = 0.
return self.Compl_dzdmag
def R_ZRED_EffVarEvalFunc(self, OBJ_QSO_dNdzdmag):
self.EffVar4Compl_dzdmag = None
self.Eff4Compl_dzdmag = np.copy(self.Compl_dzdmag)
if True:
self.EffVar4Compl_dzdmag = self.Eff4Compl_dzdmag * (1. - self.Eff4Compl_dzdmag)
self.EffVar4Compl_dzdmag /= OBJ_QSO_dNdzdmag
self.EffVar4Compl_dzdmag[OBJ_QSO_dNdzdmag == 0.] = 0.
else:
self.Count4Complt_Ndzdmag = self.Eff4Compl_dzdmag * OBJ_QSO_dNdzdmag
self.EffVar4Compl_dzdmag = OBJ_QSO_dNdzdmag - self.Count4Complt_Ndzdmag + 1.
self.EffVar4Compl_dzdmag *= self.Count4Complt_Ndzdmag + 1.
self.EffVar4Compl_dzdmag /= (OBJ_QSO_dNdzdmag + 2)**2 * (OBJ_QSO_dNdzdmag + 3)
self.EffVar4Compl_dzdmag[OBJ_QSO_dNdzdmag == 0.] = 0.
return self.EffVar4Compl_dzdmag
def ZRED_EffVarEvalFunc(self):
self.EffVar4Compl_dz = self.EffVar4Compl_dzdmag * (self.QLF4Compl_dNdzdmag[1:, 1:])**2
self.EffVar4Compl_dz = np.sum(self.EffVar4Compl_dz, axis=0)
tmp_var = np.sum(self.QLF4Compl_dNdzdmag[1:, 1:], axis=0)**2
self.EffVar4Compl_dz /= tmp_var
self.EffVar4Compl_dz[tmp_var == 0.] = 0.
return self.EffVar4Compl_dz
def R_EffVarEvalFunc(self):
self.EffVar4Compl_dmag = self.EffVar4Compl_dzdmag * (self.QLF4Compl_dNdzdmag[1:, 1:])**2
self.EffVar4Compl_dmag = np.sum(self.EffVar4Compl_dmag, axis=1)
tmp_var = np.sum(self.QLF4Compl_dNdzdmag[1:, 1:], axis=1)**2
self.EffVar4Compl_dmag /= tmp_var
self.EffVar4Compl_dmag[tmp_var == 0.] = 0.
return self.EffVar4Compl_dmag
| 38.5 | 114 | 0.605092 | 11,547 | 0.986586 | 0 | 0 | 0 | 0 | 0 | 0 | 691 | 0.05904 |
90d6fa60d16e379bff07b720da304a90340377ce | 1,135 | py | Python | safe_control_gym/controllers/__init__.py | gokhanalcan/safe-control-gym | e9086e102663a60a66f2cc9c8cd7610888744056 | ["MIT"] | null | null | null | safe_control_gym/controllers/__init__.py | gokhanalcan/safe-control-gym | e9086e102663a60a66f2cc9c8cd7610888744056 | ["MIT"] | null | null | null | safe_control_gym/controllers/__init__.py | gokhanalcan/safe-control-gym | e9086e102663a60a66f2cc9c8cd7610888744056 | ["MIT"] | null | null | null |
"""Register controllers.
"""
from safe_control_gym.utils.registration import register
register(id="mpc",
entry_point="safe_control_gym.controllers.mpc.mpc:MPC",
config_entry_point="safe_control_gym.controllers.mpc:mpc.yaml")
register(id="linear_mpc",
entry_point="safe_control_gym.controllers.mpc.linear_mpc:LinearMPC",
config_entry_point="safe_control_gym.controllers.mpc:linear_mpc.yaml")
register(id="gp_mpc",
entry_point="safe_control_gym.controllers.mpc.gp_mpc:GPMPC",
config_entry_point="safe_control_gym.controllers.mpc:gp_mpc.yaml")
register(id="mpsc",
entry_point="safe_control_gym.controllers.mpsc.mpsc:MPSC",
config_entry_point="safe_control_gym.controllers.mpsc:mpsc.yaml")
register(id="ppo",
entry_point="safe_control_gym.controllers.ppo.ppo:PPO",
config_entry_point="safe_control_gym.controllers.ppo:ppo.yaml")
register(id="safe_explorer_ppo",
entry_point="safe_control_gym.controllers.safe_explorer.safe_ppo:SafeExplorerPPO",
config_entry_point="safe_control_gym.controllers.safe_explorer:safe_ppo.yaml")
| 39.137931 | 91 | 0.757709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 669 | 0.589427 |
90d7c582c62cd57d3012bffede812a95aec2d89a | 378 | py | Python | tests/plot_profile/test_utils.py | mizeller/plot_profile | 832f1d47a182d65747f18cf1ac90afc9a3b821c1 | ["MIT"] | null | null | null | tests/plot_profile/test_utils.py | mizeller/plot_profile | 832f1d47a182d65747f18cf1ac90afc9a3b821c1 | ["MIT"] | 3 | 2021-11-10T15:37:27.000Z | 2022-03-28T13:29:42.000Z | tests/plot_profile/test_utils.py | MeteoSwiss-APN/plot_profile | 832f1d47a182d65747f18cf1ac90afc9a3b821c1 | ["MIT"] | null | null | null |
"""Test module ``plot_profile/utils.py``."""
# Standard library
import logging
# First-party
from plot_profile.utils import count_to_log_level
def test_count_to_log_level():
assert count_to_log_level(0) == logging.ERROR
assert count_to_log_level(1) == logging.WARNING
assert count_to_log_level(2) == logging.INFO
assert count_to_log_level(3) == logging.DEBUG
| 27 | 51 | 0.759259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.198413 |
90d92df312eb9bb3199567f0c02a6aff1611d818 | 1,116 | py | Python | spider/utilities/util_config.py | YunofHD/PSpider | c1c9e1e7c61365e140a55541cc558d7c1b9e50f2 | ["BSD-2-Clause"] | null | null | null | spider/utilities/util_config.py | YunofHD/PSpider | c1c9e1e7c61365e140a55541cc558d7c1b9e50f2 | ["BSD-2-Clause"] | null | null | null | spider/utilities/util_config.py | YunofHD/PSpider | c1c9e1e7c61365e140a55541cc558d7c1b9e50f2 | ["BSD-2-Clause"] | null | null | null |
# _*_ coding: utf-8 _*_
"""
util_config.py by xianhu
"""
__all__ = [
"CONFIG_FETCH_MESSAGE",
"CONFIG_PARSE_MESSAGE",
"CONFIG_MESSAGE_PATTERN",
"CONFIG_URL_LEGAL_PATTERN",
"CONFIG_URL_ILLEGAL_PATTERN",
]
# define the structure of message, used in Fetcher and Parser
CONFIG_FETCH_MESSAGE = "priority=%s, keys=%s, deep=%s, repeat=%s, url=%s"
CONFIG_PARSE_MESSAGE = "priority=%s, keys=%s, deep=%s, url=%s"
CONFIG_MESSAGE_PATTERN = r"priority=(?P<priority>\d+),\s*keys=(?P<keys>.+?),\s*deep=(?P<deep>\d+),\s*(repeat=(?P<repeat>\d+),\s*)?url=(?P<url>.+)$"
# define url_legal_pattern and url_illegal_pattern
CONFIG_URL_LEGAL_PATTERN = r"^https?:[^\s]+?\.[^\s]+?"
CONFIG_URL_ILLEGAL_PATTERN = r"\.(cab|iso|zip|rar|tar|gz|bz2|7z|tgz|apk|exe|app|pkg|bmg|rpm|deb|dmg|jar|jad|bin|msi|" \
"pdf|doc|docx|xls|xlsx|ppt|pptx|txt|md|odf|odt|rtf|py|java|c|cc|js|css|log|csv|tsv|" \
"jpg|jpeg|png|gif|bmp|xpm|xbm|ico|drm|dxf|eps|psd|pcd|pcx|tif|tiff|" \
"mp3|mp4|swf|mkv|avi|flv|mov|wmv|wma|3gp|mpg|mpeg|mp4a|wav|ogg|rmvb)$"
| 42.923077 | 147 | 0.634409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 836 | 0.749104 |
90d9a5726836680355d0f136ca02e9d3ff263f57 | 1,087 | py | Python | modcma/__main__.py | IOHprofiler/ModularCMAES | 5ae3310d68b7e2bc37ef10de07945e89c16d6654 | ["MIT"] | 2 | 2021-04-08T06:16:21.000Z | 2022-01-25T18:18:51.000Z | modcma/__main__.py | IOHprofiler/ModularCMAES | 5ae3310d68b7e2bc37ef10de07945e89c16d6654 | ["MIT"] | 3 | 2020-11-16T15:24:53.000Z | 2021-11-10T10:27:50.000Z | modcma/__main__.py | IOHprofiler/ModularCMAES | 5ae3310d68b7e2bc37ef10de07945e89c16d6654 | ["MIT"] | 2 | 2021-01-13T15:36:46.000Z | 2021-04-08T06:24:25.000Z |
"""Allows the user to call the library as a cli-module."""
from argparse import ArgumentParser
from .modularcmaes import evaluate_bbob
parser = ArgumentParser(description="Run single function CMAES")
parser.add_argument(
"-f", "--fid", type=int, help="bbob function id", required=False, default=5
)
parser.add_argument(
"-d", "--dim", type=int, help="dimension", required=False, default=5
)
parser.add_argument(
"-i",
"--iterations",
type=int,
help="number of iterations per agent",
required=False,
default=50,
)
parser.add_argument(
"-l", "--logging", required=False, action="store_true", default=False
)
parser.add_argument("-L", "--label", type=str, required=False, default="")
parser.add_argument("-s", "--seed", type=int, required=False, default=42)
parser.add_argument("-p", "--data_folder", type=str, required=False)
parser.add_argument("-a", "--arguments", nargs="+", required=False)
args = vars(parser.parse_args())
for arg in args.pop("arguments") or []:
# pylint: disable=exec-used
exec(arg, None, args)
evaluate_bbob(**args)
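# --- Illustrative usage (assumption: not part of the upstream module) --------
# With the package installed, this entry point is typically run as, e.g.:
#   python -m modcma -f 5 -d 10 -i 100 -l -L my_label -s 7
# Additional "key=value" Python assignments can be injected through -a; each
# string is exec'd into the argument dict above before evaluate_bbob is called:
#   python -m modcma -f 1 -a "iterations=5"
# (any keyword names beyond the options defined above are assumptions about
#  what evaluate_bbob accepts and are shown for illustration only).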
| 29.378378 | 79 | 0.689052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.291628 |
90da83a21ced32c3bda1f81dfc548d05d09d8656 | 7,303 | py | Python | src/rto/optimization/solvers/de.py | vicrsp/rto | e9e0b87533ae6bc5e6ad228bb26172384802f9b7 | ["MIT"] | null | null | null | src/rto/optimization/solvers/de.py | vicrsp/rto | e9e0b87533ae6bc5e6ad228bb26172384802f9b7 | ["MIT"] | 17 | 2020-10-24T18:03:54.000Z | 2020-11-11T22:25:16.000Z | src/rto/optimization/solvers/de.py | vicrsp/rto | e9e0b87533ae6bc5e6ad228bb26172384802f9b7 | ["MIT"] | null | null | null |
import numpy as np
class DifferentialEvolution:
    """Differential evolution solver for box-bounded problems.
    Decision variables are scaled internally to a [0, 100] range. The objective
    callable passed to ``run`` must return a tuple ``(cost, g)``, where ``g`` is a
    (possibly empty) sequence of constraint values and values <= 0 are feasible;
    survivor selection prefers feasible candidates over infeasible ones.
    ``de_type`` is a 'base/differences/crossover' triple such as 'rand/1/bin'.
    """
def __init__(self, lb, ub, mutation_prob=0.5, pop_size=10, max_generations=100, de_type='rand/1/bin', callback=None):
self.lb = np.asarray(lb).reshape(1, -1)
self.ub = np.asarray(ub).reshape(1, -1)
self.population_size = pop_size
self.max_generations = max_generations
self.num_variables = len(lb)
self.mutation_prob = mutation_prob
self.callback = callback
self.base, self.d, self.rec = de_type.split('/')
self.norm_lb = self.normalize(self.lb).flatten()
self.norm_ub = self.normalize(self.ub).flatten()
self.reset()
def reset(self):
self.fobj_evals = 0
self.population_fobj = []
self.best_objective = np.Infinity
self.best_solution = []
def normalize(self, x):
norm_x = np.zeros_like(x)
for i in range(x.shape[0]):
norm_x[i, :] = 100 * (x[i] - self.lb) / (self.ub - self.lb)
return norm_x
def denormalize(self, x):
xr = x.reshape(-1, self.num_variables)
denorm_x = np.zeros_like(xr)
for i in range(xr.shape[0]):
denorm_x[i, :] = xr[i] * (self.ub - self.lb) / 100 + self.lb
return denorm_x
def initialize_population(self):
pop_size = (self.population_size, self.num_variables)
self.population = np.random.uniform(
low=self.norm_lb, high=self.norm_ub, size=pop_size)
self.initial_population = self.population
def evaluate_population_cost(self, population):
pop_fobj = []
pop_g = []
# Calculating the fitness value of each solution in the current population.
for sol in population:
cost, g = self.eval_objective(sol)
if((cost < self.best_objective) & (not np.any(g > 0))):
self.best_objective = cost
self.best_solution = sol
pop_fobj.append(cost)
pop_g.append(g)
pop_fobj = np.array(pop_fobj)
pop_g = np.asarray(pop_g)
self.fobj_evals = self.fobj_evals + pop_fobj.shape[0]
self.population_fobj = pop_fobj
self.population_g = pop_g
if(self.callback != None):
self.callback(self.denormalize(population), pop_fobj, pop_g)
return pop_fobj, pop_g
def select_base_vector(self, population, cost):
if(self.base == 'rand'):
r1 = np.random.randint(0, self.population_size)
return r1, population[r1]
elif(self.base == 'mean'):
return None, np.mean(population, axis=0)
elif(self.base == 'best'):
best_idx = np.argmin(cost)
return None, population[best_idx]
else:
raise ValueError('Base={} is not implemented!'.format(self.base))
def select_difference_vector(self, r1, population):
if(self.d == "1"):
r2 = np.random.randint(0, self.population_size)
if(r1 != None):
while(r2 == r1):
r2 = np.random.randint(0, self.population_size)
r3 = np.random.randint(0, self.population_size)
if(r1 != None):
                while((r3 == r1) or (r3 == r2)):
r3 = np.random.randint(0, self.population_size)
return population[r2] - population[r3]
else:
raise ValueError(
'd={} is not implemented!'.format(self.d))
def select_scale_factor(self):
return np.random.rand() * 0.5 + 0.5 # U(0.5, 1.0)
def mutate(self, target, scale_factor, difference):
return target + scale_factor * difference
def recombine(self, v, x):
if (self.rec == "bin"):
u_i = []
for i, v_i in enumerate(v):
u_j = []
delta = np.random.randint(0, self.num_variables)
for j in range(self.num_variables):
randnum = np.random.rand()
if((randnum <= self.mutation_prob) | (j == delta)):
u_j.append(v_i[j])
else:
u_j.append(x[i, j])
u_i.append(u_j)
return np.asarray(u_i)
else:
raise ValueError(
'Recombination={} is not implemented!'.format(self.rec))
def validate_bounds(self, x):
xc = []
for i, value in enumerate(x):
if((value < self.norm_lb[i]) | (value > self.norm_ub[i])):
# replace the variable by a random value inside the bounds
xc.append(np.random.rand() *
(self.norm_ub[i] - self.norm_lb[i]) + self.norm_lb[i])
else:
xc.append(value)
return np.asarray(xc)
def eval_objective(self, x):
cost, g = self.fobj(self.denormalize(x).flatten())
return cost, np.asarray(g)
def select_survivors(self, u, x, fx, gx):
survivors = []
for i in range(self.population_size):
u_i = self.validate_bounds(u[i])
gx_i = gx[i]
fx_i = fx[i]
fu, gu = self.eval_objective(u_i)
is_valid = (fu <= fx_i)
# only use the rule for restricted problems
if(len(gu) > 0):
rule1 = np.all(gu <= 0) & np.all(gx_i <= 0) & (fu <= fx_i)
rule2 = np.all(gu <= 0) & np.any(gx_i > 0)
rule3 = np.any(gu > 0) & np.all(np.maximum(gu, np.zeros_like(
gu)) <= np.maximum(gx_i, np.zeros_like(gx_i)))
is_valid = rule1 | rule2 | rule3
if(is_valid):
survivors.append(u_i)
else:
survivors.append(x[i])
return np.asarray(survivors)
def run(self, func, debug=False):
self.reset()
self.fobj = func
self.initialize_population()
for i in range(self.max_generations):
fobj, g = self.evaluate_population_cost(self.population)
v = []
# use penalization for base vector selection only
# fobj_penalized = fobj + 1000 * np.maximum(np.zeros(self.population_size), np.max(np.asarray(g), axis=1))
for _ in range(self.population_size):
r1, base = self.select_base_vector(self.population, None)
difference = self.select_difference_vector(r1, self.population)
scale_factor = self.select_scale_factor()
v.append(self.mutate(base, scale_factor, difference))
v = np.asarray(v)
u = self.recombine(v, self.population)
self.population = self.select_survivors(
u, self.population, fobj, g)
# if(debug == True):
# print('Progress: {:.2f}%'.format(
# 100 * i / self.max_generations))
if((debug == True) & (self.best_objective != np.Infinity)):
print('Best fobj: {}'.format(self.best_objective))
# print('Best sol: {}'.format(
# self.denormalize(self.best_solution)))
if(self.best_objective != np.Infinity):
return self.best_objective, self.denormalize(self.best_solution).flatten()
else:
return np.Infinity, None
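# --- Illustrative usage sketch (assumption: not part of the upstream module) -
# The objective handed to ``run`` must return a tuple ``(cost, g)`` where ``g``
# is a sequence of constraint values (empty for unconstrained problems) and
# values <= 0 count as feasible. The bounds and settings below are made up.
if __name__ == "__main__":
    def _sphere(x):
        # unconstrained quadratic test function: minimum at the origin
        return float(np.sum(x ** 2)), []
    _solver = DifferentialEvolution(lb=[-5.0, -5.0], ub=[5.0, 5.0],
                                    pop_size=20, max_generations=50)
    _best_cost, _best_x = _solver.run(_sphere)
    print('best cost: {}, best solution: {}'.format(_best_cost, _best_x))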
| 37.451282 | 121 | 0.546899 | 7,281 | 0.996988 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.091469 |
90db629f11aa0043b43ed88cb2e40c8fc351233e | 2,122 | py | Python | openmdao/matrices/csr_matrix.py | onodip/OpenMDAO | 96a99806fb3a547b881d2ad3da2733bca9978567 | ["Apache-2.0"] | null | null | null | openmdao/matrices/csr_matrix.py | onodip/OpenMDAO | 96a99806fb3a547b881d2ad3da2733bca9978567 | ["Apache-2.0"] | null | null | null | openmdao/matrices/csr_matrix.py | onodip/OpenMDAO | 96a99806fb3a547b881d2ad3da2733bca9978567 | ["Apache-2.0"] | null | null | null |
"""Define the CSRmatrix class."""
import numpy as np
from scipy.sparse import coo_matrix
from six import iteritems
from openmdao.matrices.coo_matrix import COOMatrix
class CSRMatrix(COOMatrix):
"""
Sparse matrix in Compressed Row Storage format.
"""
def _build(self, num_rows, num_cols):
"""
Allocate the matrix.
Parameters
----------
num_rows : int
number of rows in the matrix.
num_cols : int
number of cols in the matrix.
"""
data, rows, cols = self._build_sparse(num_rows, num_cols)
# get a set of indices that sorts into row major order
srtidxs = np.lexsort((cols, rows))
data = data[srtidxs]
rows = rows[srtidxs]
cols = cols[srtidxs]
# now sort these back into ascending order (our original stacked order)
# so in _update_submat() we can just extract the individual index
# arrays that will map each block into the combined data array.
revidxs = np.argsort(srtidxs)
metadata = self._metadata
for key, (ind1, ind2, idxs, jac_type, factor) in iteritems(metadata):
if idxs is None:
metadata[key] = (revidxs[ind1:ind2], jac_type, factor)
else:
# apply the reverse index to each part of revidxs so that
# we can avoid copying the index array during updates.
metadata[key] = (revidxs[ind1:ind2][np.argsort(idxs)],
jac_type, factor)
# data array for the CSR will be the same as for the COO since
# it was already in sorted order.
coo = coo_matrix((data, (rows, cols)), shape=(num_rows, num_cols))
coo_data_size = coo.data.size
self._matrix = coo.tocsr()
# make sure data size is the same between coo and csr, else indexing is
# messed up
if coo_data_size != self._matrix.data.size:
raise ValueError("CSR matrix data contains duplicate row/col entries. "
"This would break internal indexing.")
| 34.786885 | 83 | 0.60132 | 1,951 | 0.919416 | 0 | 0 | 0 | 0 | 0 | 0 | 941 | 0.44345 |
90decc71935f62f946a40921c43b3f8580f075de | 2,398 | py | Python | setup.py | medchemfi/sdfconf | 81b1ed383c1d4b3e633fdc555e4027091226b025 | [ "MIT" ] | 6 | 2021-12-27T07:55:16.000Z | 2022-01-26T04:36:53.000Z | setup.py | medchemfi/sdfconf | 81b1ed383c1d4b3e633fdc555e4027091226b025 | [ "MIT" ] | null | null | null | setup.py | medchemfi/sdfconf | 81b1ed383c1d4b3e633fdc555e4027091226b025 | [ "MIT" ] | 3 | 2022-01-06T13:54:48.000Z | 2022-01-26T04:36:54.000Z | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open("src/sdfconf/_version.py", "rt") as vf:
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
for line in vf:
mo = re.search(VSRE, line, re.M)
if mo:
verstr = mo.group(1)
break
if not mo:
    raise RuntimeError("Unable to find version string in src/sdfconf/_version.py.")
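# Editor's note (illustrative): the VSRE pattern above expects a line in
# src/sdfconf/_version.py of the form
#     __version__ = "1.0.0"
# (single or double quotes both match).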
setup(name = 'sdfconf',
version = verstr,
description = ("Diverse manipulation and analysis tool for .sdf files."),
long_description = read('README.rst'),
install_requires = ['numpy>=1.7.1','matplotlib>=1.4.2'],
author = 'Sakari Lätti',
author_email = '[email protected]',
maintainer = 'Sakari Lätti',
maintainer_email = '[email protected]',
packages = ['sdfconf'],
package_dir = {'sdfconf':'src/sdfconf'},
keywords = 'sdf mol2 conformation analyze histogram',
url = 'http://users.jyu.fi/~pentikai/',
license = 'MIT/expat',
entry_points =
{'console_scripts': ['sdfconf = sdfconf.runner:main'],
'setuptools.installation': ['eggsecutable = sdfconf.runner:main',],
},
classifiers= ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
#'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry' ,
'Topic :: Software Development :: Libraries',
],
##FIXME
#'''
#package_data = {
# 'sample':['sample_data.sdf']
# },
#'''
) | 42.821429 | 97 | 0.470809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,025 | 0.427083 |
90df7ee581fbe2b55cfc18df37ae0baf763d0554 | 2,453 | py | Python | stacks/cognito_stack.py | adamdubey/devops-serverless-app-aws-cdk | 3e1ba3f9905c4f8469275e51448fb908b64a736a | [ "MIT" ] | null | null | null | stacks/cognito_stack.py | adamdubey/devops-serverless-app-aws-cdk | 3e1ba3f9905c4f8469275e51448fb908b64a736a | [ "MIT" ] | null | null | null | stacks/cognito_stack.py | adamdubey/devops-serverless-app-aws-cdk | 3e1ba3f9905c4f8469275e51448fb908b64a736a | [ "MIT" ] | null | null | null | from aws_cdk import (
aws_cognito as cognito,
aws_iam as iam,
aws_ssm as ssm,
core
)
class CognitoStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
prj_name = self.node.try_get_context("project_name")
env_name = self.node.try_get_context("env")
user_pool = cognito.CfnUserPool(self, 'cognitouserpool',
auto_verified_attributes = [
'email'
],
username_attributes = [
'email', 'phone_number'
],
user_pool_name = prj_name + '-user-pool',
schema = [
{
'attributeDataType': 'String',
'name': 'param1',
'mutable': True
}
],
policies = cognito.CfnUserPool.PoliciesProperty(
password_policy = cognito.CfnUserPool.PasswordPolicyProperty(
minimum_length = 10,
require_lowercase = True,
require_numbers = True,
require_symbols = False,
require_uppercase = True
)
)
)
user_pool_client = cognito.CfnUserPoolClient(self, 'pool-client',
user_pool_id = user_pool.ref,
client_name = env_name + '-app-client'
)
identity_pool = cognito.CfnIdentityPool(self, 'identity-pool',
allow_unauthenticated_identities = False,
cognito_identity_providers = [
cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
client_id = user_pool_client.ref,
provider_name = user_pool.attr_provider_name
)
],
identity_pool_name = prj_name + '-identity-pool'
)
ssm.StringParameter(self, 'app-id',
parameter_name = '/' + env_name + '/cognito-app-client-id',
string_value = user_pool_client.ref
)
ssm.StringParameter(self, 'user-pool-id',
parameter_name = '/' + env_name + '/cognito-user-pool-id',
string_value = user_pool_client.user_pool_id
)
ssm.StringParameter(self, 'identity-pool-id',
parameter_name = '/' + env_name + '/cognito-identity-pool-id',
string_value = identity_pool.ref
)
| 34.069444 | 77 | 0.538932 | 2,350 | 0.958011 | 0 | 0 | 0 | 0 | 0 | 0 | 306 | 0.124745 |
90dfa79ebe79555eaeba7589bba91bb7586ad592 | 3,570 | py | Python | cloudrail/knowledge/rules/aws/context_aware/s3_bucket_policy_vpc_endpoint_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [ "MIT" ] | null | null | null | cloudrail/knowledge/rules/aws/context_aware/s3_bucket_policy_vpc_endpoint_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [ "MIT" ] | null | null | null | cloudrail/knowledge/rules/aws/context_aware/s3_bucket_policy_vpc_endpoint_rule.py | my-devops-info/cloudrail-knowledge | b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e | [ "MIT" ] | null | null | null | from typing import Dict, List
from cloudrail.knowledge.context.aws.iam.policy import S3Policy
from cloudrail.knowledge.context.aws.iam.policy_statement import StatementEffect
from cloudrail.knowledge.context.aws.s3.s3_bucket import S3Bucket
from cloudrail.knowledge.context.aws.ec2.vpc import Vpc
from cloudrail.knowledge.context.aws.ec2.vpc_endpoint import VpcEndpoint
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.aws_base_rule import AwsBaseRule
from cloudrail.knowledge.rules.base_rule import Issue
from cloudrail.knowledge.rules.rule_parameters.base_paramerter import ParameterType
class S3BucketPolicyVpcEndpointRule(AwsBaseRule):
def execute(self, env_context: AwsEnvironmentContext, parameters: Dict[ParameterType, any]) -> List[Issue]:
issues_list: List[Issue] = []
vpc_to_buckets_map: Dict[Vpc, List[S3Bucket]] = self._create_vpc_to_buckets_map(env_context)
for vpc, bucket_list in vpc_to_buckets_map.items():
for s3_vpce in self._filter_by_service_name(vpc):
for bucket in bucket_list:
if bucket.resource_based_policy is None or \
not self._is_restrict_to_s3_vpce(bucket.resource_based_policy, s3_vpce):
issues_list.append(Issue(f"~{bucket.get_type()}~. `{bucket.get_friendly_name()}` is accessible via"
f" VPC endpoint `{s3_vpce.get_friendly_name()}`. "
f"~in VPC~. `{vpc.get_friendly_name()}` "
f"with a policy that is not restricting requests sourced from"
f" a VPC Endpoint.", bucket, bucket))
return issues_list
def get_id(self) -> str:
return "s3_bucket_policy_vpce"
@staticmethod
def _create_vpc_to_buckets_map(env_context: AwsEnvironmentContext) -> Dict[Vpc, List[S3Bucket]]:
region_to_buckets_map: Dict[str, List[S3Bucket]] = {}
vpc_to_buckets_map: Dict[Vpc, List[S3Bucket]] = {}
for bucket in env_context.s3_buckets:
if not bucket.is_public:
if bucket.region not in region_to_buckets_map:
region_to_buckets_map[bucket.region] = []
region_to_buckets_map[bucket.region].append(bucket)
for vpc in env_context.vpcs.values():
if vpc.region in region_to_buckets_map:
vpc_to_buckets_map[vpc] = region_to_buckets_map[vpc.region]
return vpc_to_buckets_map
@staticmethod
def _is_restrict_to_s3_vpce(policy: S3Policy, s3_vpce: VpcEndpoint) -> bool:
for statement in policy.statements:
expected_operator_prefix: str = "String" if statement.effect == StatementEffect.ALLOW else "StringNot"
for condition_block in statement.condition_block:
if condition_block.operator.startswith(expected_operator_prefix) and \
condition_block.key == "aws:SourceVpce" and \
s3_vpce.vpce_id in condition_block.values:
return True
return False
@staticmethod
def _filter_by_service_name(vpc: Vpc, service_name: str = "s3"):
return [s3_vpce for s3_vpce in vpc.endpoints if s3_vpce.service_name.endswith(f".{service_name}")]
def should_run_rule(self, environment_context: AwsEnvironmentContext) -> bool:
return bool(environment_context.s3_buckets)
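    # --- Editor's note (illustrative, not part of the original file) ---
    # _is_restrict_to_s3_vpce() recognizes bucket policy statements like the following
    # (hypothetical endpoint id): an Allow statement whose condition operator starts
    # with "String" and keys on "aws:SourceVpce", e.g.
    #
    #     "Condition": {"StringEquals": {"aws:SourceVpce": "vpce-0123456789abcdef0"}}
    #
    # or a Deny statement using a "StringNot*" operator with the same key and value.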
| 51.73913 | 123 | 0.671989 | 2,900 | 0.812325 | 0 | 0 | 1,503 | 0.421008 | 0 | 0 | 326 | 0.091317 |
90e09e247242d171d4b02ca536f204cbdf79e682 | 2,034 | py | Python | src/gluonts/core/serde/_json.py | PascalIversen/gluon-ts | cb6b5145b94f942f7803383a70813363f6953509 | [
"Apache-2.0"
]
| 1 | 2020-11-30T18:05:24.000Z | 2020-11-30T18:05:24.000Z | src/gluonts/core/serde/_json.py | PascalIversen/gluon-ts | cb6b5145b94f942f7803383a70813363f6953509 | [
"Apache-2.0"
]
| null | null | null | src/gluonts/core/serde/_json.py | PascalIversen/gluon-ts | cb6b5145b94f942f7803383a70813363f6953509 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
JSON Serialization/Deserialization
----------------------------------
The canonical way to do this is to define and `default` and `object_hook`
parameters to the json.dumps and json.loads methods. Unfortunately, due
to https://bugs.python.org/issue12657 this is not possible at the moment,
as support for custom NamedTuple serialization is broken.
To circumvent the issue, we pass the input value through custom encode
and decode functions that map nested object terms to JSON-serializable
data structures with explicit recursion.
"""
import json
from typing import Any, Optional
from ._base import encode, decode
def dump_json(o: Any, indent: Optional[int] = None) -> str:
"""
Serializes an object to a JSON string.
Parameters
----------
o
The object to serialize.
indent
An optional number of spaced to use as an indent.
Returns
-------
str
A string representing the object in JSON format.
See Also
--------
load_json
Inverse function.
"""
return json.dumps(encode(o), indent=indent, sort_keys=True)
def load_json(s: str) -> Any:
"""
Deserializes an object from a JSON string.
Parameters
----------
s
A string representing the object in JSON format.
Returns
-------
Any
The deserialized object.
See Also
--------
dump_json
Inverse function.
"""
return decode(json.loads(s))
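# --- Editor's note (illustrative, not part of the original module) ---
# Round-trip sketch for the two helpers above: for JSON-friendly values,
# load_json(dump_json(obj)) is expected to return an equal object, e.g.
#
#     s = dump_json({"epochs": 10, "lr": 1e-3}, indent=2)
#     assert load_json(s) == {"epochs": 10, "lr": 1e-3}
#
# Both directions pass values through encode()/decode(), so objects supported by
# the serde machinery (e.g. registered NamedTuples) survive the trip as well.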
| 26.076923 | 75 | 0.67355 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,738 | 0.854474 |
90e1d06ca5448614066727df6e46a00f224d0fe4 | 36,937 | py | Python | run.py | Aisbergg/docker-image-arch-aur-makepkg | 26b7fed14838f52560c8fd4ba222c1c3d41ce00d | [
"MIT"
]
| 7 | 2017-07-24T15:19:29.000Z | 2018-11-18T21:59:13.000Z | run.py | Aisbergg/docker-image-arch-aur-makepkg | 26b7fed14838f52560c8fd4ba222c1c3d41ce00d | [
"MIT"
]
| 1 | 2018-06-12T10:30:23.000Z | 2018-06-12T15:54:01.000Z | run.py | Aisbergg/docker-image-arch-aur-makepkg | 26b7fed14838f52560c8fd4ba222c1c3d41ce00d | [
"MIT"
]
| null | null | null | #!/usr/bin/python3
import argparse
import os
import sys
import re
import shutil
import tempfile
import pwd
import grp
import tarfile
import time
import glob
import urllib.request
from subprocess import Popen, PIPE
import aur
import pacman
local_source_dir = '/makepkg/local_src'
build_dir = os.path.abspath('/makepkg/build')
pacman_cache_dir = '/var/cache/pacman/pkg'
accepted_architectures = ['any', 'x86_64', 'i686']
packages_in_cache = None
packages_in_offical_repositories = None
class ConsoleColors:
blue = '\033[94m'
green = '\033[92m'
red = '\033[91m'
yellow = '\033[93m'
reset = '\033[0m'
class InvalidPackageSourceError(Exception):
"""Invalid package source exception.
Args:
message (str): Message passed with the exception
"""
def __init__(self, message):
super().__init__(message)
class NoSuchPackageError(Exception):
"""No such package exception.
Args:
message (str): Message passed with the exception
"""
def __init__(self, message):
super().__init__(message)
def printInfo(message):
"""Print a colorful info message.
Args:
message (str): Message to be printed
"""
print(ConsoleColors.blue + message + ConsoleColors.reset)
def printSuccessfull(message):
"""Print a colorful successfull message.
Args:
message (str): Message to be printed
"""
print(ConsoleColors.green + message + ConsoleColors.reset)
def printWarning(message):
"""Print a colorful warning message.
Args:
message (str): Message to be printed
"""
print(ConsoleColors.yellow + message + ConsoleColors.reset)
def printError(message):
"""Print a colorful error message.
Args:
message (str): Message to be printed
"""
print(ConsoleColors.red + message + ConsoleColors.reset)
class PackageRepository:
"""Represents an enum of all package repositories."""
CORE = "core"
EXTRA = "extra"
COMMUNITY = "community"
MULTILIB = "multilib"
AUR = "aur"
LOCAL = "local"
class PackageBase:
"""Base class for pacman packages and their sources.
Args:
name (str): Name of the Arch Linux package
"""
name = None
version = None
architecture = None
repository = None
dependencies = []
license = None
# is a cached version of this package available
# 0: not available
# 1: different version(s) available
# 2: same version available
cache_available = 0
# True if this package needs to be installed before a dependent package can
# be build
is_make_dependency = False
# status of the installtion
# -2: dependency failed to install
# -1: failed to install
# 0: is not installed
# 1: is installed
# 2: different version is installed
# 3: successfully installed
installation_status = 0
# store for errors
error_info = None
def __init__(self, name):
self.name = name
def _check_if_cache_is_available(self):
# check if same version is available
name = '{0}-{1}-{2}.pkg.tar.xz'.format(
self.name, self.version, self.architecture)
if name in packages_in_cache:
self.cache_available = 2
return
# check if different version is available
else:
regex_different = re.compile(r'{0}-(\S+)-{1}.pkg.tar.xz'.format(
self.name, self.architecture))
for cache_file in packages_in_cache:
match = regex_different.search(os.path.basename(cache_file))
if match:
self.cache_available = 1
return
self.cache_available = 0
def get_installation_status(self):
"""Get the installation status of the package."""
if pacman.is_installed(self.name):
pcm_info = pacman.get_info(self.name)
if pcm_info['Version'] == self.version:
self.installation_status = 1
else:
self.installation_status = 2
else:
self.installation_status = 0
class PacmanPackage(PackageBase):
"""Represents a pacman package from a official repository.
Args:
name (str): Name of the pacman package
"""
def __init__(self, name):
super().__init__(name)
try:
self._get_package_info()
self._check_if_cache_is_available()
self.get_installation_status()
except Exception as e:
self.error_info = e
def _get_package_info(self):
"""Get the needed package information."""
is_available = False
for pcm_info in packages_in_offical_repositories:
if pcm_info['id'] == self.name:
is_available = True
break
if is_available:
pkg_info = pacman.get_info(self.name)
self.version = pkg_info['Version']
self.architecture = pkg_info['Architecture']
if 'Repository' in pkg_info:
if pkg_info['Repository'] == PackageRepository.EXTRA:
self.repository = PackageRepository.EXTRA
elif pkg_info['Repository'] == PackageRepository.CORE:
self.repository = PackageRepository.CORE
elif pkg_info['Repository'] == PackageRepository.COMMUNITY:
self.repository = PackageRepository.COMMUNITY
elif pkg_info['Repository'] == PackageRepository.MULTILIB:
self.repository = PackageRepository.MULTILIB
else:
self.repository = PackageRepository.EXTRA
self.dependencies = pkg_info['Depends On'].split(' ')
self.license = pkg_info['Licenses']
else:
raise NoSuchPackageError(
"No package with the name '{0}' exists in the official repositories".format(self.name))
def install(self):
"""Install the Pacman package."""
if not (self.installation_status == 1 or self.installation_status == 3):
printInfo("Installing package {0} {1}...".format(
self.name, self.version))
rc, out, err = run_command(['pacman', '-S', '--force', '--needed',
'--noconfirm', '--noprogressbar',
'--ignore', 'package-query', '--ignore',
'pacman-mirrorlist', '--cachedir',
pacman_cache_dir, self.name])
if rc != 0:
self.installation_status = -1
self.error_info = Exception(
"Failed to install package {0}: {1}".format(self.name, '\n'.join(err)))
else:
self.installation_status = 3
class PackageSource(PackageBase):
"""Represents a source of a package.
Args:
name (str): Name of the package
remove_dowloaded_source (bool): If True remove the source downloaded by 'makepkg' before build. If False
the sources will be kept, under the condition that the source is of the same
version of the package to be build
local_source_path (str): Local path of the source. If 'None' the pckage will be fetched from the AUR
"""
# path that contains the package source
path = None
# the dependencies that need to be installed prior build
make_dependencies = []
# is marked as an explicit build, so it is not a dependency of another
# package
explicit_build = False
# the status of the build
# 0: not yet build
# 1: successfully build
# 2: skipped build
# 3: failed to build
# 4: make dependency failed
build_status = 0
# If True remove the source downloaded by 'makepkg' before build. If False
# the sources will be kept, under the condition that the source is of the same
# version of the package to be build
remove_dowloaded_source = False
# package source is build from git repository
build_from_git = False
# package source is build from git repository
split_package_names = None
def __init__(self, name, remove_dowloaded_source, local_source_path=None):
super().__init__(name)
self.remove_dowloaded_source = remove_dowloaded_source
try:
# is local source package
if local_source_path:
self.repository = PackageRepository.LOCAL
self.path = os.path.abspath(local_source_path)
# is AUR package
else:
self.repository = PackageRepository.AUR
self._download_aur_package_source()
self._parse_pkgbuild_file()
self._check_if_cache_is_available()
self.get_installation_status()
except Exception as e:
self.error_info = e
def _parse_from_string(self, name, string):
"""Parse a bash variable value from a string.
Args:
name (str): Name of the variable to be parsed
string (str): String containing the bash variables
Returns:
str. Value for given params
list. Value for given params
None. If given param wasn't found
"""
# search for array like value
match = re.compile(r'{0}=\(([^\)]*)\)'.format(name),
re.DOTALL).search(string)
if match:
m = match.group(1).replace('\n', '').replace('"', '').replace('\'', '')
return [x.strip('\"\'') for x in re.compile(r'\s').split(m) if x != '']
else:
# search for simple string value
match = re.compile(r'{0}=(.+)'.format(name)).search(string)
if match:
return match.group(1).strip('\"\' ')
return None
def _get_dependencies_from_alias(self, dep_alias_names):
"""Get the real package names if only an alias was supplied.
Args:
dep_alias_names (list): (Alias-)Names of the packages
Returns:
list. Real names of the packages
"""
dependencies = []
if dep_alias_names:
for dep_alias_name in dep_alias_names:
dep_alias_name = re.sub(r'(.+?)(<|<=|>|>=){1}.*?$', r'\1',
dep_alias_name)
rc, out, err = run_command(['package-query', '-QSiif', '%n', dep_alias_name], print_output=False)
if rc == 0:
dependencies.append(out[-1])
else:
dependencies.append(dep_alias_name)
return dependencies
def _parse_pkgbuild_file(self):
"""Parse package information from PKGBUILD file."""
pkgbuild_file = os.path.join(self.path, "PKGBUILD")
with open(pkgbuild_file, 'r') as f:
file_content = f.read()
# package name
pkgbase = self._parse_from_string('pkgbase', file_content)
if pkgbase:
self.name = pkgbase
split_package_names = self._parse_from_string('pkgname', file_content)
self.split_package_names = []
for spn in split_package_names:
self.split_package_names.append(
re.sub(r'\$\{{0,1}[A-Za-z_][A-Za-z0-9_]*\}{0,1}',
pkgbase, spn, flags=re.IGNORECASE))
else:
self.name = self._parse_from_string('pkgname', file_content)
self.build_from_git = self.name.endswith('-git')
# package version (combined with release)
version = self._parse_from_string('pkgver', file_content)
release = self._parse_from_string('pkgrel', file_content)
self.version = version + '-' + release
# package architecture
architectures = self._parse_from_string('arch', file_content)
for ac_arch in accepted_architectures:
if ac_arch in architectures:
self.architecture = ac_arch
break
if not self.architecture:
raise InvalidPackageSourceError(
"Architecture of the package '{0}' is not supported".format(os.path.basename(self.path)))
# package license
self.license = self._parse_from_string('license', file_content)
if type(self.license) == list:
self.license = self.license[0]
# raise an error if PKGBUILD file does not contain mandatory variables
if not self.name or \
not version or \
not release or \
not self.architecture or \
not self.license:
raise InvalidPackageSourceError(
"One or more mandatory variables (name, version, release, architecture, license) in the package '{0}' is missing".format(os.path.basename(self.path)))
# package dependencies
self.dependencies = self._get_dependencies_from_alias(
self._parse_from_string('depends', file_content))
# package make dependencies
self.make_dependencies = self._get_dependencies_from_alias(
self._parse_from_string('makedepends', file_content))
# package repository
self.repository = PackageRepository.LOCAL
def _copy_source_to_build_dir(self):
"""Copy the package source to the build dir."""
pkg_build_dir = os.path.join(build_dir, self.name)
if os.path.exists(pkg_build_dir) and \
os.path.isdir(pkg_build_dir) and \
(not self.remove_dowloaded_source or not self.build_from_git):
old_pkgbuild_file = os.path.join(pkg_build_dir,
'PKGBUILD')
if os.path.exists(old_pkgbuild_file) and \
os.path.isfile(old_pkgbuild_file):
try:
old_pkg_source = PackageSource(
self.name, False, pkg_build_dir)
if old_pkg_source.version == self.version:
if self.repository == PackageRepository.AUR:
shutil.rmtree(self.path, ignore_errors=True)
self.path = pkg_build_dir
return
except:
pass
shutil.rmtree(pkg_build_dir, ignore_errors=True)
shutil.copytree(self.path, pkg_build_dir)
if self.repository == PackageRepository.AUR:
shutil.rmtree(self.path, ignore_errors=True)
self.path = pkg_build_dir
def _download_aur_package_source(self):
"""Fetch package source from the AUR."""
aur_pkg_download_path = tempfile.mkdtemp()
try:
i = aur.info(self.name)
except:
raise NoSuchPackageError(
"No package with the name '{0}' exists in the AUR".format(self.name))
pkg_tar_file_path = os.path.join(aur_pkg_download_path,
i.name + ".tar.gz")
# download package sources from AUR
urllib.request.urlretrieve("https://aur.archlinux.org" +
i.url_path,
pkg_tar_file_path)
# extract source tarball
tar = tarfile.open(pkg_tar_file_path)
tar.extractall(path=aur_pkg_download_path)
tar.close()
os.remove(pkg_tar_file_path)
self.path = os.path.join(aur_pkg_download_path, os.listdir(aur_pkg_download_path)[0])
def makepkg(self, uid, gid):
"""Run makepkg.
Args:
uid (int): UID of the build user
gid (int): GID of the build user
Returns:
bool. True if build was successfull, False if not
"""
self._copy_source_to_build_dir()
# set uid and gid of the build dir
os.chown(self.path, uid, gid)
for root, dirs, files in os.walk(self.path):
for f in dirs + files:
if os.path.isfile(f) or os.path.isdir(f):
os.chown(os.path.join(root, f), uid, gid)
printInfo("Building package {0} {1}...".format(
self.name, self.version))
os.chdir(self.path)
rc, out, err = run_command(['makepkg', '--force', '--nodeps', '--noconfirm'], uid)
if rc != 0:
self.error_info = Exception("Failed to build package '{0}': {1}".format(
self.name, '\n'.join(err)))
return False
# get new version info when build from git
if self.build_from_git:
git_pkg = PackageSource(
self.name, False, self.path)
self.version = git_pkg.version
for pkg_file in glob.glob(os.path.join(self.path, '*.pkg.tar.xz')):
pkg_dest = os.path.join(pacman_cache_dir, os.path.basename(pkg_file))
# move created package to Pacman package cache
shutil.move(pkg_file, pkg_dest)
# set uid and gid of the build package
os.chown(pkg_dest, 0, 0)
if self.is_make_dependency:
self.install()
return True
def get_package_file_name(self):
"""Get the pacman package file name.
Returns:
str. The name of the package
"""
return '{0}-{1}-{2}.pkg.tar.xz'.format(
self.name, self.version, self.architecture)
def get_all_dependencies(self):
"""Get dependencies and make dependencies together.
Returns:
list. Names of all dependencies
"""
return self.dependencies + self.make_dependencies
def install(self):
"""Install the build package."""
if not (self.installation_status == 1 or self.installation_status == 3)\
and (self.build_status == 1 or self.build_status == 2):
pkg_names = [self.name]
# different names if package is a splitted package
if self.split_package_names:
pkg_names = self.split_package_names
for pkg_name in pkg_names:
printInfo("Installing package {0} {1}...".format(
pkg_name, self.version))
rc, out, err = run_command(
['pacman', '-U', '--noconfirm', '--force', '--ignore',
'package-query', '--ignore', 'pacman-mirrorlist',
'--cachedir', pacman_cache_dir, os.path.join(
pacman_cache_dir, '{0}-{1}-{2}.pkg.tar.xz'.format(
pkg_name, self.version, self.architecture))])
if rc != 0:
self.installation_status = -1
self.error_info = Exception(
"Failed to install package '{0}': {1}".format(pkg_name, '\n'.join(err)))
return False
self.installation_status = 3
def change_user(uid):
"""Temporarily change the UID and GID for code execution."""
def set_uid_and_guid():
os.setuid(uid)
return set_uid_and_guid
def run_command(command, uid=None, print_output=True):
"""Run a command in a subprocess.
Args:
command (string): Command to run
uid (int): UID of the user to run with
print_output (bool): True if the output should be printed to stdout and stderr
Returns:
(int, list, list). Return code of the subprocess, sdtout and stderr
"""
if uid:
process = Popen(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, preexec_fn=change_user(uid))
else:
process = Popen(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
if print_output:
err = []
out = []
while True:
tmp = process.stdout.readline()
if tmp:
tmp = tmp.rstrip('\n ')
if tmp != '':
out.append(tmp)
print(tmp)
if process.poll() is not None:
break
time.sleep(.05)
for line in process.stdout.readlines():
tmp = line.rstrip('\n ')
out.append(tmp)
print(tmp)
rc = process.poll()
if rc != 0:
for line in process.stderr.readlines():
tmp = line.rstrip('\n ')
printError(tmp)
err.append(tmp)
return (rc, out, err)
else:
out, err = process.communicate()
rc = process.returncode
return (rc, out.splitlines(), err.splitlines())
def get_package_recursive(pkg_name,
explicit_build,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
is_make_dependency):
"""Get a package and all their dependencies.
Args:
pkg_name (str): Name of the package
explicit_build (bool): True if package source is given by the user
pkg_dict (dict): Store for package information
locally_available_package_sources (list): List of all locally available package sources
remove_dowloaded_source (bool): If True remove the source downloaded by 'makepkg' before build. If False
the sources will be kept, under the condition that the source is of the same
version of the package to be build
is_make_dependency (bool): True if package shall be installed as a make dependency
"""
# check if package is already in pkg_dict
if pkg_name in pkg_dict:
return
# check if package is in official repo
for pcm_info in packages_in_offical_repositories:
if pcm_info['id'] == pkg_name:
pcm_pkg = PacmanPackage(pkg_name)
pcm_pkg.is_make_dependency = is_make_dependency
pkg_dict[pkg_name] = pcm_pkg
return
# check if package source is locally available
if pkg_name in locally_available_package_sources:
pkg_path = os.path.join(local_source_dir, pkg_name)
lcl_pkg = PackageSource(pkg_name, remove_dowloaded_source, pkg_path)
if lcl_pkg.name in pkg_dict:
return
lcl_pkg.explicit_build = explicit_build
        lcl_pkg.is_make_dependency = is_make_dependency
pkg_dict[pkg_name] = lcl_pkg
# if split package the name can defer
pkg_dict[lcl_pkg.name] = lcl_pkg
if not lcl_pkg.error_info:
for dependency in lcl_pkg.dependencies:
get_package_recursive(dependency,
False,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
True if is_make_dependency else False)
for make_dependency in lcl_pkg.make_dependencies:
get_package_recursive(make_dependency,
False,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
True)
# check for the package in the AUR
else:
aur_pkg = PackageSource(pkg_name, remove_dowloaded_source, None)
if aur_pkg.name in pkg_dict:
return
aur_pkg.explicit_build = explicit_build
pkg_dict[pkg_name] = aur_pkg
# if split package the name can defer
pkg_dict[aur_pkg.name] = aur_pkg
if not aur_pkg.error_info:
for dependency in aur_pkg.dependencies:
get_package_recursive(dependency,
False,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
True if is_make_dependency else False)
for make_dependency in aur_pkg.make_dependencies:
get_package_recursive(make_dependency,
False,
pkg_dict,
locally_available_package_sources,
remove_dowloaded_source,
True)
def build_package_recursive(pkg_name,
pkg_dict,
rebuild,
install_all_dependencies,
uid,
gid):
"""Build a package and all their dependencies.
Args:
pkg_name (str): Name of the package
pkg_dict (dict): Store for package information
rebuild (int): Rebuild behaviour:
0: Build only new versions of packages (default)
1: Rebuild all explicit listed packages
2: Rebuild all explicit listed packages and their dependencies
uid (int): UID of the build user
gid (int): GID of the build user
"""
pkg = pkg_dict[pkg_name]
# break if a error occurred
if pkg.error_info:
return
# break if the package has already been processed
if type(pkg) is PackageSource and pkg.build_status != 0:
return
if type(pkg) is PacmanPackage:
# break if the package has already been processed
if pkg.installation_status < 0 or pkg.installation_status == 3:
return
# install pacman package if it is a make dependency
if (pkg.is_make_dependency or install_all_dependencies):
pkg.install()
return
dependency_changed = False
for dependency in pkg.get_all_dependencies():
pkg_dependency = pkg_dict[dependency]
build_package_recursive(dependency, pkg_dict, rebuild, install_all_dependencies, uid, gid)
if pkg_dependency.error_info:
pkg.build_status = 4
return
else:
if type(pkg_dependency) is PackageSource and \
pkg_dependency.build_status == 1:
dependency_changed = True
pkg.get_installation_status()
if dependency_changed:
if pkg.makepkg(uid, gid):
pkg.build_status = 1
else:
pkg.build_status = 3
else:
# rebuild only if new version is available
if rebuild == 0:
if pkg.cache_available < 2:
if pkg.makepkg(uid, gid):
pkg.build_status = 1
else:
pkg.build_status = 3
else:
pkg.build_status = 2
# rebuild if explicit or a new version is available
elif rebuild == 1:
if pkg.cache_available < 2 or pkg.explicit_build:
if pkg.makepkg(uid, gid):
pkg.build_status = 1
else:
pkg.build_status = 3
else:
pkg.build_status = 2
# rebuild all
elif rebuild == 2:
if pkg.makepkg(uid, gid):
pkg.build_status = 1
else:
pkg.build_status = 3
if install_all_dependencies:
pkg.install()
return
def format_log(pkg, msg, prefix=''):
"""Format a build log for a given packge.
Args:
pkg (PackageBase): The package
msg (str): Message for the package
prefix (str): Prefix added for message in multiple lines
Returns:
str. The formatted build log
"""
msg_lines = msg.splitlines()
if len(msg_lines) > 1:
for i in range(1, len(msg_lines)):
msg_lines[i] = prefix + ' ' + msg_lines[i]
msg = '\n'.join(msg_lines)
if pkg.version:
return "{0} {1}: {2}".format(pkg.name, pkg.version, msg)
return "{0}: {1}".format(pkg.name, msg)
def print_build_log_recursive(pkg_names, pkg_dict, prefix='', is_root=False):
"""Recursivly prints a build log for a given package.
Args:
pkg_names (PackageBase): The package
pkg_dict (dict): Store for package information
prefix (str): Prefix for the message
is_root (bool): True if first recursion
Returns:
(bool, list). Tuple consting of the build status and the log messages as a list
"""
success = True
log = []
log_prefix = prefix + '├── '
intermediate_prefix = prefix + '| '
for pos, anchor, pkg_name in enumerate_package_names(pkg_names):
pkg = pkg_dict[pkg_name]
log_dep = []
if is_root:
log_prefix = ""
intermediate_prefix = ""
elif anchor == 1:
log_prefix = prefix + '└── '
intermediate_prefix = prefix + ' '
if type(pkg) == PacmanPackage:
if pkg.installation_status < 0:
success = False
log.append(log_prefix + format_log(
pkg, "Failed to install: " + str(pkg.error_info), intermediate_prefix))
elif pkg.installation_status == 0:
log.append(log_prefix + format_log(pkg, "Not installed"))
elif pkg.installation_status == 1:
log.append(log_prefix + format_log(pkg, "Skipped install"))
elif pkg.installation_status == 3:
log.append(log_prefix + format_log(pkg, "Successfully installed"))
else:
deps = pkg.get_all_dependencies()
if len(deps) > 0:
success, log_dep = print_build_log_recursive(
deps,
pkg_dict,
intermediate_prefix)
if not success:
log.append(log_prefix + format_log(
pkg, "Dependency Failed: " + str(pkg.error_info), intermediate_prefix))
elif pkg.error_info:
success = False
log.append(log_prefix + format_log(
pkg, "Failed: " + str(pkg.error_info), intermediate_prefix))
else:
if pkg.build_status == 1:
log.append(log_prefix + format_log(
pkg, "Successfully build"))
elif pkg.build_status == 2:
log.append(log_prefix + format_log(
pkg, "Skipped"))
elif pkg.build_status == 3:
log.append(log_prefix + format_log(pkg, "Failed"))
success = False
elif pkg.build_status == 4:
log.append(log_prefix + format_log(pkg, "Dependency Failed"))
success = False
log = log + log_dep
return success, log
def print_build_log(pkg_name, pkg_dict):
"""Print a build log for a given package.
Args:
pkg_names (PackageBase): The package
pkg_dict (dict): Store for package information
"""
success, log = print_build_log_recursive(
[pkg_name], pkg_dict, '', True)
for line in log:
if success:
printSuccessfull(line)
else:
printError(line)
def enumerate_package_names(sequence):
length = len(sequence)
for count, value in enumerate(sequence):
yield count, length - count, value
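# Editor's note (illustrative): enumerate_package_names(['a', 'b', 'c']) yields
# (0, 3, 'a'), (1, 2, 'b'), (2, 1, 'c'). The middle element counts the items still
# remaining, so a value of 1 marks the last package; print_build_log_recursive()
# uses it (as `anchor`) to pick the closing tree prefix.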
def main(argv):
"""Run the main logic.
Args:
argv (list): Command line arguments
"""
parser = argparse.ArgumentParser(
prog='aur-makepkg',
description='Build Pacman packages with makepkg from local source or the AUR',
epilog=''
)
parser.add_argument('-g', '--gid', dest='gid', type=int, default=1000,
help="GID of the build user")
parser.add_argument('-i', '--install-all-dependencies', action='store_true',
dest='install_all_dependencies', default=False,
help="Install all dependencies, not only 'make dependencies'")
parser.add_argument('-k', '--keyrings', dest='keyrings', default=None,
help="Pacman keyrings initialized prior building (comma seperated list)")
parser.add_argument('-p', '--pacman-update', action='store_true',
dest='pacman_update', default=False,
help="Update all installed pacman packages before build")
parser.add_argument('-r', '--rebuild', dest='rebuild', type=int, default=0,
help="""Rebuild behaviour:
0: Build only new versions of packages (default)
1: Rebuild all explicit listed packages
2: Rebuild all explicit listed packages and their dependencies""")
parser.add_argument('--remove-downloaded-source',
dest='remove_dowloaded_source',
action='store_true', default=False,
help="""Remove the source downloaded by 'makepkg' before build. If not
the sources will be kept, under the condition that the source is of the same
version of the package to be build. (Note: Sources of packages build from a Git repository
will always be removed.)""")
parser.add_argument('-u', '--uid', dest='uid', type=int, default=1000,
help="UID of the build user")
parser.add_argument('build_package_names', nargs='+',
help="Name fo packages to be build from local source or the AUR")
args = parser.parse_args(argv)
# create build user and group
try:
grp.getgrgid(args.gid)
except Exception:
os.system("groupadd -g {0} build-user".format(args.gid))
try:
pwd.getpwuid(args.uid)
except Exception:
os.system(
"useradd -p /makepkg/build -m -g {1} -s /bin/bash -u {0} build-user".format(args.uid, args.gid))
# refresh pacman package database
if args.keyrings:
printInfo("Initializing pacman keyring...")
run_command(['pacman-key', '--init'], print_output=False)
rc, out, err = run_command(['pacman-key', '--populate'] + args.keyrings.split(','), print_output=True)
if rc != 0:
raise Exception("Failed to initialize Pacman keyrings: " + '\n'.join(err))
# refresh pacman package database
printInfo("Update pacman package database...")
pacman.refresh()
global packages_in_cache, packages_in_offical_repositories
packages_in_cache = [x for x in os.listdir(pacman_cache_dir) if
os.path.isfile(os.path.join(pacman_cache_dir, x))]
packages_in_offical_repositories = pacman.get_available()
if args.pacman_update:
# upgrade installed pacman packages
printInfo("Upgrading installed pacman packages...")
rc, out, err = run_command(['pacman', '-Su', '--noconfirm', '--force',
'--ignore', 'package-query', '--ignore',
'pacman-mirrorlist', '--cachedir',
pacman_cache_dir], print_output=True)
if rc != 0:
raise Exception("Failed to upgrade Pacman packages: " + '\n'.join(err))
pkg_dict = dict()
build_package_names = [x.lower() for x in args.build_package_names]
# look for local package sources
locally_available_package_sources = []
if os.path.exists(local_source_dir) and \
os.path.isdir(local_source_dir):
for d in os.listdir(local_source_dir):
pkgbuild_file_path = os.path.join(d, "PKGBUILD")
if os.path.exists(pkgbuild_file_path) and \
os.path.isfile(pkgbuild_file_path):
locally_available_package_sources.append(os.path.basename(d))
# get packages and their dependencies
for pkg_name in build_package_names:
printInfo("Collecting information about {0}...".format(pkg_name))
get_package_recursive(pkg_name,
True,
pkg_dict,
locally_available_package_sources,
args.remove_dowloaded_source,
False)
# build packages
if pkg_name in pkg_dict:
build_package_recursive(pkg_name,
pkg_dict,
args.rebuild,
args.install_all_dependencies,
args.uid,
args.gid)
# print build statistics
printInfo("\nBuild Statistics:")
for pkg_name in build_package_names:
if pkg_name in pkg_dict:
print_build_log(pkg_name, pkg_dict)
try:
main(sys.argv[1:])
exit(0)
except Exception as e:
printError(str(e))
exit(1)
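# --- Editor's note (illustrative, not part of the original file) ---
# Typical invocation inside the container (package name is hypothetical):
#
#     python3 run.py --uid 1000 --gid 1000 --pacman-update --rebuild 1 some-aur-package
#
# The flags correspond to the argparse options defined in main(); the positional
# arguments name the packages to build from local source or the AUR.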
| 35.861165 | 166 | 0.570133 | 17,632 | 0.477198 | 153 | 0.004141 | 0 | 0 | 0 | 0 | 11,071 | 0.299629 |
90e34a12e8b9e4a3553e847fbec09a27e4f92d6a | 12,680 | py | Python | test/test_schemagen.py | hd23408/nist-schemagen | 258e453d6f3bdc763e48e2c32668c932f7ffcd40 | [ "MIT" ] | 1 | 2021-09-01T02:11:03.000Z | 2021-09-01T02:11:03.000Z | test/test_schemagen.py | hd23408/nist-schemagen | 258e453d6f3bdc763e48e2c32668c932f7ffcd40 | [ "MIT" ] | null | null | null | test/test_schemagen.py | hd23408/nist-schemagen | 258e453d6f3bdc763e48e2c32668c932f7ffcd40 | [ "MIT" ] | null | null | null | """Test methods for testing the schemagen package (specifically,
the SchemaGenerator class).
Typical usage example:
python -m unittest
or, to run a single test:
python -m unittest -k test__build_schema
"""
import unittest
import pathlib
import logging
import copy
import os
import pandas as pd
import numpy as np
import schemagen
import filecmp
import string
# Suppress log messages so they don't confuse readers of the test output
logging.basicConfig(level=os.environ.get("LOGLEVEL", "CRITICAL"))
# Sample files for testing
INVALID_INPUT_DATA_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/invalid_input_data.csv"))
EMPTY_INPUT_DATA_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/empty_input_data.csv"))
VALID_INPUT_DATA_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/valid_input_data.csv"))
VALID_SCHEMA_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/parameters.json"))
TEST_OUTPUT_DIRECTORY = str(pathlib.Path(__file__).parent.
joinpath("test_output_files"))
VALID_OUTPUT_PARAMETERS_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/writing_tests/parameters.json"))
VALID_OUTPUT_DATATYPES_FILE = str(pathlib.Path(__file__).parent.
joinpath("files_for_testing/writing_tests/column_datatypes.json"))
# Test dataframes to convert to a schema. This should contain
# an assortment of the different types that we expect to parse:
# A - float numeric categorical (with missing values)
# B - uint32 numeric range
# C - string categorical
# D - free-text string (not categorical)
#
VALID_TEST_DATAFRAME = pd.DataFrame.from_dict(
{
"A": [1, 2, 3, 4, 5, None, None, None, None, None] * 5,
"B": list(range(1000000, 1000050, 1)),
"C": ["A", "B", "C", "D", "E"] * 10,
"D": list(string.ascii_letters)[0 : 50]
}
)
# This isn't really a dataframe, it's a dict
INVALID_TEST_DATAFRAME = {
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": list(range(1, 8, 1))
}
# The appropriate schema and column datatypes to create from the test data above
VALID_TEST_SCHEMA = {
"schema": {
"A": {
"dtype": "float", # This gets turned into a float because of the 'None's
"kind": "categorical",
"values": [ 1.0, 2.0, 3.0, 4.0, 5.0 ],
"codes": [ 1, 2, 3, 4, 5 ]
},
"B": {
"dtype": "uint32",
"kind": "numeric",
"min": 1000000,
"max": 1000049,
"bins": 10
},
"C": {
"dtype": "str",
"kind": "categorical",
"values": ["A", "B", "C", "D", "E"],
"codes": [1, 2, 3, 4, 5]
},
"D": {
"dtype": "str",
"kind": "text"
}
}
}
VALID_TEST_COLUMN_DATATYPES = {
"dtype": {
"A": "float",
"B": "uint32",
"C": "str",
"D": "str"
}
}
class TestSchemaGenerator(unittest.TestCase):
"""Test class for the schemagen.SchemaGenerator class.
"""
def test_ctor(self):
"""
Test that a SchemaGenerator can be appropriately
instantiated, and that it initializes its internal variables
appropriately.
"""
schema_generator = schemagen.SchemaGenerator()
self.assertIs(type(schema_generator), schemagen.SchemaGenerator)
self.assertIs(schema_generator.input_csv_file, None)
self.assertIs(schema_generator.input_data_as_dataframe, None)
self.assertIs(schema_generator.output_schema, None)
def test_read_and_parse_csv(self):
"""
Test the full process of reading in and parsing a CSV file.
Make sure the `SchemaGenerator.read_and_parse_csv` method
returns True or False depending on whether it succeeded.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that attempting to parse an invalid file results in "False"
result = schema_generator.read_and_parse_csv(INVALID_INPUT_DATA_FILE)
self.assertIs(result, False)
# Confirm that a valid CSV loads successfully
result = schema_generator.read_and_parse_csv(VALID_INPUT_DATA_FILE)
self.assertIs(result, True)
def test_output_parameters(self):
"""
Test outputting of the parameters file.
"""
schema_generator = schemagen.SchemaGenerator()
# Make an output directory just for this test
test_output_dir = pathlib.Path(TEST_OUTPUT_DIRECTORY). \
joinpath("test_output_parameters")
test_output_dir.mkdir(parents=True, exist_ok=True)
test_output_dir = str(test_output_dir)
test_output_file = str(pathlib.Path(test_output_dir). \
joinpath("parameters.json"))
# Set the output schema to a known good values;
# here we're JUST testing the writing out of the file
schema_generator.output_schema = copy.deepcopy(VALID_TEST_SCHEMA)
# Test writing out to a non-existent directory
retval = schema_generator.output_parameters_json(output_directory="foo")
self.assertEqual(retval, None)
# Test success path
retval = None
retval = schema_generator.output_parameters_json(output_directory=
test_output_dir)
self.assertEqual(retval, test_output_file)
self.assertTrue(filecmp.cmp(test_output_file, VALID_OUTPUT_PARAMETERS_FILE),
msg = test_output_file + " does not match " +
VALID_OUTPUT_PARAMETERS_FILE)
def test_output_datatypes(self):
"""
Test outputting of the column_datatypes file.
"""
schema_generator = schemagen.SchemaGenerator()
# Make an output directory just for this test
test_output_dir = pathlib.Path(TEST_OUTPUT_DIRECTORY). \
joinpath("test_output_datatypes")
test_output_dir.mkdir(parents=True, exist_ok=True)
test_output_dir = str(test_output_dir)
test_output_file = str(pathlib.Path(test_output_dir). \
joinpath("column_datatypes.json"))
# Set the output datatypes to a known good values;
# here we're JUST testing the writing out of the file
schema_generator.output_datatypes = \
copy.deepcopy(VALID_TEST_COLUMN_DATATYPES)
# Test writing out to a non-existent directory
retval = schema_generator.output_column_datatypes_json(
output_directory="foo")
self.assertEqual(retval, None)
# Test success path
retval = None
retval = schema_generator.output_column_datatypes_json(output_directory=
test_output_dir)
self.assertEqual(retval, test_output_file)
self.assertTrue(filecmp.cmp(test_output_file, VALID_OUTPUT_DATATYPES_FILE),
msg = test_output_file + " does not match " +
VALID_OUTPUT_DATATYPES_FILE)
def test__load_csv_succeeds(self):
"""
Test that the `SchemaGenerator._load_csv` method can be used to read
in an appropriately formatted CSV file.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that a valid CSV loads into a DataFrame without throwing errors
result = schema_generator._load_csv(VALID_INPUT_DATA_FILE) # We want to test private methods... pylint: disable=protected-access
self.assertIsInstance(result, pd.core.frame.DataFrame)
def test__load_csv_fails(self):
"""
Test that the `SchemaGenerator._load_csv` method fails when
it tries to read a badly formatted CSV or is given an empty
filename.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that the FileNotFoundError is raised for a non-existing file
with self.assertRaises(FileNotFoundError):
schema_generator._load_csv("") # We want to test private methods... pylint: disable=protected-access
# Confirm that the ParserError is raised when it can't parse the file
with self.assertRaises(pd.errors.ParserError):
schema_generator._load_csv(INVALID_INPUT_DATA_FILE) # We want to test private methods... pylint: disable=protected-access
# Confirm that the EmptyDataError is raised if called against an empty file
with self.assertRaises(pd.errors.EmptyDataError):
schema_generator._load_csv(EMPTY_INPUT_DATA_FILE) # We want to test private methods... pylint: disable=protected-access
def test__build_schema_succeeds(self):
"""
Test that the `SchemaGenerator._build_schema` method can build
an expected schema from a properly formatted DataFrame.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that when we build schema off of our test dataframe,
# we get a result that looks like our expected schema
(params, columns) = schema_generator._build_schema(VALID_TEST_DATAFRAME,
include_text_columns=True) # We want to test private methods... pylint: disable=protected-access
self.assertEqual(params, VALID_TEST_SCHEMA)
self.assertEqual(columns, VALID_TEST_COLUMN_DATATYPES)
# Confirm that when we build schema off of our test dataframe,
# and include "na", we get a result that looks like we expect
(params, columns) = schema_generator._build_schema(VALID_TEST_DATAFRAME, # We want to test private methods... pylint: disable=protected-access
include_text_columns=True, include_na=True)
valid_schema_with_nan = copy.deepcopy(VALID_TEST_SCHEMA)
valid_schema_with_nan["schema"]["A"]["values"].append(np.NaN)
# Including NaN is going to make everything in the column a float
valid_schema_with_nan["schema"]["A"]["dtype"] = "float"
valid_schema_with_nan["schema"]["A"]["values"] = \
list(map(float, valid_schema_with_nan["schema"]["A"]["values"]))
valid_schema_with_nan["schema"]["A"]["codes"] = \
[1, 2, 3, 4, 5, 6]
valid_dtypes_with_nan = copy.deepcopy(VALID_TEST_COLUMN_DATATYPES)
valid_dtypes_with_nan["dtype"]["A"] = "float"
# Need to use np's assertion in order to make NaN == NaN
np.testing.assert_equal(params, valid_schema_with_nan)
self.assertEqual(columns, valid_dtypes_with_nan)
def test__build_schema_fails(self):
"""
Test that the `SchemaGenerator._build_schema` method fails appropriately
when trying to build a schema from something that is not a DataFrame.
"""
schema_generator = schemagen.SchemaGenerator()
# Confirm that when we build schema off of our test invalid dataframe,
# we fail in the right way
with self.assertRaises(AttributeError):
schema_generator._build_schema(INVALID_TEST_DATAFRAME, # We want to test private methods... pylint: disable=protected-access
max_values_for_categorical = 4)
def test__getters(self):
"""
Test that the getters for the output schema and the column datatypes
return the correct objects.
"""
schema_generator = schemagen.SchemaGenerator()
schema_generator.output_schema = copy.deepcopy(VALID_TEST_SCHEMA)
self.assertEqual(schema_generator.get_parameters_json(),
VALID_TEST_SCHEMA)
schema_generator.output_datatypes = \
copy.deepcopy(VALID_TEST_COLUMN_DATATYPES)
self.assertEqual(schema_generator.get_column_datatypes_json(),
VALID_TEST_COLUMN_DATATYPES)
def test__get_series_dtype(self):
"""
Test that the method that determines the appropriate datatype, min, and max
values does the right thing.
"""
schema_generator = schemagen.SchemaGenerator()
series = pd.Series(["a", "b", "c", "d"])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("str", None, None))
series = pd.Series([1, 2, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("uint8", 1, 9))
series = pd.Series(list(range(1000000, 1000050, 1)))
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("uint32", 1000000, 1000049))
series = pd.Series([0.1, 0.15, 0.2, 0.214, 0.25])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("float", 0.1, 0.25))
series = pd.Series([-1, 0, 1, -2, 0, -3])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("int8", -3, 1))
# If min is 0, don't "fuzz" it, to avoid going negative
series = pd.Series([0, 1, 2, 3, 4, 5])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("uint8", 0, 5))
series = pd.Series(["2021-02-25", "2021-01-05", "2021-06-22"])
self.assertEqual(schema_generator._get_series_dtype(series), # We want to test private methods... pylint: disable=protected-access
("date", "2021-01-05 00:00:00", "2021-06-22 00:00:00"))
if __name__ == "__main__":
unittest.main()
| 38.895706 | 146 | 0.711435 | 9,870 | 0.778391 | 0 | 0 | 0 | 0 | 0 | 0 | 5,358 | 0.422555 |
90e49b3af233dbd74b52999ca2aa64df02b0beff | 368 | py | Python | core/migrations/0008_auto_20190528_1802.py | peterson-dev/code-snippet-app | b5ecb7b8b679c307d361a7ce100d4115f92d99a5 | [ "MIT" ] | 2 | 2019-05-22T21:54:43.000Z | 2019-05-26T22:22:14.000Z | core/migrations/0008_auto_20190528_1802.py | peterson-dev/code-snippet-app | b5ecb7b8b679c307d361a7ce100d4115f92d99a5 | [ "MIT" ] | 14 | 2020-02-12T00:04:05.000Z | 2022-03-11T23:51:10.000Z | core/migrations/0008_auto_20190528_1802.py | peterson-dev/kode-kangaroo | b5ecb7b8b679c307d361a7ce100d4115f92d99a5 | [ "MIT" ] | null | null | null | # Generated by Django 2.2.1 on 2019-05-28 22:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20190523_1740'),
]
operations = [
migrations.RenameField(
model_name='snippet',
old_name='post_content',
new_name='content',
),
]
| 19.368421 | 47 | 0.589674 | 283 | 0.769022 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.298913 |
90e4c87211faae293a93093b6b860b2f8d021a50 | 2,740 | py | Python | scripts/Biupdownsample/grad_check.py | dongdong93/a2u_matting | 1d0ad8e630cce50c5b36c40ad384c888d292f9a8 | [ "MIT" ] | 22 | 2021-04-28T03:48:53.000Z | 2022-01-24T09:42:53.000Z | scripts/Biupdownsample/grad_check.py | dongdong93/a2u_matting | 1d0ad8e630cce50c5b36c40ad384c888d292f9a8 | [ "MIT" ] | 1 | 2021-08-08T20:10:18.000Z | 2021-08-23T07:33:38.000Z | scripts/Biupdownsample/grad_check.py | dongdong93/a2u_matting | 1d0ad8e630cce50c5b36c40ad384c888d292f9a8 | [ "MIT" ] | 5 | 2021-09-17T08:02:06.000Z | 2022-01-24T09:43:03.000Z | import os.path as osp
import sys
import subprocess
subprocess.call(['pip', 'install', 'cvbase'])
import cvbase as cvb
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from biupdownsample import biupsample_naive, BiupsampleNaive
from biupdownsample import bidownsample_naive, BidownsampleNaive
feat = torch.randn(2, 64, 2, 2, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
2, 100, 4, 4, requires_grad=True, device='cuda:0').sigmoid().double()
print('Gradcheck for biupsample naive...')
test = gradcheck(BiupsampleNaive(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
feat = torch.randn(
2, 1024, 100, 100, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float()
loop_num = 500
time_naive_forward = 0
time_naive_backward = 0
bar = cvb.ProgressBar(loop_num)
timer = cvb.Timer()
for i in range(loop_num):
x = biupsample_naive(feat.clone(), mask.clone(), 5, 1, 2)
torch.cuda.synchronize()
time_naive_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_naive_backward += timer.since_last_check()
bar.update()
forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num
backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num
print('\nBiupsample naive time forward: '
f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')
# ---------------------------------------------------------------
feat = torch.randn(2, 64, 4, 4, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
2, 16, 4, 4, requires_grad=True, device='cuda:0').double()
print('Gradcheck for bidownsample naive...')
test = gradcheck(BidownsampleNaive(4, 1, 1), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
feat = torch.randn(
2, 512, 200, 200, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
2, 100, 100, 100, requires_grad=True, device='cuda:0').sigmoid().float()
loop_num = 500
time_naive_forward = 0
time_naive_backward = 0
bar = cvb.ProgressBar(loop_num)
timer = cvb.Timer()
for i in range(loop_num):
x = bidownsample_naive(feat.clone(), mask.clone(), 10, 1, 2)
torch.cuda.synchronize()
time_naive_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_naive_backward += timer.since_last_check()
bar.update()
forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num
backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num
print('\nBidownsample naive time forward: '
f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')
| 31.494253 | 79 | 0.691606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 439 | 0.160219 |