hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | content | avg_line_length | max_line_length | alphanum_fraction | count_classes | score_classes | count_generators | score_generators | count_decorators | score_decorators | count_async_functions | score_async_functions | count_documentation | score_documentation
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81b75b5619b0c3e8fbf77d44a083c297724d2365 | 6,110 | py | Python | ConfigUpdater.py | godfatherlmh/LoLAnalyzer | 0b265b33230316ab1a1459a9767ef7527a4a4f89 | ["MIT"] | null | null | null | ConfigUpdater.py | godfatherlmh/LoLAnalyzer | 0b265b33230316ab1a1459a9767ef7527a4a4f89 | ["MIT"] | null | null | null | ConfigUpdater.py | godfatherlmh/LoLAnalyzer | 0b265b33230316ab1a1459a9767ef7527a4a4f89 | ["MIT"] | null | null | null |
# Update the working patch and champions list
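# Hypothetical usage: run this file directly (it calls run() at the bottom) or
# import it and call run() yourself. On the first run it prompts for a Riot API
# key and download preferences and creates config.ini; on later runs it only
# refreshes the patch and champion lists stored in that file.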
from __future__ import print_function
import configparser
import json
import os
import urllib.request
from datetime import datetime
from slugify import slugify
from collections import OrderedDict
from InterfaceAPI import InterfaceAPI
def run():
config = configparser.ConfigParser()
if os.path.isfile('config.ini'):
config.read('config.ini')
API_KEY = config['PARAMS']['api-key']
else:
def validationInput(msg, validAns):
while True:
ans = input(msg)
if ans.lower() in validAns:
return ans
print('Incorrect value. Only', validAns, 'are accepted')
config.add_section('PARAMS')
config.add_section('LEAGUES')
config.add_section('REGIONS')
config.add_section('PATCHES')
config.add_section('CHAMPIONS')
config.add_section('ROLES')
config.add_section('TOP')
config.add_section('JUNGLE')
config.add_section('MID')
config.add_section('CARRY')
config.add_section('SUPPORT')
print("No config file found. Let's set up a few parameters (you may change them anytime by manually editing config.ini).")
API_KEY = input('- API-KEY (https://developer.riotgames.com/): ')
config['PARAMS']['api-key'] = API_KEY
        config['PARAMS']['database'] = input('- Database location (eg. C:\\LoLAnalyzerDB): ')
print('Leagues you want to download games from (y/n): ')
print('challenger league enabled by default')
config['LEAGUES']['challenger'] = 'yes'
config['LEAGUES']['master'] = 'yes' if validationInput('- master: ', ['y', 'n']) == 'y' else 'no'
if config['LEAGUES']['master'] == 'yes' :
print('Lower leagues are not recommended unless you have a high rate API-KEY (not given by default)')
config['LEAGUES']['diamond'] = 'yes' if validationInput('- diamond: ', ['y', 'n']) == 'y' else 'no'
if config['LEAGUES']['diamond'] == 'yes' :
config['LEAGUES']['platinum'] = 'yes' if validationInput('- platinum: ', ['y', 'n']) == 'y' else 'no'
if config['LEAGUES']['platinum'] == 'yes' :
config['LEAGUES']['gold'] = 'yes' if validationInput('- gold: ', ['y', 'n']) == 'y' else 'no'
if config['LEAGUES']['gold'] == 'yes' :
config['LEAGUES']['silver'] = 'yes' if validationInput('- silver: ', ['y', 'n']) == 'y' else 'no'
if config['LEAGUES']['silver'] == 'yes' :
config['LEAGUES']['bronze'] = 'yes' if validationInput('- bronze: ', ['y', 'n']) == 'y' else 'no'
print('Regions you want to download games from (y/n):')
print('API-KEY limitations are server-bounded, so you will download way more games enabling everything')
config['REGIONS']['ru'] = 'yes' if validationInput('- ru: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['kr'] = 'yes' if validationInput('- kr: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['br1'] = 'yes' if validationInput('- br1: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['oc1'] = 'yes' if validationInput('- oc1: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['jp1'] = 'yes' if validationInput('- jp1: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['na1'] = 'yes' if validationInput('- na1: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['eun1'] = 'yes' if validationInput('- eun1: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['euw1'] = 'yes' if validationInput('- euw1: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['tr1'] = 'yes' if validationInput('- tr1: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['la1'] = 'yes' if validationInput('- la1: ', ['y', 'n']) == 'y' else 'no'
config['REGIONS']['la2'] = 'yes' if validationInput('- la2: ', ['y', 'n']) == 'y' else 'no'
# Update to current patch & champions list
# euw1 is used as reference
api = InterfaceAPI(API_KEY)
PATCHES = api.getData('https://euw1.api.riotgames.com/lol/static-data/v3/versions')
PATCHES = ['.'.join(s.split('.')[:2]) for s in reversed(PATCHES)]
config['PARAMS']['download_patches'] = PATCHES[-1]
print('Current patch set to:', config['PARAMS']['download_patches'])
PATCHES = OrderedDict((x, True) for x in PATCHES).keys()
config['PARAMS']['patches'] = ','.join(PATCHES)
print('Patch list updated')
json_data = api.getData('https://euw1.api.riotgames.com/lol/static-data/v3/champions', data={'locale': 'en_US', 'dataById': 'true'})
CHAMPIONS = json_data['data']
sortedChamps = []
for champ_id, champ_info in CHAMPIONS.items():
slugname = slugify(champ_info['name'], separator='')
config['CHAMPIONS'][slugname] = champ_id
sortedChamps.append(slugname)
# We need to sort champions by release for the neural network
# This is really important for the compatibility of the system over the patches
# Unfortunately the API doesn't give this information, so we use: http://universe-meeps.leagueoflegends.com/v1/en_us/champion-browse/index.json
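    # The index.json is assumed to look roughly like
    # {"champions": [{"name": "Annie", "release-date": "2009-02-21", ...}, ...]}
    # and only the 'name' and 'release-date' fields are used below.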
response = urllib.request.urlopen('http://universe-meeps.leagueoflegends.com/v1/en_us/champion-browse/index.json')
data = json.loads(response.read().decode())
champ_date = {}
for champ in data['champions']:
date = champ['release-date']
date = date[1:] if date[0] == ' ' else date # solve a problem on annie
date = date[:10] # solve a problem on aatrox
champ_date[slugify(champ['name'], separator='')] = datetime.strptime(date, '%Y-%m-%d')
sortedChamps.sort(key=lambda x: (champ_date[x], x)) # sorted by date and then abc order (eg. annie/yi or xhaya/rakan)
config['PARAMS']['sortedChamps'] = ','.join(sortedChamps)
print('Champions list updated')
with open('config.ini', 'w') as configfile:
config.write(configfile)
print('-- Update complete --')
if __name__ == '__main__':
run()
| 54.553571 | 147 | 0.592635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,642 | 0.432406 |
81b791765a1072aa2be9a40bf2e9fca71ca77b5d | 555 | py | Python | app/migrations/0010_auto_20200709_1512.py | RuijiaX/w3hacks | 79e1eb81836b766737e8f053a26495ec2c6fd963 | ["MIT"] | 1 | 2020-08-30T04:49:20.000Z | 2020-08-30T04:49:20.000Z | app/migrations/0010_auto_20200709_1512.py | RuijiaX/w3hacks | 79e1eb81836b766737e8f053a26495ec2c6fd963 | ["MIT"] | 44 | 2020-06-21T03:10:35.000Z | 2020-08-08T23:55:19.000Z | app/migrations/0010_auto_20200709_1512.py | RuijiaX/w3hacks | 79e1eb81836b766737e8f053a26495ec2c6fd963 | ["MIT"] | 2 | 2020-07-01T16:54:58.000Z | 2020-07-13T21:13:06.000Z |
# Generated by Django 3.0.7 on 2020-07-09 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0009_auto_20200709_1430'),
]
operations = [
migrations.AlterField(
model_name='location',
name='lat',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='location',
name='lng',
field=models.IntegerField(blank=True, null=True),
),
]
| 23.125 | 61 | 0.574775 | 462 | 0.832432 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.192793 |
81b8a377f7e00482ba8d3e94e5cc8f42cb23bfce | 28,078 | py | Python | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | ["MIT"] | null | null | null | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | ["MIT"] | 1 | 2020-05-18T11:06:28.000Z | 2020-05-18T11:06:28.000Z | tests/test_fitting.py | adrdrew/viroconcom | 3eb748ba8e3e076eddd174a0fcdfee3917aa4045 | ["MIT"] | null | null | null |
import unittest
import csv
import numpy as np
from viroconcom.fitting import Fit
def read_benchmark_dataset(path='tests/testfiles/1year_dataset_A.txt'):
"""
    Reads a dataset provided for the environmental contour benchmark.
Parameters
----------
path : string
        Path to dataset including the file name, defaults to 'tests/testfiles/1year_dataset_A.txt'
Returns
-------
x : ndarray of doubles
Observations of the environmental variable 1.
y : ndarray of doubles
Observations of the environmental variable 2.
x_label : str
        Label of the environmental variable 1.
y_label : str
Label of the environmental variable 2.
"""
x = list()
y = list()
x_label = None
y_label = None
with open(path, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=';')
idx = 0
for row in reader:
if idx == 0:
                x_label = row[1][1:]  # Ignore first char (is a white space).
                y_label = row[2][1:]  # Ignore first char (is a white space).
if idx > 0: # Ignore the header
x.append(float(row[1]))
y.append(float(row[2]))
idx = idx + 1
x = np.asarray(x)
y = np.asarray(y)
return (x, y, x_label, y_label)
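# Usage sketch (the default path is the 1-year dataset A used by the tests below):
#   sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
#   print(label_hs, label_tz, len(sample_hs))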
class FittingTest(unittest.TestCase):
def test_2d_fit(self):
"""
2-d Fit with Weibull and Lognormal distribution.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_1 = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents spectral peak period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.4165147571863412, places=5)
self.assertAlmostEqual(dist0.scale(0), 2.833833521811032, places=5)
self.assertAlmostEqual(dist0.loc(0), 0.07055663251419833, places=5)
self.assertAlmostEqual(dist1.shape(0), 0.17742685807554776 , places=5)
#self.assertAlmostEqual(dist1.scale, 7.1536437634240135+2.075539206642004e^{0.1515051024957754x}, places=5)
self.assertAlmostEqual(dist1.loc, None, places=5)
# Now use a 2-parameter Weibull distribution instead of 3-p distr.
dist_description_0 = {'name': 'Weibull_2p',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1))
self.assertEqual(str(my_fit)[0:5], 'Fit()')
def test_2d_benchmark_case(self):
"""
Reproduces the baseline results presented in doi: 10.1115/OMAE2019-96523 .
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset(
path='tests/testfiles/allyears_dataset_A.txt')
# Describe the distribution that should be fitted to the sample.
dist_description_0 = {'name': 'Weibull_3p',
'dependency': (None, None, None),
'width_of_intervals': 0.5}
dist_description_1 = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
'functions': ('exp3', None, 'power3')} # Shape, location, scale.
# Compute the fit.
my_fit = Fit((sample_hs, sample_tz),
(dist_description_0, dist_description_1))
# Evaluate the fitted parameters.
dist0 = my_fit.mul_var_dist.distributions[0]
dist1 = my_fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist0.shape(0), 1.48, delta=0.02)
self.assertAlmostEqual(dist0.scale(0), 0.944, delta=0.01)
self.assertAlmostEqual(dist0.loc(0), 0.0981, delta=0.001)
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.001)
self.assertAlmostEqual(dist1.shape.b, 0.308, delta=0.002)
self.assertAlmostEqual(dist1.shape.c, -0.250, delta=0.002)
self.assertAlmostEqual(dist1.scale.a, 1.47 , delta=0.02)
self.assertAlmostEqual(dist1.scale.b, 0.214, delta=0.002)
self.assertAlmostEqual(dist1.scale.c, 0.641, delta=0.002)
self.assertAlmostEqual(dist1.scale(0), 4.3 , delta=0.1)
self.assertAlmostEqual(dist1.scale(2), 6, delta=0.1)
self.assertAlmostEqual(dist1.scale(5), 8, delta=0.1)
def test_2d_exponentiated_wbl_fit(self):
"""
Tests if a 2D fit that includes an exp. Weibull distribution works.
"""
prng = np.random.RandomState(42)
# Draw 1000 samples from a Weibull distribution with shape=1.5 and scale=3,
# which represents significant wave height.
sample_hs = prng.weibull(1.5, 1000)*3
# Let the second sample, which represents zero-upcrossing period increase
# with significant wave height and follow a Lognormal distribution with
# mean=2 and sigma=0.2
sample_tz = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_hs]
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'power3')
# Shape, Location, Scale
}
# Fit the model to the data, first test a 1D fit.
fit = Fit(sample_hs, dist_description_hs)
# Now perform the 2D fit.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
dist0 = fit.mul_var_dist.distributions[0]
self.assertGreater(dist0.shape(0), 1) # Should be about 1.5.
self.assertLess(dist0.shape(0), 2)
self.assertIsNone(dist0.loc(0)) # Has no location parameter, should be None.
self.assertGreater(dist0.scale(0), 2) # Should be about 3.
self.assertLess(dist0.scale(0), 4)
self.assertGreater(dist0.shape2(0), 0.5) # Should be about 1.
self.assertLess(dist0.shape2(0), 2)
def test_fit_lnsquare2(self):
"""
Tests a 2D fit that includes an logarithm square dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertGreater(dist1.scale.a, 1) # Should be about 1-5
self.assertLess(dist1.scale.a, 5) # Should be about 1-5
self.assertGreater(dist1.scale.b, 2) # Should be about 2-10
self.assertLess(dist1.scale.b, 10) # Should be about 2-10
self.assertGreater(dist1.scale(0), 0.1)
self.assertLess(dist1.scale(0), 10)
self.assertEqual(dist1.scale.func_name, 'lnsquare2')
def test_fit_powerdecrease3(self):
"""
Tests a 2D fit that includes an powerdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('powerdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertGreater(dist1.shape.a, -0.1) # Should be about 0
self.assertLess(dist1.shape.a, 0.1) # Should be about 0
self.assertGreater(dist1.shape.b, 1.5) # Should be about 2-5
self.assertLess(dist1.shape.b, 6) # Should be about 2-10
self.assertGreater(dist1.shape.c, 0.8) # Should be about 1.1
self.assertLess(dist1.shape.c, 2) # Should be about 1.1
self.assertGreater(dist1.shape(0), 0.25) # Should be about 0.35
self.assertLess(dist1.shape(0), 0.4) # Should be about 0.35
self.assertEqual(dist1.shape.func_name, 'powerdecrease3')
def test_fit_asymdecrease3(self):
"""
Tests a 2D fit that includes an asymdecrease3 dependence function.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('asymdecrease3', None, 'lnsquare2')
# Shape, Location, Scale
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
self.assertAlmostEqual(dist1.shape.a, 0, delta=0.1) # Should be about 0
self.assertAlmostEqual(dist1.shape.b, 0.35, delta=0.4) # Should be about 0.35
self.assertAlmostEqual(np.abs(dist1.shape.c), 0.45, delta=0.2) # Should be about 0.45
        self.assertAlmostEqual(dist1.shape(0), 0.35, delta=0.2) # Should be about 0.35
def test_min_number_datapoints_for_fit(self):
"""
Tests if the minimum number of datapoints required for a fit works.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
# Shape, Location, Scale, Shape2
'width_of_intervals': 0.5}
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 10
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_10 = dist1.scale.a
# Now require more datapoints for a fit.
dist_description_tz = {'name': 'Lognormal_SigmaMu',
'dependency': (0, None, 0),
# Shape, Location, Scale
'functions': ('exp3', None, 'lnsquare2'),
# Shape, Location, Scale
'min_datapoints_for_fit': 500
}
# Fit the model to the data.
fit = Fit((sample_hs, sample_tz),
(dist_description_hs, dist_description_tz))
# Check whether the logarithmic square fit worked correctly.
dist1 = fit.mul_var_dist.distributions[1]
a_min_500 = dist1.scale.a
# Because in case 2 fewer bins have been used we should get different
# coefficients for the dependence function.
self.assertNotEqual(a_min_10, a_min_500)
    def test_multi_processing(self):
"""
2-d Fit with multiprocessing (specified by setting a value for timeout)
"""
# Define a sample and a fit.
prng = np.random.RandomState(42)
sample_1 = prng.weibull(1.5, 1000)*3
sample_2 = [0.1 + 1.5 * np.exp(0.2 * point) +
prng.lognormal(2, 0.2) for point in sample_1]
dist_description_0 = {'name': 'Weibull',
'dependency': (None, None, None),
'width_of_intervals': 2}
dist_description_1 = {'name': 'Lognormal',
'dependency': (None, None, 0),
'functions': (None, None, 'exp3')}
# Compute the fit.
my_fit = Fit((sample_1, sample_2),
(dist_description_0, dist_description_1),
timeout=10)
def test_wbl_fit_with_negative_location(self):
"""
Tests fitting a translated Weibull distribution which would result
in a negative location parameter.
"""
sample_hs, sample_tz, label_hs, label_tz = read_benchmark_dataset()
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_hs = {'name': 'Weibull_3p',
'dependency': (None, None, None)}
# Fit the model to the data.
fit = Fit((sample_hs, ),
(dist_description_hs, ))
# Correct values for 10 years of data can be found in
# 10.1115/OMAE2019-96523 . Here we used 1 year of data.
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
self.assertGreater(dist0.loc(0), 0.0) # Should be 0.0981
self.assertLess(dist0.loc(0), 0.3) # Should be 0.0981
self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
# Shift the wave data with -1 m and fit again.
sample_hs = sample_hs - 2
# Negative location values will be set to zero instead and a
# warning will be raised.
with self.assertWarns(RuntimeWarning):
fit = Fit((sample_hs, ),
(dist_description_hs, ))
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0) / 10, 1.48 / 10, places=1)
# Should be estimated to be 0.0981 - 2 and corrected to be 0.
self.assertEqual(dist0.loc(0), 0)
self.assertAlmostEqual(dist0.scale(0), 0.944, places=1)
def test_omae2020_wind_wave_model(self):
"""
Tests fitting the wind-wave model that was used in the publication
'Global hierarchical models for wind and wave contours' on dataset D.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist0 = fit.mul_var_dist.distributions[0]
self.assertAlmostEqual(dist0.shape(0), 2.42, delta=1)
self.assertAlmostEqual(dist0.scale(0), 10.0, delta=2)
self.assertAlmostEqual(dist0.shape2(0), 0.761, delta=0.5)
dist1 = fit.mul_var_dist.distributions[1]
self.assertEqual(dist1.shape2(0), 5)
inspection_data1 = fit.multiple_fit_inspection_data[1]
self.assertEqual(inspection_data1.shape2_value[0], 5)
self.assertAlmostEqual(inspection_data1.shape_value[0], 0.8, delta=0.5) # interval centered at 1
self.assertAlmostEqual(inspection_data1.shape_value[4], 1.5, delta=0.5) # interval centered at 9
self.assertAlmostEqual(inspection_data1.shape_value[9], 2.5, delta=1) # interval centered at 19
self.assertAlmostEqual(dist1.shape(0), 0.8, delta=0.3)
self.assertAlmostEqual(dist1.shape(10), 1.6, delta=0.5)
self.assertAlmostEqual(dist1.shape(20), 2.3, delta=0.7)
self.assertAlmostEqual(dist1.shape.a, 0.582, delta=0.5)
self.assertAlmostEqual(dist1.shape.b, 1.90, delta=1)
self.assertAlmostEqual(dist1.shape.c, 0.248, delta=0.5)
self.assertAlmostEqual(dist1.shape.d, 8.49, delta=5)
self.assertAlmostEqual(inspection_data1.scale_value[0], 0.15, delta=0.2) # interval centered at 1
self.assertAlmostEqual(inspection_data1.scale_value[4], 1, delta=0.5) # interval centered at 9
self.assertAlmostEqual(inspection_data1.scale_value[9], 4, delta=1) # interval centered at 19
self.assertAlmostEqual(dist1.scale(0), 0.15, delta=0.5)
self.assertAlmostEqual(dist1.scale(10), 1, delta=0.5)
self.assertAlmostEqual(dist1.scale(20), 4, delta=1)
self.assertAlmostEqual(dist1.scale.a, 0.394, delta=0.5)
self.assertAlmostEqual(dist1.scale.b, 0.0178, delta=0.1)
self.assertAlmostEqual(dist1.scale.c, 1.88, delta=0.8)
def test_wrong_model(self):
"""
Tests wheter errors are raised when incorrect fitting models are
specified.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# This structure is incorrect as there is not distribution called 'something'.
dist_description_v = {'name': 'something',
'dependency': (None, None, None, None),
'fixed_parameters': (None, None, None, None), # shape, location, scale, shape2
'width_of_intervals': 2}
with self.assertRaises(ValueError):
# Fit the model to the data.
fit = Fit((sample_v, ),
(dist_description_v, ))
# This structure is incorrect as there is not dependence function called 'something'.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('something', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(ValueError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as there will be only 1 or 2 intervals
# that fit 2000 datapoints.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 2000}
with self.assertRaises(RuntimeError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as alpha3 is only compatible with
# logistics4 .
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('power3', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(TypeError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
# This structure is incorrect as only shape2 of an exponentiated Weibull
# distribution can be fixed at the moment.
dist_description_v = {'name': 'Lognormal',
'dependency': (None, None, None, None),
'fixed_parameters': (None, None, 5, None), # shape, location, scale, shape2
'width_of_intervals': 2}
with self.assertRaises(NotImplementedError):
# Fit the model to the data.
fit = Fit((sample_v, ),
(dist_description_v, ))
# This structure is incorrect as only shape2 of an exponentiated Weibull
# distribution can be fixed at the moment.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, 5, None), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20}
with self.assertRaises(NotImplementedError):
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
def test_weighting_of_dependence_function(self):
"""
Tests if using weights when the dependence function is fitted works
correctly.
"""
sample_v, sample_hs, label_v, label_hs = read_benchmark_dataset(path='tests/testfiles/1year_dataset_D.txt')
# Define the structure of the probabilistic model that will be fitted to the
# dataset.
dist_description_v = {'name': 'Weibull_Exp',
'dependency': (None, None, None, None),
'width_of_intervals': 2}
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20,
'do_use_weights_for_dependence_function': False}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist1_no_weights = fit.mul_var_dist.distributions[1]
# Now perform a fit with weights.
dist_description_hs = {'name': 'Weibull_Exp',
'fixed_parameters' : (None, None, None, 5), # shape, location, scale, shape2
'dependency': (0, None, 0, None), # shape, location, scale, shape2
'functions': ('logistics4', None, 'alpha3', None), # shape, location, scale, shape2
'min_datapoints_for_fit': 20,
'do_use_weights_for_dependence_function': True}
# Fit the model to the data.
fit = Fit((sample_v, sample_hs),
(dist_description_v, dist_description_hs))
dist1_with_weights = fit.mul_var_dist.distributions[1]
# Make sure the two fitted dependnece functions are different.
d = np.abs(dist1_with_weights.scale(0) - dist1_no_weights.scale(0)) / \
np.abs(dist1_no_weights.scale(0))
self.assertGreater(d, 0.01)
# Make sure they are not too different.
d = np.abs(dist1_with_weights.scale(20) - dist1_no_weights.scale(20)) / \
np.abs(dist1_no_weights.scale(20))
self.assertLess(d, 0.5)
| 46.563847 | 121 | 0.561044 | 26,704 | 0.951065 | 0 | 0 | 0 | 0 | 0 | 0 | 9,823 | 0.349847 |
81b9e4775c9ff677415dc6ea782a4181f1639a50 | 22,100 | py | Python | python/scripts/wavsep/wavsep.py | rugheid/OSS-ZAP | d486dde326a9120c9ddd52a3d4dcf1b9a2b4d042 | ["Apache-2.0"] | 4 | 2016-08-11T05:35:26.000Z | 2021-11-15T11:27:28.000Z | python/scripts/wavsep/wavsep.py | rugheid/OSS-ZAP | d486dde326a9120c9ddd52a3d4dcf1b9a2b4d042 | ["Apache-2.0"] | 1 | 2018-06-12T13:55:16.000Z | 2018-06-12T15:27:59.000Z | python/scripts/wavsep/wavsep.py | rugheid/OSS-ZAP | d486dde326a9120c9ddd52a3d4dcf1b9a2b4d042 | ["Apache-2.0"] | 12 | 2018-05-15T10:14:00.000Z | 2019-11-10T07:03:16.000Z |
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2012 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script tests ZAP against wavsep: http://code.google.com/p/wavsep/
# Note wavsep has to be installed somewhere - the above link is to the
# project not the test suite!
#
# To run this script:
# * Install the ZAP Python API:
# Use 'pip install python-owasp-zap-v2' or
# download from https://github.com/zaproxy/zaproxy/wiki/Downloads
# * Start ZAP (as this is for testing purposes you might not want the
# 'standard' ZAP to be started)
# * Access wavsep via your browser, proxying through ZAP
# * Visit all of the wavsep top level URLs, eg
# http://localhost:8080/wavsep/index-active.jsp
# http://localhost:8080/wavsep/index-passive.jsp
# * Run the Spider against http://localhost:8080
# * Run the Active Scanner against http://localhost:8080/wavsep
# * Run this script
# * Open the report.html file generated in your browser
#
# Notes:
# This has been tested against wavsep 1.5
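# A minimal sketch of the ZAP Python API calls this script relies on (assuming
# ZAP is listening on 127.0.0.1:8090; change the host/port to match your setup):
#   from zapv2 import ZAPv2
#   zap = ZAPv2(proxies={'http': 'http://127.0.0.1:8090',
#                        'https': 'http://127.0.0.1:8090'})
#   print zap.core.version                # sanity-check the connection
#   alerts = zap.core.alerts('', 0, 100)  # alerts are fetched in pages of 100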
from zapv2 import ZAPv2
import datetime, sys, getopt
def main(argv):
# -------------------------------------------------------------------------
# Default Configurations - use -h and -p for different host and port
# -------------------------------------------------------------------------
zapHost = '127.0.0.1'
zapPort = '8090'
try:
opts, args = getopt.getopt(argv,"h:p:")
except getopt.GetoptError:
print 'wavsep.py -h <ZAPhost> -p <ZAPport>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
zapHost = arg
elif opt == '-p':
zapPort = arg
zapUrl = 'http://' + zapHost + ':' + zapPort
# Dictionary of abbreviation to keep the output a bit shorter
abbrev = {
'Active Vulnerability title' : 'Ex',\
'Cross Site Scripting (DOM Based)' : 'DXSS',\
'Cross Site Scripting (Reflected)' : 'RXSS',\
'Absence of Anti-CSRF Tokens' : 'NoCSRF',\
'Application Error Disclosure' : 'AppError',\
'Anti CSRF Tokens Scanner' : 'ACSRF',\
'Buffer Overflow' : 'Buffer',\
'Cookie set without HttpOnly flag' : 'HttpOnly',\
'Cookie Slack Detector' : 'CookieSlack',\
'Cross Site Request Forgery' : 'CSRF',\
'External Redirect' : 'ExtRedir',\
'Format String Error' : 'Format',\
'HTTP Parameter Override' : 'ParamOver',\
'Information disclosure - database error messages' : 'InfoDb',\
'Information disclosure - debug error messages' : 'InfoDebug',\
'Information Disclosure - Sensitive Informations in URL' : 'InfoUrl',\
'LDAP Injection' : 'LDAP',\
'Loosely Scoped Cookie' : 'CookieLoose',\
'None. Warning only.' : 'NoCSRF2',\
'Password Autocomplete in browser' : 'Auto',\
'Path Traversal' : 'PathTrav',\
'Private IP Disclosure' : 'PrivIP',\
'Remote File Inclusion' : 'RFI',\
'Session ID in URL Rewrite' : 'SessRewrite',\
'Source Code Disclosure - File Inclusion' : 'SrcInc',\
'SQL Injection' : 'SQLi',\
'SQL Injection - MySQL' : 'SqlMySql',\
'SQL Injection - Generic SQL RDBMS' : 'SqlGen',\
'SQL Injection - Boolean Based' : 'SqlBool',\
'SQL Injection - Error Based - Generic SQL RDBMS' : 'SqlGenE',\
'SQL Injection - Error Based - MySQL' : 'SqlMySqlE',\
'SQL Injection - Error Based - Java' : 'SqlJavaE',\
'SQL Injection - Hypersonic SQL - Time Based' : 'SqlHyperT',\
'SQL Injection - MySQL - Time Based' : 'SqlMySqlT',\
'SQL Injection - Oracle - Time Based' : 'SqlOracleT',\
'SQL Injection - PostgreSQL - Time Based' : 'SqlPostgreT',\
'URL Redirector Abuse' : 'UrlRedir',\
'Viewstate without MAC signature (Unsure)' : 'ViewstateNoMac',\
'Weak Authentication Method' : 'WeakAuth',\
'Web Browser XSS Protection Not Enabled' : 'XSSoff',\
'X-Content-Type-Options Header Missing' : 'XContent',\
'X-Frame-Options Header Not Set' : 'XFrame'}
# The rules to apply:
# Column 1: String to match against an alert URL
# Column 2: Alert abbreviation to match
# Column 3: pass, fail, ignore
#
rules = [ \
# All these appear to be valid ;)
['-', 'InfoDebug', 'ignore'], \
['-', 'InfoUrl', 'ignore'], \
['-', 'ACSRF', 'ignore'], \
['-', 'ACSRF', 'ignore'], \
['-', 'Ex', 'ignore'], \
['-', 'CookieLoose', 'ignore'], \
['-', 'CookieSlack', 'ignore'], \
['-', 'NoCSRF2', 'ignore'], \
['-', 'ParamOver', 'ignore'], \
['-', 'PrivIP', 'ignore'], \
['-', 'SrcInc', 'ignore'], \
['-', 'XFrame', 'ignore'], \
['-', 'XContent', 'ignore'], \
['-', 'XSSoff', 'ignore'], \
['LFI-', 'AppError', 'ignore'], \
['LFI-', 'Buffer', 'ignore'], \
['LFI-', 'Format', 'ignore'], \
['LFI-', 'NoCSRF', 'ignore'], \
['LFI-', 'RFI', 'ignore'], \
['LFI-', 'DXSS', 'ignore'], \
['LFI-', 'RXSS', 'ignore'], \
['LFI-', 'SqlHyperT', 'ignore'], \
['LFI-', 'SqlMySql', 'ignore'], \
['LFI-', 'SqlOracleT', 'ignore'], \
['LFI-', 'SqlPostgreT', 'ignore'], \
['Redirect-', 'LDAP', 'ignore'], \
['Redirect-', 'NoCSRF', 'ignore'], \
['Redirect-', 'RFI', 'ignore'], \
['Redirect-', 'DXSS', 'ignore'], \
['Redirect-', 'RXSS', 'ignore'], \
['Redirect-', 'SqlHyperT', 'ignore'], \
['Redirect-', 'SqlMySql', 'ignore'], \
['Redirect-', 'SqlOracleT', 'ignore'], \
['Redirect-', 'SqlPostgreT', 'ignore'], \
['RFI-', 'AppError', 'ignore'], \
['RFI-', 'Buffer', 'ignore'], \
['RFI-', 'Format', 'ignore'], \
['RFI-', 'NoCSRF', 'ignore'], \
['RFI-', 'DXSS', 'ignore'], \
['RFI-', 'RXSS', 'ignore'], \
['RFI-', 'SqlHyperT', 'ignore'], \
['RFI-', 'SqlMySql', 'ignore'], \
['RFI-', 'SqlOracleT', 'ignore'], \
['RFI-', 'SqlPostgreT', 'ignore'], \
['RXSS-', 'Auto', 'ignore'], \
['RXSS-', 'Buffer', 'ignore'], \
['RXSS-', 'Format', 'ignore'], \
['RXSS-', 'HttpOnly', 'ignore'], \
['RXSS-', 'NoCSRF', 'ignore'], \
['RXSS-', 'SqlOracleT', 'ignore'], \
['RXSS-', 'SqlPostgreT', 'ignore'], \
['RXSS-', 'SqlMySql', 'ignore'], \
['RXSS-', 'SqlOracleT', 'ignore'], \
['RXSS-', 'ViewstateNoMac', 'ignore'], \
['SInjection-', 'AppError', 'ignore'], \
['SInjection-', 'Auto', 'ignore'], \
['SInjection-', 'Buffer', 'ignore'], \
['SInjection-', 'NoCSRF', 'ignore'], \
['SInjection-', 'Format', 'ignore'], \
['SInjection-', 'LDAP', 'ignore'], \
['SInjection-', 'RXSS', 'ignore'], \
['SInjection-', 'SqlHyperT', 'ignore'], \
['LoginBypass', 'Auto', 'ignore'], \
['CrlfRemovalInHttpHeader', 'HttpOnly', 'ignore'], \
['Tag2HtmlPageScopeValidViewstateRequired', 'ViewstateNoMac', 'ignore'], \
['session-password-autocomplete', 'NoCSRF', 'ignore'], \
#
['LFI-Detection-Evaluation', 'PathTrav', 'pass'], \
['LFI-FalsePositives', 'PathTrav', 'fail'], \
['Redirect-', 'ExtRedir', 'pass'], \
['RFI-Detection-Evaluation', 'RFI', 'pass'], \
['RFI-FalsePositives', 'RFI', 'fail'], \
['RXSS-Detection-Evaluation', 'DXSS', 'pass'], \
['RXSS-Detection-Evaluation', 'RXSS', 'pass'], \
['RXSS-FalsePositives-GET', 'DXSS', 'fail'], \
['RXSS-FalsePositives-GET', 'RXSS', 'fail'], \
['SInjection-Detection-Evaluation', 'SQLfp', 'pass'], \
['SInjection-Detection-Evaluation', 'SQLi', 'pass'], \
#['SInjection-Detection-Evaluation', 'SqlHyper', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlBool', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlGen', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlGenE', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySql', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySqlE', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlMySqlT', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlOracleT', 'pass'], \
['SInjection-Detection-Evaluation', 'SqlPostgreT', 'pass'], \
['SInjection-FalsePositives', 'SQLfp', 'fail'], \
['SInjection-FalsePositives', 'SQLi', 'fail'], \
['SInjection-FalsePositives', 'SqlBool', 'fail'], \
['SInjection-FalsePositives', 'SqlGen', 'fail'], \
['SInjection-FalsePositives', 'SqlGenE', 'fail'], \
['SInjection-FalsePositives', 'SqlMySql', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlE', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlT', 'fail'], \
['SInjection-FalsePositives', 'SqlHyperT', 'fail'], \
['SInjection-FalsePositives', 'SqlMySqlT', 'fail'], \
['SInjection-FalsePositives', 'SqlOracleT', 'fail'], \
['SInjection-FalsePositives', 'SqlPostgreT', 'fail'], \
['info-cookie-no-httponly', 'HttpOnly', 'pass'], \
['info-server-stack-trace', 'AppError', 'pass'], \
['session-password-autocomplete', 'Auto', 'pass'], \
['weak-authentication-basic', 'WeakAuth', 'pass'], \
]
zap = ZAPv2(proxies={'http': zapUrl, 'https': zapUrl})
uniqueUrls = set([])
# alertsPerUrl is a disctionary of urlsummary to a dictionary of type to set of alertshortnames ;)
alertsPerUrl = {}
plugins = set([])
alertPassCount = {}
alertFailCount = {}
alertIgnoreCount = {}
alertOtherCount = {}
zapVersion = zap.core.version
totalAlerts = 0
offset = 0
page = 100
# Page through the alerts as otherwise ZAP can hang...
alerts = zap.core.alerts('', offset, page)
while len(alerts) > 0:
totalAlerts += len(alerts)
for alert in alerts:
url = alert.get('url')
# Grab the url before any '?'
url = url.split('?')[0]
#print 'URL: ' + url
urlEl = url.split('/')
if (len(urlEl) > 6):
#print 'URL 4:' + urlEl[4] + ' 6:' + urlEl[6].split('-')[0]
if (urlEl[3] != 'wavsep'):
print 'Ignoring non wavsep URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6]
continue
if (urlEl[6].split('-')[0][:9] == 'index.jsp'):
#print 'Ignoring index URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6]
continue
if (len(urlEl) > 7 and urlEl[4] == 'active'):
if (urlEl[7].split('-')[0][:4] != 'Case'):
#print 'Ignoring index URL 4:' + urlEl[4] + ' URL 5:' + urlEl[5] + ' URL 6:' + urlEl[6] + ' URL 7:' + urlEl[7]
continue
urlSummary = urlEl[4] + ' : ' + urlEl[5] + ' : ' + urlEl[6] + ' : ' + urlEl[7].split('-')[0]
else:
# Passive URLs have different format
urlSummary = urlEl[4] + ' : ' + urlEl[5] + ' : ' + urlEl[6]
#print 'URL summary:' + urlSummary
short = abbrev.get(alert.get('alert'))
if (short is None):
short = 'UNKNOWN'
print 'Unknown alert: ' + alert.get('alert')
aDict = alertsPerUrl.get(urlSummary, {'pass' : set([]), 'fail' : set([]), 'ignore' : set([]), 'other' : set([])})
added = False
for rule in rules:
if (rule[0] in urlSummary and rule[1] == short):
aDict[rule[2]].add(short)
# Counts per alert
if (rule[2] == 'pass'):
alertPassCount[short] = alertPassCount.get(short, 0) + 1
elif (rule[2] == 'fail'):
alertFailCount[short] = alertFailCount.get(short, 0) + 1
elif (rule[2] == 'ignore'):
alertIgnoreCount[short] = alertIgnoreCount.get(short, 0) + 1
added = True
break
if (not added):
aDict['other'].add(short)
alertOtherCount[short] = alertOtherCount.get(short, 0) + 1
alertsPerUrl[urlSummary] = aDict
plugins.add(alert.get('alert'))
uniqueUrls.add(url)
offset += page
alerts = zap.core.alerts('', offset, page)
#for key, value in alertsPerUrl.iteritems():
# print key, value
# Generate report file
reportFile = open('report.html', 'w')
reportFile.write("<html>\n")
reportFile.write(" <head>\n")
reportFile.write(" <title>ZAP Wavsep Report</title>\n")
reportFile.write(" <!--Load the AJAX API-->\n")
reportFile.write(" <script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>\n")
reportFile.write(" </head>\n")
reportFile.write("<body>\n")
reportFile.write("<h1><img src=\"https://raw.githubusercontent.com/zaproxy/zaproxy/develop/src/resource/zap64x64.png\" align=\"middle\">OWASP ZAP wavsep results</h1>\n")
reportFile.write("Generated: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + "\n")
topResults = []
thisTop = ['', 0, 0]
groupResults = []
thisGroup = ['', 0, 0]
totalPass = 0
totalFail = 0
# Calculate the top level scores
for key, value in sorted(alertsPerUrl.iteritems()):
top = key.split(' : ')[1]
if ('-' in top):
top = top.split('-')[0] + '-' + top.split('-')[1]
if (top != thisTop[0]):
thisTop = [top, 0, 0] # top, pass, fail
topResults.append(thisTop)
if (len(value.get('pass')) > 0):
thisTop[1] += 1
elif (len(value.get('fail')) > 0):
thisTop[2] += 1
elif ('FalsePositive' in key):
thisTop[1] += 1
else:
thisTop[2] += 1
# Calculate the group scores
for key, value in sorted(alertsPerUrl.iteritems()):
group = key.split(' : ')[1]
if (group != thisGroup[0]):
thisGroup = [group, 0, 0] # group, pass, fail
groupResults.append(thisGroup)
if (len(value.get('pass')) > 0):
totalPass += 1
thisGroup[1] += 1
elif (len(value.get('fail')) > 0):
totalFail += 1
thisGroup[2] += 1
elif ('FalsePositive' in key):
totalPass += 1
thisGroup[1] += 1
else:
totalFail += 1
thisGroup[2] += 1
# Output the summary
scale=8
reportFile.write("<h3>Total Score</h3>\n")
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\">")
for i in range (totalPass/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\">")
for i in range (totalFail/scale):
reportFile.write(" ")
reportFile.write("</font>")
total = 100 * totalPass / (totalPass + totalFail)
reportFile.write(str(total) + "%<br/><br/>\n")
reportFile.write('ZAP Version: ' + zapVersion + '<br/>\n')
reportFile.write('URLs found: ' + str(len(uniqueUrls)))
# Output the top level table
reportFile.write("<h3>Top Level Scores</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Top Level</th><th>Pass</th><th>Fail</th><th>Score</th><th>Chart</th></tr>\n")
scale=6
for topResult in topResults:
#print "%s Pass: %i Fail: %i Score: %i\%" % (topResult[0], topResult[1], topResult[2], (100*topResult[1]/topResult[1]+topResult[2]))
reportFile.write("<tr>")
reportFile.write("<td>" + topResult[0] + "</td>")
reportFile.write("<td align=\"right\">" + str(topResult[1]) + "</td>")
reportFile.write("<td align=\"right\">" + str(topResult[2]) + "</td>")
score = 100 * topResult[1] / (topResult[1] + topResult[2])
reportFile.write("<td align=\"right\">" + str(score) + "%</td>")
reportFile.write("<td>")
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\">")
for i in range (topResult[1]/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\">")
for i in range (topResult[2]/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("</td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
reportFile.write("<h3>Alerts</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Alert</th><th>Description</th><th>Pass</th><th>Fail</th><th>Ignore</th><th>Other</th></tr>\n")
#for key, value in abbrev.items():
for (k, v) in sorted(abbrev.items(), key=lambda (k,v): v):
reportFile.write("<tr>")
reportFile.write("<td>" + v + "</td>")
reportFile.write("<td>" + k + "</td>")
reportFile.write("<td>" + str(alertPassCount.get(v, 0)) +" </td>")
reportFile.write("<td>" + str(alertFailCount.get(v, 0)) +" </td>")
reportFile.write("<td>" + str(alertIgnoreCount.get(v, 0)) +" </td>")
reportFile.write("<td>" + str(alertOtherCount.get(v, 0)) +" </td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
# Output the group table
reportFile.write("<h3>Group Scores</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Group</th><th>Pass</th><th>Fail</th><th>Score</th><th>Chart</th></tr>\n")
scale=4
for groupResult in groupResults:
#print "%s Pass: %i Fail: %i Score: %i\%" % (groupResult[0], groupResult[1], groupResult[2], (100*groupResult[1]/groupResult[1]+groupResult[2]))
reportFile.write("<tr>")
reportFile.write("<td>" + groupResult[0] + "</td>")
reportFile.write("<td align=\"right\">" + str(groupResult[1]) + "</td>")
reportFile.write("<td align=\"right\">" + str(groupResult[2]) + "</td>")
score = 100 * groupResult[1] / (groupResult[1] + groupResult[2])
reportFile.write("<td align=\"right\">" + str(score) + "%</td>")
reportFile.write("<td>")
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\">")
for i in range (groupResult[1]/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\">")
for i in range (groupResult[2]/scale):
reportFile.write(" ")
reportFile.write("</font>")
reportFile.write("</td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
# Output the detail table
reportFile.write("<h3>Detailed Results</h3>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Page</th><th>Result</th><th>Pass</th><th>Fail</th><th>Ignore</th><th>Other</th></tr>\n")
for key, value in sorted(alertsPerUrl.iteritems()):
reportFile.write("<tr>")
keyArray = key.split(':')
if (len(keyArray) == 4):
reportFile.write("<td>" + keyArray[0] + keyArray[2] + keyArray[3] + "</td>")
else:
reportFile.write("<td>" + keyArray[0] + keyArray[2] + "</td>")
reportFile.write("<td>")
if (len(value.get('pass')) > 0):
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\"> PASS </font>")
elif (len(value.get('fail')) > 0):
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\"> FAIL </font>")
elif ('FalsePositive' in key):
reportFile.write("<font style=\"BACKGROUND-COLOR: GREEN\"> PASS </font>")
else:
reportFile.write("<font style=\"BACKGROUND-COLOR: RED\"> FAIL </font>")
reportFile.write("</td>")
reportFile.write("<td>")
if (value.get('pass') is not None):
reportFile.write(" ".join(value.get('pass')))
reportFile.write(" </td>")
reportFile.write("<td>")
if (value.get('fail') is not None):
reportFile.write(" ".join(value.get('fail')))
reportFile.write(" </td>")
reportFile.write("<td>")
if (value.get('ignore') is not None):
reportFile.write(" ".join(value.get('ignore')))
reportFile.write(" </td>")
reportFile.write("<td>")
if (value.get('other') is not None):
reportFile.write(" ".join(value.get('other')))
reportFile.write(" </td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
reportFile.write("<h3>Plugin Times</h3>\n")
# The start of the chart script
reportFile.write("<script type=\"text/javascript\">\n")
reportFile.write(" // Load the Visualization API and the piechart package.\n")
reportFile.write(" google.load('visualization', '1.0', {'packages':['corechart']});\n")
reportFile.write(" // Set a callback to run when the Google Visualization API is loaded.\n")
reportFile.write(" google.setOnLoadCallback(drawChart);\n")
reportFile.write(" function drawChart() {\n")
reportFile.write(" // Create the data table.\n")
reportFile.write(" var data = new google.visualization.DataTable();\n")
reportFile.write(" data.addColumn('string', 'Plugin');\n")
reportFile.write(" data.addColumn('number', 'Time in ms');\n")
reportFile.write(" data.addRows([\n")
progress = zap.ascan.scan_progress()
# Loop through first time for the chart
for plugin in progress[1]['HostProcess']:
reportFile.write(" ['" + plugin['Plugin'][0] + "', " + plugin['Plugin'][3] + "],\n")
# The end of the chart script
reportFile.write(" ]);\n")
reportFile.write(" // Set chart options\n")
reportFile.write(" var options = {'title':'Plugin times',\n")
reportFile.write(" 'width':600,\n")
reportFile.write(" 'height':500};\n")
reportFile.write(" // Instantiate and draw our chart, passing in some options.\n")
reportFile.write(" var chart = new google.visualization.PieChart(document.getElementById('chart_div'));\n")
reportFile.write(" chart.draw(data, options);\n")
reportFile.write(" }\n")
reportFile.write("</script>\n")
reportFile.write("<div id=\"chart_div\"></div>\n")
reportFile.write("<table border=\"1\">\n")
reportFile.write("<tr><th>Plugin</th><th>ms</th></tr>\n")
# Loop through second time for the table
totalTime = 0
for plugin in progress[1]['HostProcess']:
reportFile.write("<tr>")
reportFile.write("<td>" + plugin['Plugin'][0] + "</td>")
# Convert ms into something more readable
t = int(plugin['Plugin'][3])
totalTime += t
s, ms = divmod(t, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
time = "%d:%02d:%02d.%03d" % (h, m, s, ms)
reportFile.write("<td>" + time + "</td>")
reportFile.write("</tr>\n")
reportFile.write("<tr><td></td><td></td></tr>")
reportFile.write("<tr>")
reportFile.write("<td>Total</td>")
# Convert ms into something more readable
s, ms = divmod(totalTime, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
time = "%d:%02d:%02d.%03d" % (h, m, s, ms)
reportFile.write("<td>" + time + "</td>")
reportFile.write("</tr>\n")
reportFile.write("</table><br/>\n")
reportFile.write("</body></html>\n")
reportFile.close()
#for key, value in sorted(alertsPerUrl.iteritems()):
# print "%s: %s" % (key, value)
#print ''
print ''
print 'Got ' + str(totalAlerts) + ' alerts'
print 'Got ' + str(len(uniqueUrls)) + ' unique urls'
print 'Took ' + time
print 'Score ' + str(total)
if __name__ == "__main__":
main(sys.argv[1:])
| 38.368056 | 170 | 0.611403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,448 | 0.563258 |
81ba27f4241d7478bbc576ab7545d0bb01038c41 | 665 | py | Python | ex115/biblioteca/interface/__init__.py | Danilo-Xaxa/python_curso_em_video | 9a88e5f579dfc844f936b7759d33e4068b091f09 | ["MIT"] | 4 | 2021-08-29T02:19:55.000Z | 2021-08-30T20:21:30.000Z | ex115/biblioteca/interface/__init__.py | Danilo-Xaxa/python_curso_em_video | 9a88e5f579dfc844f936b7759d33e4068b091f09 | ["MIT"] | null | null | null | ex115/biblioteca/interface/__init__.py | Danilo-Xaxa/python_curso_em_video | 9a88e5f579dfc844f936b7759d33e4068b091f09 | ["MIT"] | null | null | null |
def LeiaInt(msg1):
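    # Keeps prompting until the user types only digits; returns the typed value.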
pronto = False
while True:
valor1 = input(msg1)
if valor1.isnumeric():
pronto = True
else:
            print('\033[1;31mERRO! FAVOR DIGITAR UM NÚMERO INTEIRO VÁLIDO\033[m')
if pronto:
break
return valor1
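# Builds a horizontal divider of '-' characters (default width 42).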
def linha(tamanho=42):
return '-' * tamanho
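# Prints a section header: a divider, the centred title, and another divider.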
def cabeçalho(txt):
print(linha())
print(txt.center(42))
print(linha())
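# Displays a numbered menu of the given options and returns the user's choice.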
def menu(lista):
    cabeçalho('MENU PRINCIPAL')
x = 1
for item in lista:
print(f'\033[33m{x}\033[m - \033[34m{item}\033[m')
x += 1
print(linha())
    opç = LeiaInt('\033[32mSua opção: \033[m')
    return opç
| 20.78125 | 81 | 0.554887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.230312 |
81bafa0175de3af83830a52504e9b10d4a89639b | 10,439 | py | Python | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | ["MIT"] | null | null | null | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | ["MIT"] | null | null | null | pocketsmith/models/attachment.py | brett-comber/python-pocketsmith-api | a9c7f25abf65e4e022535431dc1d34d6a1bd97e8 | ["MIT"] | null | null | null |
# coding: utf-8
"""
PocketSmith
The public PocketSmith API # noqa: E501
The version of the OpenAPI document: 2.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from pocketsmith.configuration import Configuration
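# A minimal usage sketch (hypothetical values; in practice instances are built
# by the generated API client rather than constructed by hand):
#   attachment = Attachment(id=1234, title='Receipt', content_type='image/png')
#   print(attachment.title)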
class Attachment(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'content_type': 'str',
'content_type_meta': 'AttachmentContentTypeMeta',
'created_at': 'datetime',
'file_name': 'str',
'id': 'int',
'original_url': 'str',
'title': 'str',
'type': 'str',
'updated_at': 'datetime',
'variants': 'AttachmentVariants'
}
attribute_map = {
'content_type': 'content_type',
'content_type_meta': 'content_type_meta',
'created_at': 'created_at',
'file_name': 'file_name',
'id': 'id',
'original_url': 'original_url',
'title': 'title',
'type': 'type',
'updated_at': 'updated_at',
'variants': 'variants'
}
def __init__(self, content_type=None, content_type_meta=None, created_at=None, file_name=None, id=None, original_url=None, title=None, type=None, updated_at=None, variants=None, local_vars_configuration=None): # noqa: E501
"""Attachment - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._content_type = None
self._content_type_meta = None
self._created_at = None
self._file_name = None
self._id = None
self._original_url = None
self._title = None
self._type = None
self._updated_at = None
self._variants = None
self.discriminator = None
if content_type is not None:
self.content_type = content_type
if content_type_meta is not None:
self.content_type_meta = content_type_meta
if created_at is not None:
self.created_at = created_at
if file_name is not None:
self.file_name = file_name
if id is not None:
self.id = id
if original_url is not None:
self.original_url = original_url
if title is not None:
self.title = title
if type is not None:
self.type = type
if updated_at is not None:
self.updated_at = updated_at
if variants is not None:
self.variants = variants
@property
def content_type(self):
"""Gets the content_type of this Attachment. # noqa: E501
The content type of the attachment. # noqa: E501
:return: The content_type of this Attachment. # noqa: E501
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this Attachment.
The content type of the attachment. # noqa: E501
:param content_type: The content_type of this Attachment. # noqa: E501
:type: str
"""
self._content_type = content_type
@property
def content_type_meta(self):
"""Gets the content_type_meta of this Attachment. # noqa: E501
:return: The content_type_meta of this Attachment. # noqa: E501
:rtype: AttachmentContentTypeMeta
"""
return self._content_type_meta
@content_type_meta.setter
def content_type_meta(self, content_type_meta):
"""Sets the content_type_meta of this Attachment.
:param content_type_meta: The content_type_meta of this Attachment. # noqa: E501
:type: AttachmentContentTypeMeta
"""
self._content_type_meta = content_type_meta
@property
def created_at(self):
"""Gets the created_at of this Attachment. # noqa: E501
When the attachment was created # noqa: E501
:return: The created_at of this Attachment. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Attachment.
When the attachment was created # noqa: E501
:param created_at: The created_at of this Attachment. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def file_name(self):
"""Gets the file_name of this Attachment. # noqa: E501
The file name of the attachment # noqa: E501
:return: The file_name of this Attachment. # noqa: E501
:rtype: str
"""
return self._file_name
@file_name.setter
def file_name(self, file_name):
"""Sets the file_name of this Attachment.
The file name of the attachment # noqa: E501
:param file_name: The file_name of this Attachment. # noqa: E501
:type: str
"""
self._file_name = file_name
@property
def id(self):
"""Gets the id of this Attachment. # noqa: E501
The unique identifier of the attachment # noqa: E501
:return: The id of this Attachment. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Attachment.
The unique identifier of the attachment # noqa: E501
:param id: The id of this Attachment. # noqa: E501
:type: int
"""
self._id = id
@property
def original_url(self):
"""Gets the original_url of this Attachment. # noqa: E501
The url of the attachment # noqa: E501
:return: The original_url of this Attachment. # noqa: E501
:rtype: str
"""
return self._original_url
@original_url.setter
def original_url(self, original_url):
"""Sets the original_url of this Attachment.
The url of the attachment # noqa: E501
:param original_url: The original_url of this Attachment. # noqa: E501
:type: str
"""
self._original_url = original_url
@property
def title(self):
"""Gets the title of this Attachment. # noqa: E501
The title of the attachment. If blank or not provided, the title will be derived from the file name. # noqa: E501
:return: The title of this Attachment. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this Attachment.
The title of the attachment. If blank or not provided, the title will be derived from the file name. # noqa: E501
:param title: The title of this Attachment. # noqa: E501
:type: str
"""
self._title = title
@property
def type(self):
"""Gets the type of this Attachment. # noqa: E501
The type of attachment # noqa: E501
:return: The type of this Attachment. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Attachment.
The type of attachment # noqa: E501
:param type: The type of this Attachment. # noqa: E501
:type: str
"""
self._type = type
@property
def updated_at(self):
"""Gets the updated_at of this Attachment. # noqa: E501
When the attachment was last updated # noqa: E501
:return: The updated_at of this Attachment. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this Attachment.
When the attachment was last updated # noqa: E501
:param updated_at: The updated_at of this Attachment. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def variants(self):
"""Gets the variants of this Attachment. # noqa: E501
:return: The variants of this Attachment. # noqa: E501
:rtype: AttachmentVariants
"""
return self._variants
@variants.setter
def variants(self, variants):
"""Sets the variants of this Attachment.
:param variants: The variants of this Attachment. # noqa: E501
:type: AttachmentVariants
"""
self._variants = variants
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Attachment):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Attachment):
return True
return self.to_dict() != other.to_dict()
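# Minimal usage sketch (illustrative only; in practice these objects come back
# from the generated PocketSmith client rather than being built by hand):
#
#     att = Attachment(id=1, file_name="receipt.pdf", title="Receipt",
#                      content_type="application/pdf")
#     att.to_dict()   # plain dict of the populated attributes
#     print(att)      # pretty-printed via to_str()/__repr__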
| 28.061828 | 227 | 0.590861 | 10,116 | 0.969058 | 0 | 0 | 5,838 | 0.559249 | 0 | 0 | 5,486 | 0.525529 |
81bce2f74bd4337a65e512dbd85c7e158418982f | 16,476 | py | Python | pynsq/nsq/NSQReader.py | ghorges/nsq-2.0 | b8dc67fa9467e9f39f976f923b798f574d12d8a9 | ["MIT"]
"""
high-level NSQ reader class built on top of a Tornado IOLoop supporting both sync and
async modes of operation.
supports various hooks to modify behavior when heartbeats are received, temporarily
disable the reader, and pre-process/validate messages.
when supplied a list of nsqlookupd addresses, a reader instance will periodically poll
the specified topic in order to discover new producers and reconnect to existing ones.
sync ex.
import nsq
def task1(message):
print message
return True
def task2(message):
print message
return True
all_tasks = {"task1": task1, "task2": task2}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['http://127.0.0.1:4161'],
topic="nsq_reader", channel="asdf", lookupd_poll_interval=15)
nsq.run()
async ex.
import nsq
buf = []
def process_message(message, finisher):
global buf
# cache both the message and the finisher callable for later processing
buf.append((message, finisher))
if len(buf) >= 3:
print '****'
for msg, finish_fxn in buf:
print msg
finish_fxn(True) # use finish_fxn to tell NSQ of success
print '****'
buf = []
else:
print 'deferring processing'
all_tasks = {"task1": process_message}
r = nsq.Reader(all_tasks, lookupd_http_addresses=['http://127.0.0.1:4161'],
topic="nsq_reader", channel="async", async=True)
nsq.run()
"""
import logging
try:
import simplejson as json
except ImportError:
import json
import time
import signal
import socket
import functools
import urllib
import random
import tornado.ioloop
import tornado.httpclient
import BackoffTimer
import nsq
import async
class RequeueWithoutBackoff(Exception):
"""exception for requeueing a message without incrementing backoff"""
pass
class Reader(object):
def __init__(self, all_tasks, topic, channel,
nsqd_tcp_addresses=None, lookupd_http_addresses=None, async=False,
max_tries=5, max_in_flight=1, requeue_delay=90, lookupd_poll_interval=120):
"""
Reader receives messages over the specified ``topic/channel`` and provides an async loop
that calls each task method provided by ``all_tasks`` up to ``max_tries``.
It will handle sending FIN or REQ commands based on feedback from the task methods. When
re-queueing, an increasing delay will be calculated automatically. Additionally, when
message processing fails, it will backoff for increasing multiples of ``requeue_delay``
between updating of RDY count.
``all_tasks`` defines the a mapping of tasks and callables that will be executed for each
message received.
``topic`` specifies the desired NSQ topic
``channel`` specifies the desired NSQ channel
``nsqd_tcp_addresses`` a sequence of string addresses of the nsqd instances this reader
should connect to
``lookupd_http_addresses`` a sequence of string addresses of the nsqlookupd instances this
reader should query for producers of the specified topic
``async`` determines whether handlers will do asynchronous processing. If set to True,
handlers must accept a keyword argument called ``finisher`` that will be a callable used
to signal message completion, taking a boolean argument indicating success.
``max_tries`` the maximum number of attempts the reader will make to process a message after
which messages will be automatically discarded
``max_in_flight`` the maximum number of messages this reader will pipeline for processing.
this value will be divided evenly amongst the configured/discovered nsqd producers.
``requeue_delay`` the base multiple used when re-queueing (multiplied by # of attempts)
``lookupd_poll_interval`` the amount of time in between querying all of the supplied
nsqlookupd instances. a random amount of time based on thie value will be initially
introduced in order to add jitter when multiple readers are running.
"""
assert isinstance(all_tasks, dict)
for key, method in all_tasks.items():
assert callable(method), "key %s must have a callable value" % key
assert isinstance(topic, (str, unicode)) and len(topic) > 0
assert isinstance(channel, (str, unicode)) and len(channel) > 0
assert isinstance(max_in_flight, int) and 0 < max_in_flight < 2500
if nsqd_tcp_addresses:
if not isinstance(nsqd_tcp_addresses, (list, set, tuple)):
assert isinstance(nsqd_tcp_addresses, (str, unicode))
nsqd_tcp_addresses = [nsqd_tcp_addresses]
else:
nsqd_tcp_addresses = []
if lookupd_http_addresses:
if not isinstance(lookupd_http_addresses, (list, set, tuple)):
assert isinstance(lookupd_http_addresses, (str, unicode))
lookupd_http_addresses = [lookupd_http_addresses]
else:
lookupd_http_addresses = []
assert nsqd_tcp_addresses or lookupd_http_addresses
self.topic = topic
self.channel = channel
self.nsqd_tcp_addresses = nsqd_tcp_addresses
self.lookupd_http_addresses = lookupd_http_addresses
self.requeue_delay = int(requeue_delay * 1000)
self.max_tries = max_tries
self.max_in_flight = max_in_flight
self.lookupd_poll_interval = lookupd_poll_interval
self.async = async
self.task_lookup = all_tasks
self.backoff_timer = dict((k, BackoffTimer.BackoffTimer(0, 120)) for k in self.task_lookup.keys())
self.hostname = socket.gethostname()
self.short_hostname = self.hostname.split('.')[0]
self.conns = {}
self.http_client = tornado.httpclient.AsyncHTTPClient()
self.last_recv_timestamps = {}
logging.info("starting reader for topic '%s'..." % self.topic)
for task in self.task_lookup:
for addr in self.nsqd_tcp_addresses:
address, port = addr.split(':')
self.connect_to_nsqd(address, int(port), task)
# trigger the first one manually
self.query_lookupd()
tornado.ioloop.PeriodicCallback(self.check_last_recv_timestamps, 60 * 1000).start()
periodic = tornado.ioloop.PeriodicCallback(self.query_lookupd, self.lookupd_poll_interval * 1000)
# randomize the time we start this poll loop so that all servers don't query at exactly the same time
# randomize based on 10% of the interval
delay = random.random() * self.lookupd_poll_interval * .1
tornado.ioloop.IOLoop.instance().add_timeout(time.time() + delay, periodic.start)
def _client_callback(self, success, message=None, task=None, conn=None):
'''
This is the method that an asynchronous nsqreader should call to indicate
async completion of a message. This will most likely be exposed as the finisher
callable created in `callback` above with some functools voodoo
'''
if success:
self.backoff_timer[task].success()
self.finish(conn, message.id)
else:
self.backoff_timer[task].failure()
self.requeue(conn, message)
def requeue(self, conn, message, delay=True):
if message.attempts > self.max_tries:
self.giving_up(message)
return self.finish(conn, message.id)
try:
# ms
requeue_delay = self.requeue_delay * message.attempts if delay else 0
conn.send(nsq.requeue(message.id, str(requeue_delay)))
except Exception:
conn.close()
logging.exception('[%s] failed to send requeue %s @ %d' % (conn, message.id, requeue_delay))
def finish(self, conn, message_id):
'''
This is an internal method for NSQReader
'''
try:
conn.send(nsq.finish(message_id))
except Exception:
conn.close()
logging.exception('[%s] failed to send finish %s' % (conn, message_id))
def connection_max_in_flight(self):
return max(1, self.max_in_flight / max(1, len(self.conns)))
def handle_message(self, conn, task, message):
conn.ready -= 1
# update ready count if necessary...
# if we're in a backoff state for this task
# set a timer to actually send the ready update
per_conn = self.connection_max_in_flight()
if not conn.is_sending_ready and (conn.ready <= 1 or conn.ready < int(per_conn * 0.25)):
backoff_interval = self.backoff_timer[task].get_interval()
if self.disabled():
backoff_interval = 15
if backoff_interval > 0:
conn.is_sending_ready = True
logging.info('[%s] backing off for %0.2f seconds' % (conn, backoff_interval))
send_ready_callback = functools.partial(self.send_ready, conn, per_conn)
tornado.ioloop.IOLoop.instance().add_timeout(time.time() + backoff_interval, send_ready_callback)
else:
self.send_ready(conn, per_conn)
try:
processed_message = self.preprocess_message(message)
if not self.validate_message(processed_message):
return self.finish(conn, message.id)
except Exception:
logging.exception('[%s] caught exception while preprocessing' % conn)
return self.requeue(conn, message)
method_callback = self.task_lookup[task]
try:
if self.async:
# this handler accepts the finisher callable as a keyword arg
finisher = functools.partial(self._client_callback, message=message, task=task, conn=conn)
return method_callback(processed_message, finisher=finisher)
else:
# this is an old-school sync handler, give it just the message
if method_callback(processed_message):
self.backoff_timer[task].success()
return self.finish(conn, message.id)
self.backoff_timer[task].failure()
except RequeueWithoutBackoff:
logging.info('RequeueWithoutBackoff')
except Exception:
logging.exception('[%s] caught exception while handling %s' % (conn, task))
self.backoff_timer[task].failure()
return self.requeue(conn, message)
def send_ready(self, conn, value):
if self.disabled():
logging.info('[%s] disabled, delaying ready state change', conn)
send_ready_callback = functools.partial(self.send_ready, conn, value)
tornado.ioloop.IOLoop.instance().add_timeout(time.time() + 15, send_ready_callback)
return
try:
conn.send(nsq.ready(value))
conn.ready = value
except Exception:
conn.close()
logging.exception('[%s] failed to send ready' % conn)
conn.is_sending_ready = False
def _data_callback(self, conn, raw_data, task):
self.last_recv_timestamps[get_conn_id(conn, task)] = time.time()
frame, data = nsq.unpack_response(raw_data)
if frame == nsq.FRAME_TYPE_MESSAGE:
message = nsq.decode_message(data)
try:
self.handle_message(conn, task, message)
except Exception:
logging.exception('[%s] failed to handle_message() %r' % (conn, message))
elif frame == nsq.FRAME_TYPE_RESPONSE and data == "_heartbeat_":
self.heartbeat(conn)
conn.send(nsq.nop())
def connect_to_nsqd(self, address, port, task):
assert isinstance(address, (str, unicode))
assert isinstance(port, int)
conn_id = address + ':' + str(port) + ':' + task
if conn_id in self.conns:
return
logging.info("[%s] connecting to nsqd for '%s'", address + ':' + str(port), task)
connect_callback = functools.partial(self._connect_callback, task=task)
data_callback = functools.partial(self._data_callback, task=task)
close_callback = functools.partial(self._close_callback, task=task)
conn = async.AsyncConn(address, port, connect_callback, data_callback, close_callback)
conn.connect()
self.conns[conn_id] = conn
def _connect_callback(self, conn, task):
if len(self.task_lookup) > 1:
channel = self.channel + '.' + task
else:
channel = self.channel
initial_ready = self.connection_max_in_flight()
try:
conn.send(nsq.subscribe(self.topic, channel, self.short_hostname, self.hostname))
conn.send(nsq.ready(initial_ready))
conn.ready = initial_ready
conn.is_sending_ready = False
except Exception:
conn.close()
logging.exception('[%s] failed to bootstrap connection' % conn)
def _close_callback(self, conn, task):
conn_id = get_conn_id(conn, task)
if conn_id in self.conns:
del self.conns[conn_id]
logging.warning("[%s] connection closed... %d left open", conn, len(self.conns))
if len(self.conns) == 0 and len(self.lookupd_http_addresses) == 0:
logging.warning("all connections closed and no lookupds... exiting")
tornado.ioloop.IOLoop.instance().stop()
def query_lookupd(self):
for endpoint in self.lookupd_http_addresses:
lookupd_url = endpoint + "/lookup?topic=" + urllib.quote(self.topic)
req = tornado.httpclient.HTTPRequest(lookupd_url, method="GET",
connect_timeout=1, request_timeout=2)
callback = functools.partial(self._finish_query_lookupd, endpoint=endpoint)
self.http_client.fetch(req, callback=callback)
def _finish_query_lookupd(self, response, endpoint):
if response.error:
logging.warning("[%s] lookupd error %s", endpoint, response.error)
return
try:
lookup_data = json.loads(response.body)
except json.JSONDecodeError:
logging.warning("[%s] failed to parse JSON from lookupd: %r", endpoint, response.body)
return
if lookup_data['status_code'] != 200:
logging.warning("[%s] lookupd responded with %d", endpoint, lookup_data['status_code'])
return
for task in self.task_lookup:
for producer in lookup_data['data']['producers']:
self.connect_to_nsqd(producer['address'], producer['tcp_port'], task)
def check_last_recv_timestamps(self):
now = time.time()
for conn_id, conn in dict(self.conns).iteritems():
timestamp = self.last_recv_timestamps.get(conn_id, 0)
if (now - timestamp) > 60:
# this connection hasnt received data beyond
# the normal heartbeat interval, close it
logging.warning("[%s] connection is stale, closing", conn)
conn = self.conns[conn_id]
conn.close()
#
# subclass overwriteable
#
def giving_up(self, message):
logging.warning("giving up on message '%s' after max tries %d", message.id, self.max_tries)
def disabled(self):
return False
def heartbeat(self, conn):
pass
def validate_message(self, message):
return True
def preprocess_message(self, message):
return message
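# The hook methods above (giving_up, disabled, heartbeat, validate_message,
# preprocess_message) are intended to be overridden by applications; a small
# sketch (MyReader, message.body and maintenance_mode are illustrative names,
# not part of this module):
#
#     class MyReader(Reader):
#         def validate_message(self, message):
#             return len(message.body) > 0   # drop empty payloads
#         def disabled(self):
#             return maintenance_mode        # pause RDY updates while True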
def get_conn_id(conn, task):
return str(conn) + ':' + task
def _handle_term_signal(sig_num, frame):
logging.info('TERM Signal handler called with signal %r' % sig_num)
tornado.ioloop.IOLoop.instance().stop()
def run():
signal.signal(signal.SIGTERM, _handle_term_signal)
tornado.ioloop.IOLoop.instance().start()
| 40.581281 | 113 | 0.624059 | 14,319 | 0.869082 | 0 | 0 | 0 | 0 | 0 | 0 | 5,521 | 0.335093 |
81bed88a93d034618c88d318a0da803628905ccb | 337 | py | Python | main.py | ygidtu/mountainClimber | 37a1b2934741a755c90000af8d2f9e8256f24ca6 | ["Apache-2.0"]
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
u"""
Created at 2020.09.04 by Zhang Yiming
"""
import warnings
warnings.filterwarnings("ignore")
import click
from cli.climb import climb
from cli.diff import diff
@click.group()
def main():
pass
main.add_command(climb)
main.add_command(diff)
if __name__ == '__main__':
main()
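# Typical invocation from a shell (subcommand names come from the click
# commands registered above; paths are illustrative):
#   python main.py climb --help
#   python main.py diff --help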
| 14.041667 | 37 | 0.700297 | 0 | 0 | 0 | 0 | 35 | 0.103858 | 0 | 0 | 108 | 0.320475 |
81bf3cce63eb0d81e1cb3c04efffcbc893d011ef | 2,023 | py | Python | app/fednlp/data/raw_data_loader/CNN_Dailymail/data_loader.py | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd | ["Apache-2.0"]
import os
from data.raw_data_loader.base.base_raw_data_loader import Seq2SeqRawDataLoader
class RawDataLoader(Seq2SeqRawDataLoader):
def __init__(self, data_path):
super().__init__(data_path)
self.cnn_path = "cnn/stories"
self.dailymail_path = "dailymail/stories"
def load_data(self):
if len(self.X) == 0 or len(self.Y) == 0:
total_size = 0
for root, dirs, files in os.walk(
os.path.join(self.data_path, self.cnn_path)
):
for file_name in files:
file_path = os.path.join(root, file_name)
processed_size = self.process_data_file(file_path)
total_size += processed_size
for root, dirs, files in os.walk(
os.path.join(self.data_path, self.dailymail_path)
):
for file_name in files:
file_path = os.path.join(root, file_name)
processed_size = self.process_data_file(file_path)
total_size += processed_size
index_list = [i for i in range(total_size)]
self.attributes["index_list"] = index_list
def process_data_file(self, file_path):
cnt = 0
article_lines = []
abstract_lines = []
next_is_highlight = False
with open(file_path, "r") as f:
for line in f:
line = line.strip()
if line:
if line.startswith("@highlight"):
next_is_highlight = True
elif next_is_highlight:
abstract_lines.append(line)
else:
article_lines.append(line)
assert len(self.X) == len(self.Y)
idx = len(self.X)
self.X[idx] = " ".join(article_lines)
self.Y[idx] = " ".join(
["%s %s %s" % ("<s>", sent, "</s>") for sent in abstract_lines]
)
cnt += 1
return cnt
| 36.125 | 79 | 0.527435 | 1,928 | 0.95304 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.042511 |
81bf6ad4a1d9f400fda048a534023120e5946c0a | 4,098 | py | Python | packages/utils/propagate_license.py | justi/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
]
| 12 | 2015-03-11T22:07:17.000Z | 2016-01-29T21:24:29.000Z | packages/utils/propagate_license.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
]
| 213 | 2015-01-30T16:02:57.000Z | 2016-01-29T21:45:02.000Z | packages/utils/propagate_license.py | youngmook/m2g | 09e8b889889ee8d8fb08b9b6fcd726fb3d901644 | [
"Apache-2.0"
]
| 5 | 2015-02-04T13:58:12.000Z | 2016-01-29T21:24:46.000Z | #!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# propagate_license.py
# Created by Disa Mhembere on 2014-05-16.
# Email: [email protected]
__license_header__ = """
{} Copyright 2014 Open Connectome Project (http://openconnecto.me)
{}
{} Licensed under the Apache License, Version 2.0 (the "License");
{} you may not use this file except in compliance with the License.
{} You may obtain a copy of the License at
{}
{} http://www.apache.org/licenses/LICENSE-2.0
{}
{} Unless required by applicable law or agreed to in writing, software
{} distributed under the License is distributed on an "AS IS" BASIS,
{} WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
{} See the License for the specific language governing permissions and
{} limitations under the License.
{}
"""
COMM_COUNT = 14
comm = {".py":"#", ".pyx":"#", "": "#", ".html":"", ".sh":"#", ".r":"#", ".m":"%", ".c":"//",
".c++":"//", ".java":"//", ".js":"//"}
import argparse
import os
def add(files):
global __license_header__
for full_fn in files:
license_header = __license_header__
print "Processing file: %s ..." % full_fn
script = open(full_fn, "rb")
lines = script.read().splitlines()
script.close()
# Exception for html
comment_style = comm[os.path.splitext(full_fn)[1].lower()]
if lines[0].startswith("#!/usr/bin"):
if lines[5].startswith("# Copyright"): # get rid of copyright year
del lines[5], lines[1]
lines.insert(1, license_header.format(*([comment_style]*COMM_COUNT)))
else:
#license_header += "{} Created by Disa Mhembere\n{} Email: [email protected]".format(*([comment_style]*2))
if os.path.splitext(full_fn)[1].lower().strip() == ".html":
license_header = "<!-- " + license_header + " -->"
lines.insert(0, license_header.format(*([comment_style]*COMM_COUNT)))
script = open(full_fn, "wb")
script.write("\n".join(lines))
def hidden(path):
breakdown = path.split("/")
for item in breakdown:
if item.startswith("."):
return True
return False
def rm(dirname):
pass
def main():
parser = argparse.ArgumentParser(description="Add or Update license headers to code")
parser.add_argument("-r", "--remove", action="store_true", help="Remove the license")
parser.add_argument("-d", "--dirname", action="store", default=".", help="Directory where to start walk")
parser.add_argument("-f", "--files", action="store", nargs="*", help="Files you want license added to")
parser.add_argument("-e", "--file_exts", nargs="*", action="store", \
default=[".py", ".pyx", ".html", ".sh", ".R", ".m", ""], \
help="File extensions to add to the files altered")
parser.add_argument("-i", "--ignore", nargs="*", action="store", \
default=["README", "__init__.py", "TODO", __file__], \
help="Files to ignore")
result = parser.parse_args()
if result.files:
print "Licensing individual files ..."
add(result.files)
exit(1)
else:
print "Licensing a directory of files ..."
files = []
for root, dirnames, filenames in os.walk(os.path.abspath(result.dirname)):
for filename in filenames:
full_fn = os.path.join(root, filename)
if os.path.isfile(full_fn) and not hidden(full_fn) \
and not os.path.basename(full_fn) in result.ignore \
and ( os.path.splitext(full_fn)[-1].lower().strip() in result.file_exts ):
files.append(full_fn)
add(files)
if __name__ == "__main__":
main()
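# Typical invocations (paths are illustrative):
#   python propagate_license.py -d ./src             # walk ./src and license matching files
#   python propagate_license.py -f foo.py bar.sh     # license only the listed files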
| 35.327586 | 107 | 0.656418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,115 | 0.516105 |
81c0253c14333698b3786f1e30f1b538f9b67736 | 1,384 | py | Python | core/gf/test.py | zorrock/accelerated-text | 942bacf653fc4c901748b06eaba72da4aeaaec9e | ["Apache-2.0"] | stars: 1
import pytest
import server
@pytest.fixture(scope="session")
def authorship_grammar():
with open("test_grammars/Authorship.gf", "r") as f:
abstract = {"content": f.read()}
with open("test_grammars/AuthorshipEng.gf", "r") as f:
inst = {"content": f.read(), "key": "Eng"}
return server.compile_grammar("Authorship", abstract, [inst])
def test_compile_grammar(authorship_grammar):
result = authorship_grammar
print(result)
assert result
langs = result.languages
assert len(langs) == 1
assert "AuthorshipEng" in langs
def test_generation_results(authorship_grammar):
expressions = server.generate_expressions(authorship_grammar)
results = list([(k, server.generate_variants(expressions, concrete))
for k, concrete in authorship_grammar.languages.items()])
print(results)
(_, r0) = results[0]
assert set(r0) == set([
"good {{TITLE}} is authored by {{AUTHOR}}",
"good {{TITLE}} is written by {{AUTHOR}}",
"excellent {{TITLE}} is authored by {{AUTHOR}}",
"excellent {{TITLE}} is written by {{AUTHOR}}",
"{{AUTHOR}} is the author of excellent {{TITLE}}",
"{{AUTHOR}} is the author of good {{TITLE}}",
"{{AUTHOR}} was authored by good {{TITLE}}",
"{{AUTHOR}} was authored by excellent {{TITLE}}",
])
| 32.186047 | 77 | 0.62211 | 0 | 0 | 0 | 0 | 341 | 0.246387 | 0 | 0 | 491 | 0.354769 |
81c086bf3828eec5887f2980268193fc09c2dd9d | 3,126 | py | Python | troposphere/validators/dynamodb.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | ["BSD-2-Clause"]
# Copyright (c) 2012-2022, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from .. import AWSHelperFn, If
def attribute_type_validator(x):
"""
Property: AttributeDefinition.AttributeType
"""
valid_types = ["S", "N", "B"]
if x not in valid_types:
raise ValueError("AttributeType must be one of: %s" % ", ".join(valid_types))
return x
def key_type_validator(x):
"""
Property: KeySchema.KeyType
"""
valid_types = ["HASH", "RANGE"]
if x not in valid_types:
raise ValueError("KeyType must be one of: %s" % ", ".join(valid_types))
return x
def projection_type_validator(x):
"""
Property: Projection.ProjectionType
"""
valid_types = ["KEYS_ONLY", "INCLUDE", "ALL"]
if x not in valid_types:
raise ValueError("ProjectionType must be one of: %s" % ", ".join(valid_types))
return x
def billing_mode_validator(x):
"""
Property: Table.BillingMode
"""
valid_modes = ["PROVISIONED", "PAY_PER_REQUEST"]
if x not in valid_modes:
raise ValueError(
"Table billing mode must be one of: %s" % ", ".join(valid_modes)
)
return x
def table_class_validator(x):
"""
Property: Table.TableClass
"""
valid_table_classes = ["STANDARD", "STANDARD_INFREQUENT_ACCESS"]
if x not in valid_table_classes:
raise ValueError(
"Table class must be one of: %s" % ", ".join(valid_table_classes)
)
return x
def validate_table(self):
"""
Class: Table
"""
billing_mode = self.properties.get("BillingMode", "PROVISIONED")
indexes = self.properties.get("GlobalSecondaryIndexes", [])
tput_props = [self.properties]
tput_props.extend([x.properties for x in indexes if not isinstance(x, AWSHelperFn)])
def check_if_all(name, props):
validated = []
for prop in props:
is_helper = isinstance(prop.get(name), AWSHelperFn)
validated.append(name in prop or is_helper)
return all(validated)
def check_any(name, props):
validated = []
for prop in props:
is_helper = isinstance(prop.get(name), AWSHelperFn)
validated.append(name in prop and not is_helper)
return any(validated)
if isinstance(billing_mode, If):
if check_any("ProvisionedThroughput", tput_props):
raise ValueError(
"Table billing mode is per-request. "
"ProvisionedThroughput property is mutually exclusive"
)
return
if billing_mode == "PROVISIONED":
if not check_if_all("ProvisionedThroughput", tput_props):
raise ValueError(
"Table billing mode is provisioned. "
"ProvisionedThroughput required if available"
)
elif billing_mode == "PAY_PER_REQUEST":
if check_any("ProvisionedThroughput", tput_props):
raise ValueError(
"Table billing mode is per-request. "
"ProvisionedThroughput property is mutually exclusive"
)
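# Behaviour sketch for validate_table when it runs as a troposphere Table
# validator (summarising the branches above):
#   BillingMode="PROVISIONED"     -> ProvisionedThroughput must be present on the
#                                    table and on every GlobalSecondaryIndex
#   BillingMode="PAY_PER_REQUEST" -> any ProvisionedThroughput raises ValueError
#   BillingMode=If(...)           -> only the mutual-exclusion check is applied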
| 29.214953 | 88 | 0.619962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,093 | 0.349648 |
81c08bcad1b73822669737a9c7a8c3b7773030bc | 430 | py | Python | videoclip_sources/e004.py | ChrisScarred/misty2py-skills | 30557d246b91fb525866fe8b92e280d2609ca26b | ["MIT"]
import time
from misty2py.robot import Misty
from misty2py.utils.env_loader import EnvLoader
from misty2py_skills.utils.utils import get_abs_path
env_loader = EnvLoader(get_abs_path(".env"))
m = Misty(env_loader.get_ip())
d = m.event("subscribe", type="BatteryCharge")
e_name = d.get("event_name")
time.sleep(1)
d = m.event("get_data", name=e_name)
# do something with the data here
d = m.event("unsubscribe", name=e_name)
| 21.5 | 52 | 0.755814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.232558 |
81c1b8a6fb449ff2c4c107dcaec453b46983daed | 2,302 | py | Python | p2/Python Files/audit_street.py | priyankaswadi/Udacity-Data-Analyst-Nanodegree | 52989f7e447e69c6fb08119f4e39a4500dcdf571 | ["Apache-2.0"]
# Map incorrect and abbreviated street names to correct/better ones
import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint
OSMFILE = "albany.osm"
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
# UPDATE THIS VARIABLE
mapping = {"rd": "Road",
"Rd": "Road",
"road": "Road",
"Ave": "Avenue",
"Ave.": "Avenue",
"AVE": "Avenue",
"way" : "Way",
"street": "Street",
"way":"Way",
"Dr.":"Drive",
"Blvd":"Boulevard",
"rt":"Route",
"Ext": "Extension",
"Jay":"Jay Street",
"Nott St E":"Nott Street East",
"Troy-Schenetady-Road":"Troy Schenectady Road",
"Troy-Schenetady Rd" :"Troy Schenectady Road",
"Delatour":"Delatour Road",
"Deltour": "Delatour Road",
"Sparrowbush": "Sparrowbush Road"
}
# street types considered already correct; a street name ending in anything
# else is collected by the audit for review
expected = ["Street", "Avenue", "Boulevard", "Drive", "Court", "Place", "Square",
            "Lane", "Road", "Trail", "Parkway", "Commons", "Way", "Extension",
            "Route", "East", "West", "North", "South"]
def audit_street_type(street_types, street_name):
m = street_type_re.search(street_name)
if m:
street_type = m.group()
if street_type not in expected:
street_types[street_type].add(street_name)
def is_street_name(elem):
return (elem.attrib['k'] == "addr:street")
def audit(osmfile):
osm_file = open(osmfile, "r")
street_types = defaultdict(set)
for event, elem in ET.iterparse(osm_file, events=("start",)):
if elem.tag == "node" or elem.tag == "way":
for tag in elem.iter("tag"):
if is_street_name(tag):
audit_street_type(street_types, tag.attrib['v'])
osm_file.close()
return street_types
def update_name(name, mapping):
n = street_type_re.search(name)
if n:
n = n.group()
for m in mapping:
if n == m:
name = name[:-len(n)] + mapping[m]
return name
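# Examples of the mapping above in action:
#   update_name("Main rd", mapping)      -> "Main Road"
#   update_name("Western Ave.", mapping) -> "Western Avenue"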
def test():
st_types = audit(OSMFILE)
pprint.pprint(dict(st_types))
for st_type, ways in st_types.iteritems():
for name in ways:
better_name = update_name(name, mapping)
if (name == better_name):
continue
print name + " --> " + better_name
if __name__ == '__main__':
    test()
| 27.73494 | 68 | 0.541703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.24066 |
81c1bcd0f7ae70104609fcbc8c962b13a08d4c00 | 1,943 | py | Python | modules/week2/utils.py | tobias-z/4-sem-python | 35c0a73f0a2085f2dc539c8ec8761c26675aa078 | ["MIT"]
from io import TextIOWrapper
import os
from typing import List
OUTPUT = "files/output.csv"
FOLDER = "modules/week2/folders"
def get_file_names(folderpath, out=OUTPUT):
"""takes a path to a folder and writes all filenames in the folder to a specified output file"""
dir_list = os.listdir(folderpath)
with open(out, "w") as file:
for line in dir_list:
file.write(line + "\n")
def get_all_file_names(folderpath, out=OUTPUT):
"""takes a path to a folder and write all filenames recursively (files of all sub folders to)"""
def write_dir_to_file(file: TextIOWrapper, dir: List[str], folderpath: str):
for line in dir:
path_to_file = f"{folderpath}/{line}"
if os.path.isdir(path_to_file):
write_dir_to_file(file, os.listdir(path_to_file), path_to_file)
continue
file.write(line + "\n")
with open(out, "w") as file:
write_dir_to_file(file, os.listdir(folderpath), folderpath)
def print_line_one(file_names: List[str]):
"""takes a list of filenames and print the first line of each"""
for file_name in file_names:
with open(file_name) as file:
print(file.readline())
def print_emails(file_names: List[str]):
"""takes a list of filenames and print each line that contains an email (just look for @)"""
for file_name in file_names:
with open(file_name) as file:
for line in file.readlines():
if "@" in line:
print(line)
def write_headlines(md_files: List[str], out=OUTPUT):
"""takes a list of md files and writes all headlines (lines starting with #) to a file"""
with open(out, "w") as output_file:
for md_file in md_files:
with open(md_file) as file:
for line in file.readlines():
if line.startswith("#"):
output_file.write(line)
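# Small usage sketch, only run when this module is executed directly; FOLDER
# and OUTPUT are the constants defined at the top of the file and are assumed
# to point at existing paths.
if __name__ == "__main__":
    get_file_names(FOLDER)       # top-level file names only
    get_all_file_names(FOLDER)   # recurse into sub-folders as well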
| 34.696429 | 100 | 0.629439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 523 | 0.269171 |
81c234494317e86e4d284863eff810f848405889 | 4,480 | py | Python | src/api/providers.py | ismetacar/ertis-auth, ismetacar/Ertis-Auth | 64727cc8201d5fcc955485e94262500d63ff4b17 | ["MIT"] | stars: 17 | issues: 5 | forks: 2
import json
from sanic import response
from sanic_openapi import doc
from src.plugins.authorization import authorized
from src.plugins.validator import validated
from src.request_models.providers import Provider
from src.request_models.query_model import Query
from src.resources.generic import ensure_membership_is_exists, QUERY_BODY_SCHEMA
from src.resources.providers.resource import CREATE_PROVIDER_SCHEMA
from src.utils import query_helpers
from src.utils.json_helpers import bson_to_json
def init_providers_api(app, settings):
# region Create Provider
@app.route('/api/v1/memberships/<membership_id>/providers', methods=['POST'])
@doc.tag("Providers")
@doc.operation("Create Provider")
@doc.consumes(Provider, location="body", content_type="application/json")
@validated(CREATE_PROVIDER_SCHEMA)
@authorized(app, settings, methods=['POST'], required_permission='providers.create')
async def create_provider(request, membership_id, *args, **kwargs):
await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
body = request.json
resource = await app.provider_service.create_provider(body, request.ctx.utilizer)
return response.json(json.loads(json.dumps(resource, default=bson_to_json)), 201)
# endregion
# region Get Provider
@app.route('/api/v1/memberships/<membership_id>/providers/<provider_id>', methods=['GET'])
@doc.tag("Providers")
@doc.operation("Get Provider")
@authorized(app, settings, methods=['GET'], required_permission='providers.read')
async def get_provider(request, membership_id, provider_id, *args, **kwargs):
await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
resource = await app.provider_service.get_provider(provider_id, request.ctx.utilizer)
return response.json(json.loads(json.dumps(resource, default=bson_to_json)))
# endregion
# region Update Provider
@app.route('/api/v1/memberships/<membership_id>/providers/<provider_id>', methods=['PUT'])
@doc.tag("Providers")
@doc.operation("Update Provider")
@doc.consumes(Provider, location="body", content_type="application/json")
@authorized(app, settings, methods=['PUT'], required_permission='providers.update')
async def update_provider(request, membership_id, provider_id, **kwargs):
await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
body = request.json
resource = await app.provider_service.update_provider(provider_id, body, request.ctx.utilizer,
app.persist_event)
return response.json(json.loads(json.dumps(resource, default=bson_to_json)), 200)
# endregion
# region Delete Provider
@app.route('/api/v1/memberships/<membership_id>/providers/<provider_id>', methods=['DELETE'])
@doc.tag("Providers")
@doc.operation("Delete Provider")
@authorized(app, settings, methods=['DELETE'], required_permission='providers.delete')
async def delete_provider(request, membership_id, provider_id, **kwargs):
await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
await app.provider_service.delete_provider(provider_id, request.ctx.utilizer, app.persist_event)
return response.json({}, 204)
# endregion
# region Query Applications
# noinspection DuplicatedCode
@app.route('/api/v1/memberships/<membership_id>/providers/_query', methods=['POST'])
@doc.tag("Providers")
@doc.operation("Query Providers")
@doc.consumes(Query, location="body", content_type="application/json")
@authorized(app, settings, methods=['POST'], required_permission='providers.read')
@validated(QUERY_BODY_SCHEMA)
async def query_providers(request, membership_id, **kwargs):
await ensure_membership_is_exists(app.db, membership_id, request.ctx.utilizer)
where, select, limit, skip, sort = query_helpers.parse(request)
providers, count = await app.provider_service.query_providers(
membership_id,
where,
select,
limit,
skip,
sort
)
response_json = json.loads(json.dumps({
'data': {
'items': providers,
'count': count
}
}, default=bson_to_json))
return response.json(response_json, 200)
# endregion
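# Wiring sketch (assumes a configured Sanic app and a settings mapping created
# elsewhere in the project):
#   init_providers_api(app, settings)
# registers the create/get/update/delete and _query routes under
# /api/v1/memberships/<membership_id>/providers.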
| 43.076923 | 104 | 0.705134 | 0 | 0 | 0 | 0 | 3,650 | 0.814732 | 2,129 | 0.475223 | 863 | 0.192634 |
81c238300e9927729e01076aa4674e5af0b62cf8 | 3,078 | py | Python | lista08_pesquisa/questao02.py | mayararysia/ESTD | 65aa8816aa8773066201cb410b02c1cb72ad5611 | ["MIT"]
# -*- coding: utf-8 -*-
# Exercise List 08 (Search) - Question 02
#Mayara Rysia
from time import time
from time import sleep
from random import randint
"""
2. Use the two binary search functions presented (iterative and recursive). Generate
a list of random numbers, sort them, and check their performance. What are
the results?
"""
# Binary search - recursive version
def busca_binaria(uma_lista, item_procurado):
if len(uma_lista) == 0:
return False
meio = len(uma_lista)//2
if uma_lista[meio] == item_procurado:
return True
if item_procurado < uma_lista[meio]:
return busca_binaria(uma_lista[:meio], item_procurado)
else:
return busca_binaria(uma_lista[meio+1:], item_procurado)
# Binary search - iterative version
def busca_binaria_it(uma_lista, item_pesquisado):
inicio = 0
fim = len(uma_lista)-1
encontrou = False
while inicio<=fim and not encontrou:
meio = (inicio + fim)//2
if uma_lista[meio] == item_pesquisado:
encontrou = True
else:
if item_pesquisado < uma_lista[meio]:
fim = meio-1
else:
inicio = meio+1
return encontrou
# sorts the list
def ordena(lista):
quant = tam = len(lista)
continua = True
while quant>=1 and continua:
continua = False
for i in range(tam):
j=i+1
if j != tam and lista[i] > lista[j]:
continua = True
ant = lista[i]
lista[i] = lista[j]
lista[j] = ant
i=j
quant-=1
return lista
# builds the list of random numbers
def criaLista():
lista = []
for i in range(9):
num = randint(0, 42)
lista.append(num)
return lista
def Teste(lista, num):
    print('Searching for', num, 'in list', lista)
    inicio = time()
    result = busca_binaria(lista, num)
    fim = time()
    tempo_gasto = fim-inicio
    print('result:', result)
    return tempo_gasto
def Teste_it(lista, num):
    print('Searching for', num, 'in list', lista)
    inicio = time()
    result = busca_binaria_it(lista, num)
    fim = time()
    tempo_gasto = fim-inicio
    print('result:', result)
return tempo_gasto
if __name__ == '__main__':
l = criaLista()
lista = ordena(l)
qtd_br = qtd_bi = 0
    # Tests
for i in range(5):
num = randint(0, 42)
print("<< Busca Recursiva >> \n")
tempo_gasto_br = Teste(lista, num)
print('\ttempo gasto: ', tempo_gasto_br)
print('\n\n')
sleep(2)
print("<< Busca Iterativa >> \n")
tempo_gasto_bi = Teste_it(lista, num)
print('\ttempo gasto: ', tempo_gasto_bi)
print('\n\n')
if tempo_gasto_br < tempo_gasto_bi:
qtd_br +=1
print('\n-> Busca Recursiva levou o menor tempo\n')
else:
qtd_bi +=1
print('\n-> Busca Iterativa levou o menor tempo\n')
print("------- ------- ------- ------- -------")
print("\nCONCLUSΓO\n\n ")
if qtd_br > qtd_bi:
print("Busca BinΓ‘ria Recursiva teve o melhor desempenho!")
else:
print("Busca BinΓ‘ria Iterativa teve o melhor desempenho!")
print("Quantidade BinΓ‘ria Recursiva: ", qtd_br)
print("Quantidade BinΓ‘ria Iterativa: ", qtd_bi)
| 20.938776 | 82 | 0.635153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 874 | 0.282482 |
81c3777bd3aa3fe5f25a3ee068f24e1720ba3426 | 3,290 | py | Python | ccvpn/views/__init__.py | CCrypto/ccvpn, CCrypto/ccvpn2 | 6bbfd01f41816bea905518f302f4cec474fdd221 | ["MIT"] | stars: 81 | issues: 1 | forks: 20
import codecs
import markdown
import os
import logging
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPOk, HTTPNotFound
from sqlalchemy import func
from mako.lookup import TemplateLookup
import mako.exceptions
logger = logging.getLogger(__name__)
from ccvpn.models import DBSession, User, IcingaError, IcingaQuery, Gateway, VPNSession
from ccvpn.views import account, admin, api, order # noqa
@view_config(context=Exception)
def error_view(exc, request):
logger.exception('Exception', exc_info=exc)
raise
@view_config(route_name='home', renderer='home.mako')
def home(request):
settings = request.registry.settings
return {
'eur_price': float(settings.get('paypal.month_price', 2)),
'btc_price': float(settings.get('bitcoin.month_price', 0.02)),
'motd': settings.get('motd'),
}
@view_config(route_name='ca_crt')
def ca_crt(request):
return HTTPOk(body=account.openvpn_ca)
@view_config(route_name='page', renderer='page.mako')
def page(request):
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
pagesdir = os.path.join(root, 'pages/')
basename = pagesdir + request.matchdict['page']
irc_username = request.user.username if request.user else '?'
try:
translated_file = basename + '.' + request.locale_name + '.md'
fallback_file = basename + '.md'
if os.path.isfile(translated_file):
template = translated_file
elif os.path.isfile(fallback_file):
template = fallback_file
else:
raise FileNotFoundError()
with open(template, encoding='utf8') as template_f:
mdt = template_f.read()
mdt = mdt.replace('${irc_username}', irc_username)
md = markdown.Markdown(extensions=['toc', 'meta',
'codehilite(noclasses=True)'])
content = md.convert(mdt)
title = md.Meta['title'][0] if 'title' in md.Meta else None
return {'content': content, 'title': title}
except FileNotFoundError:
return HTTPNotFound()
def format_bps(bits):
multiples = ((1e9, 'G'), (1e6, 'M'), (1e3, 'K'), (0, ''))
for d, m in multiples:
if bits < d:
continue
n = bits / (d or 1)
return '{:2g}{}bps'.format(n, m)
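# Examples of the formatting above:
#   format_bps(1.5e6) -> '1.5Mbps'
#   format_bps(500)   -> '500bps'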
@view_config(route_name='status', renderer='status.mako')
def status(request):
settings = request.registry.settings
domain = settings.get('net_domain', '')
gateways = DBSession.query(Gateway) \
.filter_by(enabled=True) \
.order_by(Gateway.country, Gateway.name) \
.all()
l = list(gateways)
for host in l:
host.host_name = '%s-%s.%s'%(host.country, host.name, domain)
host.bps_formatted = format_bps(host.bps)
return {
'gateways': l,
'n_users': DBSession.query(func.count(User.id))
.filter_by(is_paid=True).scalar(),
'n_connected': DBSession.query(func.count(VPNSession.id)) \
.filter(VPNSession.is_online==True).scalar(),
'n_countries': len(set(i.country for i in l)),
'total_bw': format_bps(sum(i.bps for i in l)),
}
| 33.571429 | 87 | 0.619149 | 0 | 0 | 0 | 0 | 2,628 | 0.798784 | 0 | 0 | 379 | 0.115198 |
81c43cdcda51abd9a7c25faabe42afd1a69a3e45 | 3,142 | py | Python | rx/subjects/subject.py | MichaelSchneeberger/RxPY | 994f974d37783f63c5d9e018a316fa9b06ba9337 | ["ECL-2.0", "Apache-2.0"]
import threading
from typing import Any, List, Optional
from rx.disposable import Disposable
from rx.core.typing import Observer, Scheduler
from rx.core import Observable, typing
from rx.internal import DisposedException
from .anonymoussubject import AnonymousSubject
from .innersubscription import InnerSubscription
class Subject(Observable, Observer):
"""Represents an object that is both an observable sequence as well
as an observer. Each notification is broadcasted to all subscribed
observers.
"""
def __init__(self) -> None:
super().__init__()
self.is_disposed = False
self.is_stopped = False
self.observers: List[Observer] = []
self.exception: Optional[Exception] = None
self.lock = threading.RLock()
def check_disposed(self):
if self.is_disposed:
raise DisposedException()
def _subscribe_core(self, observer: Observer, scheduler: Scheduler = None) -> typing.Disposable:
with self.lock:
self.check_disposed()
if not self.is_stopped:
self.observers.append(observer)
return InnerSubscription(self, observer)
if self.exception:
observer.on_error(self.exception)
return Disposable()
observer.on_completed()
return Disposable()
def on_completed(self) -> None:
"""Notifies all subscribed observers of the end of the
sequence."""
observers = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
observers = self.observers[:]
self.observers = []
self.is_stopped = True
if observers:
for observer in observers:
observer.on_completed()
def on_error(self, error: Exception) -> None:
"""Notifies all subscribed observers with the exception.
Args:
error: The exception to send to all subscribed observers.
"""
os = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
os = self.observers[:]
self.observers = []
self.is_stopped = True
self.exception = error
if os:
for observer in os:
observer.on_error(error)
def on_next(self, value: Any) -> None:
"""Notifies all subscribed observers with the value.
Args:
value: The value to send to all subscribed observers.
"""
os = None
with self.lock:
self.check_disposed()
if not self.is_stopped:
os = self.observers[:]
if os:
for observer in os:
observer.on_next(value)
def dispose(self) -> None:
"""Unsubscribe all observers and release resources."""
with self.lock:
self.is_disposed = True
self.observers = []
@classmethod
def create(cls, observer, observable):
return AnonymousSubject(observer, observable)
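# Minimal usage sketch (assumes the public Observable.subscribe() that accepts
# callback arguments, as in mainline RxPY):
#
#     subject = Subject()
#     subject.subscribe(lambda value: print("got", value))
#     subject.on_next(42)      # broadcast to current observers
#     subject.on_completed()   # later subscribers complete immediately
#     subject.dispose()        # drop observers and release the subject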
| 28.825688 | 100 | 0.590707 | 2,820 | 0.897518 | 0 | 0 | 109 | 0.034691 | 0 | 0 | 588 | 0.187142 |
81c467ca6111d33d242e6a5ccd32ee27968ad970 | 931 | py | Python | scripts/uda.py | nng555/fairseq | c9730a125825a85f33042e1b9fd1959b8ca829e5 | ["MIT"] | stars: 2
import os
import hydra
import subprocess
import logging
from omegaconf import DictConfig
from hydra import slurm_utils
log = logging.getLogger(__name__)
@hydra.main(config_path='/h/nng/conf/robust/config.yaml', strict=False)
def launch(cfg: DictConfig):
os.environ['NCCL_DEBUG'] = 'INFO'
if cfg.data.task in ['nli']:
base_path = '/scratch/ssd001/datasets/'
elif cfg.data.task in ['sentiment']:
base_path = '/h/nng/data'
else:
        raise Exception('task {} data path not found'.format(cfg.data.task))
data_dir = os.path.join(base_path, cfg.data.task, cfg.data.name, cfg.data.fdset)
flags = [data_dir, str(cfg.gen.num_shards), str(cfg.gen.shard), str(cfg.gen.sampling_temp), cfg.gen.fname]
command = ['bash', 'run.sh'] + flags
os.chdir('/h/nng/programs/uda/back_translate')
log.info(' '.join(command))
subprocess.call(command)
if __name__ == "__main__":
launch()
| 29.09375 | 110 | 0.684211 | 0 | 0 | 0 | 0 | 734 | 0.7884 | 0 | 0 | 198 | 0.212675 |
81c5bce0c4d9254a207a213c3a227fa2fcf0908d | 2,062 | py | Python | 06_Business/application_iris/app.py | MaryMP11/The_Bridge_School_DataScience_PT | 8b4a24d0b79608061a470e806de542dbbcccf75d | ["Apache-2.0"]
from flask import Flask, request, jsonify, session, url_for, redirect, render_template
import joblib
from flower_form import FlowerForm
classifier_loaded = joblib.load("application_iris/saved_models/knn_iris_dataset.pkl")
encoder_loaded = joblib.load("application_iris/saved_models/iris_label_encoder.pkl")
# prediction function
def make_prediction(model, encoder, sample_json):
# parse input from request
SepalLengthCm = sample_json['SepalLengthCm']
SepalWidthCm = sample_json['SepalWidthCm']
PetalLengthCm = sample_json['PetalLengthCm']
PetalWidthCm = sample_json['PetalWidthCm']
# Make an input vector
flower = [[SepalLengthCm, SepalWidthCm, PetalLengthCm, PetalWidthCm]]
# Predict
prediction_raw = model.predict(flower)
# Convert Species index to Species name
prediction_real = encoder.inverse_transform(prediction_raw)
return prediction_real[0]
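# Example call with illustrative measurements (the model and encoder are the
# objects loaded above):
#   make_prediction(classifier_loaded, encoder_loaded,
#                   {'SepalLengthCm': 5.1, 'SepalWidthCm': 3.5,
#                    'PetalLengthCm': 1.4, 'PetalWidthCm': 0.2})
#   -> a species name such as 'Iris-setosa'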
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
@app.route("/", methods=['GET','POST'])
def index():
form = FlowerForm()
if form.validate_on_submit():
session['SepalLengthCm'] = form.SepalLengthCm.data
session['SepalWidthCm'] = form.SepalWidthCm.data
session['PetalLengthCm'] = form.PetalLengthCm.data
session['PetalWidthCm'] = form.PetalWidthCm.data
return redirect(url_for("prediction"))
return render_template("home.html", form=form)
# Read models
# classifier_loaded = joblib.load("saved_models/01.knn_with_iris_dataset.pkl")
# encoder_loaded = joblib.load("saved_models/02.iris_label_encoder.pkl")
@app.route('/prediction')
def prediction():
content = {'SepalLengthCm': float(session['SepalLengthCm']), 'SepalWidthCm': float(session['SepalWidthCm']),
'PetalLengthCm': float(session['PetalLengthCm']), 'PetalWidthCm': float(session['PetalWidthCm'])}
results = make_prediction(classifier_loaded, encoder_loaded, content)
return render_template('prediction.html', results=results)
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)
| 32.21875 | 112 | 0.734724 | 0 | 0 | 0 | 0 | 850 | 0.412221 | 0 | 0 | 729 | 0.35354 |
81c63070aaf168ec47a744d51e1a20dd220ae56b | 8,522 | py | Python | test.py | EdwinChan/python-physical | 44383280acddd77b35adf8923b7d8cdb512553a0 | ["MIT"] | stars: 2
import math
import re
import unittest
import urllib.error
import urllib.request
from .core import Quantity
from .define import defined_systems
si = defined_systems['si']
esu = defined_systems['esu']
emu = defined_systems['emu']
gauss = defined_systems['gauss']
class PhysicalQuantitiesTest(unittest.TestCase):
def assert_quantity_equal(self, first, second):
self.assertAlmostEqual(first.value, second.value)
self.assertAlmostEqual(first.error, second.error)
self.assertEqual(first.units, second.units)
self.assertEqual(first.system, second.system)
def test_sign(self):
a = Quantity(1, 0.2, {'Kilogram': 1}, si)
b = Quantity(-1, 0.2, {'Kilogram': 1}, si)
self.assert_quantity_equal(+a, a)
self.assert_quantity_equal(+b, b)
self.assert_quantity_equal(-a, b)
self.assert_quantity_equal(-b, a)
self.assert_quantity_equal(abs(a), a)
self.assert_quantity_equal(abs(b), a)
def test_add(self):
a = Quantity(1, 0.2, {'Newton': 1}, si)
b = Quantity(3, 0.4, {'Kilogram': 1, 'Meter': 1, 'Second': -2}, si)
c = Quantity(4, 1 / math.sqrt(5), {'Newton': 1}, si)
d = Quantity(1, 0.2, {'Kilogram': 1}, si)
self.assert_quantity_equal(a + b, c.expand())
with self.assertRaises(TypeError): a + d
with self.assertRaises(TypeError): a + 1
def test_subtract(self):
a = Quantity(1, 0.2, {'Newton': 1}, si)
b = Quantity(3, 0.4, {'Kilogram': 1, 'Meter': 1, 'Second': -2}, si)
c = Quantity(-2, 1 / math.sqrt(5), {'Newton': 1}, si)
d = Quantity(1, 0.2, {'Kilogram': 1}, si)
self.assert_quantity_equal(a - b, c.expand())
with self.assertRaises(TypeError): a - d
with self.assertRaises(TypeError): a - 1
def test_multiply(self):
a = Quantity(1, 0.2, {'Kilogram': 1}, si)
b = Quantity(3, 0.4, {'Meter': -2}, si)
c = Quantity(3, math.sqrt(13) / 5, {'Kilogram': 1, 'Meter': -2}, si)
self.assert_quantity_equal(a * b, c)
a = Quantity(1, 0.2, {'Kilogram': 1}, si) * 5
b = Quantity(5, 1, {'Kilogram': 1}, si)
self.assert_quantity_equal(a, b)
a = Quantity(1, 0.2, {'Kilogram': 1}, si) * -5
b = Quantity(-5, 1, {'Kilogram': 1}, si)
self.assert_quantity_equal(a, b)
a = 5 * Quantity(3, 0.4, {'Kilogram': 1}, si)
b = Quantity(15, 2, {'Kilogram': 1}, si)
self.assert_quantity_equal(a, b)
a = -5 * Quantity(3, 0.4, {'Kilogram': 1}, si)
b = Quantity(-15, 2, {'Kilogram': 1}, si)
self.assert_quantity_equal(a, b)
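    # The expected uncertainties above follow standard error propagation for a
    # product, sigma_c = |c| * sqrt((sigma_a/a)**2 + (sigma_b/b)**2); for
    # a = 1 +/- 0.2 and b = 3 +/- 0.4 this gives 3*sqrt(0.2**2 + (0.4/3)**2)
    # = sqrt(13)/5, the error used for c.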
def test_divide(self):
a = Quantity(2, 0.1, {'Kilogram': 1}, si)
b = Quantity(4, 0.3, {'Meter': -2}, si)
c = Quantity(0.5, math.sqrt(13) / 80,
{'Kilogram': 1, 'Meter': 2}, si)
self.assert_quantity_equal(a / b, c)
a = Quantity(1, 0.2, {'Kilogram': 1}, si) / 5
b = Quantity(0.2, 0.04, {'Kilogram': 1}, si)
self.assert_quantity_equal(a, b)
a = Quantity(1, 0.2, {'Kilogram': 1}, si) / -5
b = Quantity(-0.2, 0.04, {'Kilogram': 1}, si)
self.assert_quantity_equal(a, b)
a = 5 / Quantity(3, 0.4, {'Kilogram': 1}, si)
b = Quantity(5/3, 2/9, {'Kilogram': -1}, si)
self.assert_quantity_equal(a, b)
a = -5 / Quantity(3, 0.4, {'Kilogram': 1}, si)
b = Quantity(-5/3, 2/9, {'Kilogram': -1}, si)
self.assert_quantity_equal(a, b)
def test_power(self):
a = Quantity(3, 0.4, {'Kilogram': 1, 'Meter': 1}, si) ** 5
b = Quantity(243, 162, {'Kilogram': 5, 'Meter': 5}, si)
self.assert_quantity_equal(a, b)
def test_almost_equals(self):
a = Quantity(1, 0.5, {'Kilogram': 1}, si)
b = Quantity(2, 0.7, {'Kilogram': 1}, si)
c = Quantity(3, 0.9, {'Kilogram': 1}, si)
d = Quantity(1, 0.5, {'Meter': 1}, si)
e = Quantity(1, 0.5, {}, si)
f = Quantity(2, 0.7, {}, si)
self.assertTrue(a.almost_equals(b))
self.assertFalse(a.almost_equals(c))
self.assertRaises(TypeError, a.almost_equals, d)
for x in [a, b, c, d]:
self.assertRaises(TypeError, x.almost_equals, 1)
self.assertTrue(e.almost_equals(1))
self.assertTrue(f.almost_equals(2))
self.assertFalse(e.almost_equals(2))
self.assertFalse(f.almost_equals(1))
self.assertTrue(e.almost_equals(f))
def test_float(self):
a = Quantity(1, 0, {'Second': 1, 'Hertz': 1}, si)
b = Quantity(365.25 * 86400, 0, {'Second': 1, 'JulianYear': -1}, si)
self.assertEqual(math.cos(a), math.cos(1))
self.assertEqual(math.cos(b), math.cos(1))
def test_expand(self):
# Lorentz force
a = Quantity(1, 0,
{'Coulomb': 1, 'Meter': 1, 'Second': -1, 'Tesla': 1}, si)
b = Quantity(1, 0, {'Newton': 1}, si)
self.assert_quantity_equal(a.expand(), b.expand())
# Faraday's law
a = Quantity(1, 0, {'Weber': 1, 'Second': -1}, si)
b = Quantity(1, 0, {'Volt': 1}, si)
self.assert_quantity_equal(a.expand(), b.expand())
# torque of a motor
a = Quantity(1, 0, {'Ampere': 1, 'Tesla': 1, 'Meter': 2}, si)
b = Quantity(1, 0, {'Newton': 1, 'Meter': 1}, si)
self.assert_quantity_equal(a.expand(), b.expand())
# resonance frequency of an RLC circuit
a = Quantity(1, 0, {'Henry': -1/2, 'Farad': -1/2}, si)
b = Quantity(1, 0, {'Hertz': 1}, si)
self.assert_quantity_equal(a.expand(), b.expand())
def test_simple_constants(self):
for system in defined_systems.values():
a = Quantity(13.6, 0,
{'ElectronVolt': 1, 'RydbergEnergy': -1}, system).expand()
self.assertAlmostEqual(a.value, 1, places=3)
self.assertEqual(a.units, {})
a = system.get_constant('FineStructureConstant').expand() * 137
self.assertAlmostEqual(a.value, 1, places=3)
self.assertEqual(a.units, {})
def test_electromagnetic_constants(self):
from . import si, esu, emu, gauss
a = (si.e**2 / si.a0 / (4*math.pi*si.epsilon0) / (1e-7*si.J)).expand()
b = (esu.e**2 / esu.a0 / esu.erg).expand()
c = (emu.e**2 / emu.a0 * emu.c**2 / emu.erg).expand()
d = (gauss.e**2 / gauss.a0 / gauss.erg).expand()
self.assertAlmostEqual(a.value * 1e11, b.value * 1e11)
self.assertAlmostEqual(a.value * 1e11, c.value * 1e11)
self.assertAlmostEqual(a.value * 1e11, d.value * 1e11)
a = (si.muB**2 / si.a0**3 * si.mu0 / (1e-7*si.J)).expand()
b = (esu.muB**2 / esu.a0**3 / esu.c**2 / esu.erg).expand()
c = (emu.muB**2 / emu.a0**3 / emu.erg).expand()
d = (gauss.muB**2 / gauss.a0**3 / gauss.erg).expand()
self.assertAlmostEqual(a.value * 1e3, b.value * 1e3)
self.assertAlmostEqual(a.value * 1e3, c.value * 1e3)
self.assertAlmostEqual(a.value * 1e3, d.value * 1e3)
def test_codata(self):
url = 'http://physics.nist.gov/cuu/Constants/Table/allascii.txt'
units = {
'AtomicMassUnit': 'unified atomic mass unit'}
constants = {
'AvogadroConstant': 'Avogadro constant',
'ElectronGFactor': 'electron g factor',
'ProtonGFactor': 'proton g factor',
'NeutronGFactor': 'neutron g factor',
'MuonGFactor': 'muon g factor',
'LightSpeed': 'speed of light in vacuum',
'ElementaryCharge': 'atomic unit of charge',
'PlanckConstant': 'Planck constant',
'BoltzmannConstant': 'Boltzmann constant',
'GravitationalConstant': 'Newtonian constant of gravitation',
'VacuumPermeability': 'vacuum mag. permeability',
'ElectronMass': 'electron mass',
'ProtonMass': 'proton mass',
'NeutronMass': 'neutron mass',
'MuonMass': 'muon mass'}
try:
response = urllib.request.urlopen(url)
except urllib.error.URLError:
raise ValueError('Cannot download data.')
data = iter(response.read().decode('ascii').rstrip('\n').split('\n'))
while not next(data).startswith('--'):
pass
data = (re.split(' {2,}', x) for x in data)
def parse_value(x):
return float(x.replace(' ', '').replace('...', ''))
def parse_error(x):
return 0 if x == '(exact)' else float(x.replace(' ', ''))
data = {x: (parse_value(y), parse_error(z)) for x, y, z, *_ in data}
for local_name, codata_name in units.items():
quantity = Quantity(1, 0, {local_name: 1}, si).expand()
x, y = data[codata_name]
assert math.isclose(quantity.value, x)
assert math.isclose(quantity.error, y)
for local_name, codata_name in constants.items():
quantity = si.get_constant(local_name).expand()
x, y = data[codata_name]
assert math.isclose(quantity.value, x)
assert math.isclose(quantity.error, y)
if __name__ == '__main__':
unittest.main()
| 40.198113 | 74 | 0.602558 | 8,211 | 0.963506 | 0 | 0 | 0 | 0 | 0 | 0 | 1,444 | 0.169444 |
81c77ad5e87d4cea69ce9a40ad25f9768714ae45 | 3,828 | py | Python | test/test_cirrus_ngs/test_cfnCluster/test_ConnectionManager.py | ucsd-ccbb/cirrus-ngs | 8f51450b3d971b03d4fd08a1aab11d5a076aa23e | [
"MIT"
]
| 8 | 2017-01-20T00:00:45.000Z | 2022-02-11T00:20:45.000Z | test/test_cirrus_ngs/test_cfnCluster/test_ConnectionManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
]
| 3 | 2018-03-23T19:09:06.000Z | 2018-03-26T19:49:55.000Z | test/test_cirrus_ngs/test_cfnCluster/test_ConnectionManager.py | miko-798/cirrus-ngs | 2c005f0fe29e298652ed2164e08ada75e908229b | [
"MIT"
]
| 2 | 2018-03-29T06:24:31.000Z | 2019-04-01T18:34:53.000Z | import unittest
import sys
import os
sys.path.append(os.getcwd().replace("test", "src"))
import cirrus_ngs.cfnCluster.ConnectionManager as ConnectionManager
import paramiko
import tempfile
import re
##THIS TEST WILL NOT WORK##
class test_ConnectionManager(unittest.TestCase):
def test_paramiko(self):
key_file = tempfile.NamedTemporaryFile()
key_file.write(b"notakey")
self.assertRaises(paramiko.SSHException, paramiko.RSAKey.from_private_key_file, key_file.name)
key_file.close()
#key path
new_key = ""
#checks to make sure a real key file works. will not be portable
#leaving my ssh key for users to download for tests seems not smart
paramiko.RSAKey.from_private_key_file(new_key)
def test_connect_master(self):
#ip
hostname = ""
username = "ec2-user"
key_file = tempfile.NamedTemporaryFile()
key_file.write(b"not_a_key")
key_file.seek(0)
self.assertRaises(paramiko.SSHException, ConnectionManager.connect_master, hostname, username, key_file.name)
key_file.close()
        #this won't even work elsewhere but I don't want to put my keyfile into the repo
#key path
new_key = ""
ConnectionManager.connect_master(hostname, username, new_key)
#checks if last line in the standard output is "connected"
out = sys.stdout.getvalue().strip()
last_line = out.split()[-1]
self.assertEqual(last_line, "connected")
#checks that connected and connecting only are printed once exactly
num_connected = len(re.findall("connected", out))
self.assertEqual(1, num_connected)
num_connecting = len(re.findall("connecting", out))
self.assertEqual(1, num_connecting)
def test_execute_command(self):
#ip
hostname = ""
username = "ec2-user"
#key path
key = ""
ssh_client = ConnectionManager.connect_master(hostname, username, key)
command = "pwd"
#checks that the pwd command worked
self.assertEqual(ConnectionManager.execute_command(ssh_client, command), "/home/ec2-user\n")
ssh_client = "not an ssh_client"
#makes sure that an error is raised when a non sshclient is passed in
self.assertRaises(AttributeError, ConnectionManager.execute_command, ssh_client, command)
def test_copy_file(self):
#ip
hostname = ""
username = "ec2-user"
#key path
key = ""
ssh_client = ConnectionManager.connect_master(hostname, username, key)
temp = tempfile.NamedTemporaryFile()
localpath = temp.name
remotepath = "/home/ec2-user"
ConnectionManager.copy_file(ssh_client, localpath, remotepath)
out = sys.stdout.getvalue().strip().split()[-2:]
#checks that the copy file prints the local and remote paths
self.assertEqual(out, [localpath, remotepath])
ls_output = ConnectionManager.execute_command(ssh_client,
"ls tmp* | wc -l")
ConnectionManager.execute_command(ssh_client, "rm tmp*")
#checks that there is exactly 1 tempfile in the home directory of the server
self.assertEqual(ls_output.strip(), "1")
#makes sure it doesn't work with a nonfile
self.assertRaises(FileNotFoundError, ConnectionManager.copy_file,
ssh_client, "fakefile", "/home/ec2-user")
#########################################################################
#copy_gatk, list_dir, and close_connection are considered trivial methods
#and are not tested
#########################################################################
if __name__ == "__main__":
unittest.main(module=__name__, buffer=True, exit=False)
| 35.775701 | 117 | 0.637931 | 3,510 | 0.916928 | 0 | 0 | 0 | 0 | 0 | 0 | 1,163 | 0.303814 |
81c7caa0739efb8823c259206d2c89fc45540cae | 3,166 | py | Python | src/backend/opus/opusctl/cmds/process.py | DTG-FRESCO/opus | 8975e154524802efead82794ab2f70d4a1611000 | [
"Apache-2.0"
]
| null | null | null | src/backend/opus/opusctl/cmds/process.py | DTG-FRESCO/opus | 8975e154524802efead82794ab2f70d4a1611000 | [
"Apache-2.0"
]
| null | null | null | src/backend/opus/opusctl/cmds/process.py | DTG-FRESCO/opus | 8975e154524802efead82794ab2f70d4a1611000 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
'''
Commands for launching processes with or without OPUS interposition.
'''
from __future__ import absolute_import, division, print_function
import argparse
import os
import psutil
from .. import config, server_start, utils
def get_current_shell():
ppid = os.getppid()
parent = psutil.Process(ppid);
cur_shell = parent.exe()
shell_args = parent.cmdline()[1:]
return cur_shell, shell_args
@config.auto_read_config
def handle_launch(cfg, binary, arguments):
if not utils.is_server_active(cfg=cfg):
if not server_start.start_opus_server(cfg):
print("Aborting command launch.")
return
opus_preload_lib = utils.path_normalise(os.path.join(cfg['install_dir'],
'lib',
'libopusinterpose.so')
)
if 'LD_PRELOAD' in os.environ:
if opus_preload_lib not in os.environ['LD_PRELOAD']:
os.environ['LD_PRELOAD'] = (os.environ['LD_PRELOAD'] + " " +
opus_preload_lib)
else:
os.environ['LD_PRELOAD'] = opus_preload_lib
if cfg['server_addr'][:4] == "unix":
os.environ['OPUS_UDS_PATH'] = utils.path_normalise(cfg['server_addr'][7:])
os.environ['OPUS_PROV_COMM_MODE'] = cfg['server_addr'][:4]
else:
os.environ['OPUS_PROV_COMM_MODE'] = cfg['server_addr'][:3]
addr = cfg['server_addr'][6:].split(":")
os.environ['OPUS_TCP_ADDRESS'] = addr[0]
os.environ['OPUS_TCP_PORT'] = addr[1]
os.environ['OPUS_MSG_AGGR'] = "1"
os.environ['OPUS_MAX_AGGR_MSG_SIZE'] = "65536"
os.environ['OPUS_LOG_LEVEL'] = "3" # Log critical
os.environ['OPUS_INTERPOSE_MODE'] = "1" # OPUS lite
if not binary:
binary, arguments = get_current_shell()
os.execvp(binary, [binary] + arguments)
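# Illustrative effect of handle_launch (added note; the install path and socket address are
# assumptions): with install_dir=/opt/opus and server_addr=unix:///var/run/opus.sock, the
# exec'd process would effectively run as
#   LD_PRELOAD=/opt/opus/lib/libopusinterpose.so OPUS_PROV_COMM_MODE=unix \
#   OPUS_UDS_PATH=/var/run/opus.sock OPUS_INTERPOSE_MODE=1 <binary> <arguments...>
# i.e. provenance capture is injected purely through the environment of the child process.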
@config.auto_read_config
def handle_exclude(cfg, binary, arguments):
if utils.is_opus_active():
utils.reset_opus_env(cfg)
else:
print("OPUS is not active.")
if not binary:
binary, arguments = get_current_shell()
os.execvp(binary, [binary] + arguments)
def handle(cmd, **params):
if cmd == "launch":
handle_launch(**params)
elif cmd == "exclude":
handle_exclude(**params)
def setup_parser(parser):
cmds = parser.add_subparsers(dest="cmd")
launch = cmds.add_parser(
"launch",
help="Launch a process under OPUS.")
launch.add_argument(
"binary", nargs='?',
help="The binary to be launched. Defaults to the current shell.")
launch.add_argument(
"arguments", nargs=argparse.REMAINDER,
help="Any arguments to be passed.")
exclude = cmds.add_parser(
"exclude",
help="Launch a process excluded from OPUS interposition.")
exclude.add_argument(
"binary", nargs='?',
help="The binary to be launched. Defaults to the current shell.")
exclude.add_argument(
"arguments", nargs=argparse.REMAINDER,
help="Any arguments to be passed.")
| 31.979798 | 82 | 0.609602 | 0 | 0 | 0 | 0 | 1,803 | 0.569488 | 0 | 0 | 870 | 0.274795 |
81c8df8164adec85e55f94629c0090d4dd609286 | 1,718 | py | Python | tests/unit/l2_infrastructure/test_app_collection_config_parser.py | ansible-self-service/ansible-self-service | 80840f02b68c6ba5fe6c55ab9a317b310c185b4d | [
"MIT"
]
| null | null | null | tests/unit/l2_infrastructure/test_app_collection_config_parser.py | ansible-self-service/ansible-self-service | 80840f02b68c6ba5fe6c55ab9a317b310c185b4d | [
"MIT"
]
| null | null | null | tests/unit/l2_infrastructure/test_app_collection_config_parser.py | ansible-self-service/ansible-self-service | 80840f02b68c6ba5fe6c55ab9a317b310c185b4d | [
"MIT"
]
| null | null | null | import pytest
from ansible_self_service.l2_infrastructure.app_collection_config_parser import AppCollectionConfigValidationException, \
YamlAppCollectionConfigParser
from ansible_self_service.l4_core.models import AppCategory, App
VALID_CATEGORY_NAME = 'Misc'
VALID_ITEM_NAME = 'Cowsay'
VALID_ITEM_DESCRIPTION = 'Let an ASCII cow say stuff in your terminal!'
VALID_CONFIG = f"""
categories:
{VALID_CATEGORY_NAME}: {{}}
items:
{VALID_ITEM_NAME}:
description: |
{VALID_ITEM_DESCRIPTION}
categories:
- {VALID_CATEGORY_NAME}
image_url: https://upload.wikimedia.org/wikipedia/commons/8/80/Cowsay_Typical_Output.png
playbook: playbooks/cowsay.yml
params:
ansible_become_password:
type: secret
mandatory: true
requirements: > # any expression that we could use for a tasks "when" clause; items are ANDed
- ansible_distribution == 'Ubuntu'
"""
INVALID_CONFIG = '''
this is not even YAML
'''
def test_parse_valid_file(tmpdir):
config_file = tmpdir.join('self-service.yaml')
config_file.write(VALID_CONFIG)
repo_config_parser = YamlAppCollectionConfigParser()
categories, apps = repo_config_parser.from_file(config_file)
assert categories == [AppCategory(name=VALID_CATEGORY_NAME)]
assert apps == [App(
name=VALID_ITEM_NAME, description=VALID_ITEM_DESCRIPTION, categories=[AppCategory(name=VALID_CATEGORY_NAME)])
]
def test_parse_invalid_file(tmpdir):
config_file = tmpdir.join('self-service.yaml')
config_file.write(INVALID_CONFIG)
repo_config_parser = YamlAppCollectionConfigParser()
with pytest.raises(AppCollectionConfigValidationException):
repo_config_parser.from_file(config_file)
| 33.038462 | 121 | 0.760186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 657 | 0.382421 |
81ca35091868d035a8a09d9c9753adadf774b179 | 6,088 | py | Python | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
]
| null | null | null | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
]
| null | null | null | api-server.py | proatria/sftpplus-api-example | 1fc3af66beef06d66ad46a0cf74bb0905793cf7f | [
"MIT"
]
| null | null | null | """
Run a simple HTTP server which provides API endpoint for SFTPPlus.
Usage:
server.py [options]
-h --help Show this help.
-p --port=8000 Listen to a specific port. [default: 8080]
-a --address=127.0.0.1 Listen on specific address. [default: 0.0.0.0]
-c --certificate=PATH Enable HTTPS by defining the path
to a file containing server key, certificate, and CA chain
        all in PEM format and stored in a single file.
-f --flaky Introduce random errors to test SFTPPlus API retry functionality.
The following API endpoints are provided:
* /auth-api - For the authentication API
* /event-api - For the event handler API
"""
from __future__ import absolute_import, unicode_literals
import base64
import json
import ssl
from random import randint
from aiohttp import web
from docopt import docopt
# Command line handling part.
arguments = docopt(__doc__)
# Convert arguments to usable types.
port = int(arguments["--port"])
# Need to escape the address for ipv6.
address = arguments["--address"].replace(":", r"\:")
is_flaky = arguments["--flaky"]
certificate = arguments["--certificate"]
# Set to lower values to increase the probability of a failure.
_FLAKY_DEGREE = 3
# DB with accepted accounts.
# Each key is the name of an user.
# Each value contains the accepted password and/or SSH-key.
ACCOUNTS = {
# An account with some custom configuration.
# Configuration that is not explicitly defined here is extracted based on
# the SFTPPlus group.
"test-user": {
"password": "test-pass",
# Just the public key value, in OpenSSH format.
        # Without the key type or comments.
"ssh-public-key": "AAAAB3NzaC1yc2EAAAADAQABAAAAgQC4fV6tSakDSB6ZovygLsf1iC9P3tJHePTKAPkPAWzlu5BRHcmAu0uTjn7GhrpxbjjWMwDVN0Oxzw7teI0OEIVkpnlcyM6L5mGk+X6Lc4+lAfp1YxCR9o9+FXMWSJP32jRwI+4LhWYxnYUldvAO5LDz9QeR0yKimwcjRToF6/jpLw==",
"configuration": {
"home_folder_path": "/tmp",
# EXTRA_DATA is not yet supported.
# 'extra_data': {
# 'file_api_token': 'fav1_some_value',
# },
},
},
# An account with default configuration extracted from
# the default SFTPPlus group.
# SSH-Key authentication is disabled for this user.
"default-user": {
"password": "default-pass",
"ssh-public-key": "",
"configuration": {},
},
}
async def handle_root(request):
return web.Response(text="Demo SFTPPlus API endpoints.")
async def handle_auth(request):
"""
This is triggered for authentication API calls.
"""
request_json = await get_json(request)
print("\n\n")
print("-" * 80)
print("New authentication request received")
print(json.dumps(request_json, indent=2))
if is_flaky and randint(0, _FLAKY_DEGREE) == 0:
print("TRIGGERING AN EMULATED FAILURE")
return web.Response(status=500, text="Failed to process the request")
credentials = request_json["credentials"]
account = ACCOUNTS.get(credentials["username"], None)
if account is None:
# This is not an account handled by this authentication API.
        # Inform SFTPPlus that it can try to authenticate the user via other
# method (LDAP, or another HTTP authentication server).
print("UNKNOWN USER")
return web.Response(
status=401, text="User not handled by our API. Try other method."
)
response = {"account": account.get("configuration", {})}
if credentials["type"] in ["password", "password-basic-auth"]:
# We have password based authentication.
if credentials["content"] != account["password"]:
print("INVALID PASSWORD")
return web.Response(status=403, text="Password rejected.")
# Valid password.
print("VALID PASSWORD")
return web.json_response(response)
if credentials["type"] == "ssh-key":
# We have SSH-key based authentication.
# The keys are encoded as BASE64, but we compare them as bytes.
if base64.b64decode(credentials["content"]) != base64.b64decode(
account["ssh-public-key"]
):
print("INVALID SSH-KEY")
return web.Response(status=403, text="SSH-Key rejected.")
# Valid SSH key authentication.
print("VALID SSH-KEY")
return web.json_response(response)
return web.Response(status=403, text="Credentials type not supported.")
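# Illustrative /auth-api exchange (added example; values are taken from the ACCOUNTS table
# above, and the real SFTPPlus request envelope may carry additional fields):
#   request body : {"credentials": {"username": "test-user",
#                                   "type": "password", "content": "test-pass"}}
#   200 response : {"account": {"home_folder_path": "/tmp"}}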
async def handle_event(request):
"""
This is triggered by the event handler API calls.
"""
print("\n\n")
print("-" * 80)
print("New event handler call")
print("-" * 80)
print("Headers:")
for key, value in request.headers.items():
print(f" {key}: {value}")
print("-" * 80)
print("Payload:")
await get_json(request)
if is_flaky and randint(0, _FLAKY_DEGREE) == 0:
print("TRIGGERING AN EMULATED FAILURE")
return web.Response(status=500, text="Failed to process the request")
# An empty response body can be used to confirm that the event
# was received successfully by the API server.
    # This instructs SFTPPlus not to retry.
return web.Response(status=204, text="")
async def get_json(request):
"""
Return the json dict from `request`.
It also logs the JSON
"""
result = {}
try:
result = await request.json()
except json.JSONDecodeError:
print("INVALID JSON RECEIVED")
text = await request.text()
print(text)
result = {}
else:
print(json.dumps(result, indent=2))
return result
app = web.Application()
app.add_routes(
[
web.get("/", handle_root),
web.post("/auth-api", handle_auth),
web.post("/event-api", handle_event),
]
)
ssl_context = None
if certificate:
ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ssl_context.load_cert_chain(certificate, certificate)
if __name__ == "__main__":
web.run_app(app, host=address, port=port, ssl_context=ssl_context)
| 31.220513 | 233 | 0.655388 | 0 | 0 | 0 | 0 | 0 | 0 | 3,215 | 0.528088 | 3,241 | 0.532359 |
81ca610dec0f1e1d5519b0914515a58eb09c500b | 55 | py | Python | arkfbp/flow/__init__.py | arkfbp/arkfbp-py | 2444736462e8b4f09ae1ffe56779d9f515deb39f | [
"MIT"
]
| 2 | 2020-09-11T09:26:43.000Z | 2020-12-17T07:32:38.000Z | arkfbp/flow/__init__.py | arkfbp/arkfbp-py | 2444736462e8b4f09ae1ffe56779d9f515deb39f | [
"MIT"
]
| 4 | 2020-12-02T03:42:38.000Z | 2020-12-14T07:56:06.000Z | arkfbp/flow/__init__.py | arkfbp/arkfbp-py | 2444736462e8b4f09ae1ffe56779d9f515deb39f | [
"MIT"
]
| 2 | 2020-12-08T01:11:54.000Z | 2021-01-25T04:29:15.000Z | from .base import Flow
from .view_flow import ViewFlow
| 18.333333 | 31 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
81cb6312561698f081c8ea3ba400b666e569a740 | 740 | py | Python | ethereumetl/mappers/event_mapper.py | thanhnv2303/ethereum-etl | 94381feadf1f1602a95db44aea5e944559628271 | [
"MIT"
]
| null | null | null | ethereumetl/mappers/event_mapper.py | thanhnv2303/ethereum-etl | 94381feadf1f1602a95db44aea5e944559628271 | [
"MIT"
]
| null | null | null | ethereumetl/mappers/event_mapper.py | thanhnv2303/ethereum-etl | 94381feadf1f1602a95db44aea5e944559628271 | [
"MIT"
]
| null | null | null | from config.constant import ExportItemConstant, ExportItemTypeConstant, EventConstant, TransactionConstant
from ethereumetl.service.eth_event_service import EthEvent
class EthEventMapper(object):
def eth_event_to_dict(self, eth_event: EthEvent):
d1 = {
ExportItemConstant.type: ExportItemTypeConstant.event,
EventConstant.event_type: eth_event.event_type,
EventConstant.contract_address: eth_event.contract_address,
TransactionConstant.transaction_hash: eth_event.transaction_hash,
EventConstant.log_index: eth_event.log_index,
TransactionConstant.block_number: eth_event.block_number,
}
d2 = eth_event.params
return {**d1, **d2}
| 43.529412 | 106 | 0.731081 | 571 | 0.771622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
81cc09d97179b0455468e2dd08a57556c6ae600f | 5,934 | py | Python | openerp/addons/crm_partner_assign/wizard/crm_forward_to_partner.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
]
| 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | openerp/addons/crm_partner_assign/wizard/crm_forward_to_partner.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
]
| 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | openerp/addons/crm_partner_assign/wizard/crm_forward_to_partner.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_lead_forward_to_partner(osv.TransientModel):
""" Forward info history to partners. """
_name = 'crm.lead.forward.to.partner'
_inherit = "mail.compose.message"
def _get_composition_mode_selection(self, cr, uid, context=None):
composition_mode = super(crm_lead_forward_to_partner, self)._get_composition_mode_selection(cr, uid, context=context)
composition_mode.append(('forward', 'Forward'))
return composition_mode
_columns = {
'partner_ids': fields.many2many('res.partner',
'lead_forward_to_partner_res_partner_rel',
'wizard_id', 'partner_id', 'Additional contacts'),
'attachment_ids': fields.many2many('ir.attachment',
'lead_forward_to_partner_attachment_rel',
'wizard_id', 'attachment_id', 'Attachments'),
'history_mode': fields.selection([('info', 'Internal notes'),
('latest', 'Latest email'), ('whole', 'Whole Story')],
'Send history', required=True),
}
_defaults = {
'history_mode': 'info',
}
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
        # set as comment, perform overridden document-like action that calls get_record_data
old_mode = context.get('default_composition_mode', 'forward')
context['default_composition_mode'] = 'comment'
res = super(crm_lead_forward_to_partner, self).default_get(cr, uid, fields, context=context)
# back to forward mode
context['default_composition_mode'] = old_mode
res['composition_mode'] = context['default_composition_mode']
return res
def get_record_data(self, cr, uid, model, res_id, context=None):
""" Override of mail.compose.message, to add default values coming
            from the related lead.
"""
if context is None:
context = {}
res = super(crm_lead_forward_to_partner, self).get_record_data(cr, uid, model, res_id, context=context)
if model not in ('crm.lead') or not res_id:
return res
template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'crm_partner_assign_email_template')[1]
context['history_mode'] = context.get('history_mode','whole')
mail_body_fields = ['partner_id', 'partner_name', 'title', 'function', 'street', 'street2', 'zip', 'city', 'country_id', 'state_id', 'email_from', 'phone', 'fax', 'mobile', 'description']
lead = self.pool.get('crm.lead').browse(cr, uid, res_id, context=context)
context['mail_body'] = self.pool.get('crm.lead')._mail_body(cr, uid, lead, mail_body_fields, context=context)
template = self.generate_email_for_composer(cr, uid, template_id, res_id, context)
res['subject'] = template['subject']
res['body'] = template['body']
return res
def on_change_history_mode(self, cr, uid, ids, history_mode, model, res_id, context=None):
""" Update body when changing history_mode """
if context is None:
context = {}
if model and model == 'crm.lead' and res_id:
lead = self.pool.get(model).browse(cr, uid, res_id, context=context)
context['history_mode'] = history_mode
body = self.get_record_data(cr, uid, 'crm.lead', res_id, context=context)['body']
return {'value': {'body': body}}
def create(self, cr, uid, values, context=None):
""" TDE-HACK: remove 'type' from context, because when viewing an
opportunity form view, a default_type is set and propagated
            to the wizard, which has a non-matching type field. """
default_type = context.pop('default_type', None)
new_id = super(crm_lead_forward_to_partner, self).create(cr, uid, values, context=context)
if default_type:
context['default_type'] = default_type
return new_id
def action_forward(self, cr, uid, ids, context=None):
""" Forward the lead to a partner """
if context is None:
context = {}
res = {'type': 'ir.actions.act_window_close'}
wizard = self.browse(cr, uid, ids[0], context=context)
if wizard.model not in ('crm.lead'):
return res
lead = self.pool.get(wizard.model)
lead_ids = wizard.res_id and [wizard.res_id] or []
if wizard.composition_mode == 'mass_mail':
lead_ids = context and context.get('active_ids', []) or []
value = self.default_get(cr, uid, ['body', 'email_to', 'email_cc', 'subject', 'history_mode'], context=context)
self.write(cr, uid, ids, value, context=context)
return self.send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 47.472 | 195 | 0.635322 | 4,780 | 0.805527 | 0 | 0 | 0 | 0 | 0 | 0 | 2,652 | 0.446916 |
81cd30e6f9de401088d486d770c3328acf7f5008 | 5,033 | py | Python | losses.py | DensenDavis/yolov5_tf2 | 5b2f38e3f0391e5beee1551a386e9b81512db179 | [
"Unlicense"
]
| null | null | null | losses.py | DensenDavis/yolov5_tf2 | 5b2f38e3f0391e5beee1551a386e9b81512db179 | [
"Unlicense"
]
| null | null | null | losses.py | DensenDavis/yolov5_tf2 | 5b2f38e3f0391e5beee1551a386e9b81512db179 | [
"Unlicense"
]
| null | null | null | import tensorflow as tf
from tensorflow.keras.losses import binary_crossentropy,sparse_categorical_crossentropy
from config import Configuration
cfg = Configuration()
class YOLOLoss(tf.losses.Loss):
def __init__(self, anchors):
super(YOLOLoss, self).__init__(reduction="none", name="YOLOLoss")
self.anchors = tf.constant(anchors)
def _meshgrid(self, n_a, n_b):
return [
tf.reshape(tf.tile(tf.range(n_a), [n_b]), (n_b, n_a)),
tf.reshape(tf.repeat(tf.range(n_b), n_a), (n_b, n_a))
]
def broadcast_iou(self, box_1, box_2):
# box_1: (..., (x1, y1, x2, y2))
# box_2: (N, (x1, y1, x2, y2))
# broadcast boxes
box_1 = tf.expand_dims(box_1, -2)
box_2 = tf.expand_dims(box_2, 0)
# new_shape: (..., N, (x1, y1, x2, y2))
new_shape = tf.broadcast_dynamic_shape(tf.shape(box_1), tf.shape(box_2))
box_1 = tf.broadcast_to(box_1, new_shape)
box_2 = tf.broadcast_to(box_2, new_shape)
int_w = tf.maximum(tf.minimum(box_1[..., 2], box_2[..., 2]) -
tf.maximum(box_1[..., 0], box_2[..., 0]), 0)
int_h = tf.maximum(tf.minimum(box_1[..., 3], box_2[..., 3]) -
tf.maximum(box_1[..., 1], box_2[..., 1]), 0)
int_area = int_w * int_h
box_1_area = (box_1[..., 2] - box_1[..., 0]) * \
(box_1[..., 3] - box_1[..., 1])
box_2_area = (box_2[..., 2] - box_2[..., 0]) * \
(box_2[..., 3] - box_2[..., 1])
return int_area / (box_1_area + box_2_area - int_area)
def yolo_boxes(self, pred, classes):
# pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
grid_size = tf.shape(pred)[1:3]
box_xy, box_wh, objectness, class_probs = tf.split(pred, (2, 2, 1, classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
objectness = tf.sigmoid(objectness)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_xy, box_wh), axis=-1) # original xywh for loss
# !!! grid[x][y] == (y, x)
grid = self._meshgrid(grid_size[1],grid_size[0])
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2) # [gx, gy, 1, 2]
box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * self.anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, objectness, class_probs, pred_box
def call(self, y_true, y_pred):
# 1. transform all pred outputs
# y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
pred_box, pred_obj, pred_class, pred_xywh = self.yolo_boxes(y_pred, cfg.num_classes)
pred_xy = pred_xywh[..., 0:2]
pred_wh = pred_xywh[..., 2:4]
# 2. transform all true outputs
# y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))
true_box, true_obj, true_class_idx = tf.split(y_true, (4, 1, 1), axis=-1)
true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
true_wh = true_box[..., 2:4] - true_box[..., 0:2]
# give higher weights to small boxes
box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
# 3. inverting the pred box equations
grid_size = tf.shape(y_true)[1]
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
true_xy = true_xy * tf.cast(grid_size, tf.float32) - tf.cast(grid, tf.float32)
true_wh = tf.math.log(true_wh / self.anchors)
true_wh = tf.where(tf.math.is_inf(true_wh),tf.zeros_like(true_wh), true_wh)
# 4. calculate all masks
obj_mask = tf.squeeze(true_obj, -1)
# ignore false positive when iou is over threshold
best_iou = tf.map_fn(
lambda x: tf.reduce_max(self.broadcast_iou(x[0], tf.boolean_mask(
x[1], tf.cast(x[2], tf.bool))), axis=-1),
(pred_box, true_box, obj_mask),
tf.float32)
ignore_mask = tf.cast(best_iou < cfg.train_iou_threshold, tf.float32)
# 5. calculate all losses
xy_loss = obj_mask * box_loss_scale * tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
wh_loss = obj_mask * box_loss_scale * tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
obj_loss = binary_crossentropy(true_obj, pred_obj)
obj_loss = obj_mask * obj_loss + (1 - obj_mask) * ignore_mask * obj_loss
class_loss = obj_mask * sparse_categorical_crossentropy(true_class_idx, pred_class)
# 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)
xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
return xy_loss + wh_loss + obj_loss + class_loss
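# Illustrative usage (added example; the anchor values and tensor shapes are assumptions):
#   anchors = [(0.10, 0.13), (0.28, 0.22), (0.90, 0.78)]   # normalised (w, h) pairs for one scale
#   loss_fn = YOLOLoss(anchors)
#   # y_true: (batch, grid, grid, n_anchors, (x1, y1, x2, y2, obj, cls))
#   # y_pred: (batch, grid, grid, n_anchors, 4 + 1 + cfg.num_classes) raw network output
#   per_sample_loss = loss_fn(y_true, y_pred)               # shape: (batch,)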
| 44.9375 | 98 | 0.587522 | 4,864 | 0.966422 | 0 | 0 | 0 | 0 | 0 | 0 | 705 | 0.140076 |
81cd44adfb162f86e55541035a3d572728194cd3 | 2,235 | py | Python | test/stress/mmlogic.py | dzlier-gcp/open-match | 8db449b307468e20c9835cc22dcca9511c38025a | [
"Apache-2.0"
]
| null | null | null | test/stress/mmlogic.py | dzlier-gcp/open-match | 8db449b307468e20c9835cc22dcca9511c38025a | [
"Apache-2.0"
]
| 12 | 2019-08-10T00:37:58.000Z | 2019-08-14T22:47:26.000Z | test/stress/mmlogic.py | dzlier-gcp/open-match | 8db449b307468e20c9835cc22dcca9511c38025a | [
"Apache-2.0"
]
| 2 | 2019-08-10T00:31:54.000Z | 2019-08-10T00:33:10.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import json
from locust import HttpLocust, TaskSequence, task, seq_task
from util import ticket_generator, pool_generator, ATTRIBUTE_LIST
NUM_QUERY_ATTR = 20
class ClientBehavior(TaskSequence):
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
self.init()
def init(self):
# Placeholder for initialize future TLS materials and request generators
create_payload = {
"method": "POST",
"endpoint": "/v1/frontend/tickets",
"params": None,
"body": None
}
        # Each spawned client first generates 10 tickets, then queries the mmlogic (data) layer
# Total number of tickets in open-match would be 10 * # of spawned clients
for i in range(10):
self.client.request(create_payload["method"], create_payload["endpoint"], params=None, data=json.dumps(ticket_generator()))
@task(1)
def query_ticket(self):
query_payload = {
"method": "POST",
"endpoint": "/v1/mmlogic/tickets:query",
"params": None,
"body": pool_generator(random.choices(ATTRIBUTE_LIST, k=NUM_QUERY_ATTR))
}
method, endpoint, params, data, name = query_payload["method"], query_payload["endpoint"], None, json.dumps(query_payload["body"]), "Query: {}".format(query_payload["endpoint"])
with self.client.request(method, endpoint, name=name, params=params, data=data, catch_response=True) as response:
if response.status_code != 200:
response.failure("Got status code {}, was expected 200.".format(response.content))
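# The WebsiteUser class below wires this task sequence into locust. To drive the stress
# test (illustrative command; the target host and port are assumptions, not defined here):
#   locust -f test/stress/mmlogic.py --host=http://<open-match-frontend>:<port>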
class WebsiteUser(HttpLocust):
task_set = ClientBehavior
min_wait = 500
max_wait = 1500
| 35.47619 | 181 | 0.714541 | 1,478 | 0.661298 | 0 | 0 | 664 | 0.297092 | 0 | 0 | 1,101 | 0.492617 |
81cdcd944d2ec3787c0800a16240ad15e52f16bd | 500 | py | Python | benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi_cmlarge_elrexi/postprocessing_pickle.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
]
| 6 | 2017-11-20T08:12:46.000Z | 2021-03-11T15:32:36.000Z | benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi_cmlarge_elrexi/postprocessing_pickle.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
]
| 4 | 2018-02-02T21:46:33.000Z | 2022-01-11T11:10:27.000Z | benchmarks_sphere/report_konwihr_rexi_nl/compare_wt_dt_vs_accuracy_galewsky_new_rexi_cmlarge_elrexi/postprocessing_pickle.py | valentinaschueller/sweet | 27e99c7a110c99deeadee70688c186d82b39ac90 | [
"MIT"
]
| 12 | 2016-03-01T18:33:34.000Z | 2022-02-08T22:20:31.000Z | #! /usr/bin/env python3
import sys
import math
import glob
from mule_local.postprocessing.pickle_SphereDataSpectralDiff import *
from mule.exec_program import *
# Ugly hack!
#output, retval = exec_program('ls *benchref*/*prog_h* | sort | tail -n 1 | sed "s/.*prog_h//"')
#if retval != 0:
# print(output)
# raise Exception("Something went wrong")
#output = output.replace("\n", '')
#output = output.replace("\r", '')
#p = pickle_SphereDataSpectralDiff(output)
p = pickle_SphereDataSpectralDiff()
| 23.809524 | 96 | 0.72 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.626 |
81ceeac6fb9c99499e11e6ba24211d641629642f | 4,355 | py | Python | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
]
| 3 | 2022-02-06T23:31:17.000Z | 2022-02-07T11:10:03.000Z | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
]
| null | null | null | src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 40f8b60ebe32c64fd9b37328a9a5eefacd1c6ebd | [
"MIT"
]
| null | null | null | """This module contains a base runnable item."""
# =============================================================================
# IMPORTS
# =============================================================================
# Future
from __future__ import annotations
# Standard Library
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List
# Imports for type checking.
if TYPE_CHECKING:
import pathlib
import houdini_package_runner.runners.base
# =============================================================================
# CLASSES
# =============================================================================
class BaseItem(ABC):
"""Base class for a runnable item.
:param write_back: Whether the item should write itself back to disk.
"""
def __init__(self, write_back: bool = False) -> None:
self._contents_changed = False
self._ignored_builtins: List[str] = []
self._is_single_line = False
self._is_test_item = False
self._write_back = write_back
def __repr__(self):
return f"<{self.__class__.__name__}>"
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def contents_changed(self) -> bool:
"""Whether the contents of the item have changed."""
return self._contents_changed
@contents_changed.setter
def contents_changed(self, contents_changed: bool):
self._contents_changed = contents_changed
# -------------------------------------------------------------------------
@property
def ignored_builtins(self) -> List[str]:
"""A list of known builtins to ignore for checks which look for imports."""
return self._ignored_builtins
# -------------------------------------------------------------------------
@property
def is_single_line(self) -> bool:
"""Whether the item code on a single line."""
return self._is_single_line
# -------------------------------------------------------------------------
@property
def is_test_item(self) -> bool:
"""Whether the item is a test related item."""
return self._is_test_item
@is_test_item.setter
def is_test_item(self, is_test_item: bool):
self._is_test_item = is_test_item
# -------------------------------------------------------------------------
@property
def write_back(self) -> bool:
"""Whether the item should write changes back."""
return self._write_back
@write_back.setter
def write_back(self, write_back):
self._write_back = write_back
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@abstractmethod
def process(
self, runner: houdini_package_runner.runners.base.HoudiniPackageRunner
) -> int:
"""Process an item.
:param runner: The package runner processing the item.
:return: The process return code.
"""
class BaseFileItem(BaseItem):
"""Base class for a runnable item.
:param path: The path for the item.
:param write_back: Whether the item should write itself back to disk.
"""
def __init__(self, path: pathlib.Path, write_back: bool = False) -> None:
super().__init__(write_back=write_back)
self._path = path
def __repr__(self):
return f"<{self.__class__.__name__} {self.path}>"
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def path(self) -> pathlib.Path:
"""The path on disk."""
return self._path
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@abstractmethod
def process(
self, runner: houdini_package_runner.runners.base.HoudiniPackageRunner
) -> int:
"""Process an item.
:param runner: The package runner processing the item.
:return: The process return code.
"""
| 29.828767 | 83 | 0.461538 | 3,702 | 0.850057 | 0 | 0 | 1,709 | 0.392423 | 0 | 0 | 2,292 | 0.526292 |
81cf7c347a9efbb6723692e303da22251c98208b | 71 | py | Python | visualizer/__init__.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
]
| 1 | 2015-06-16T06:53:52.000Z | 2015-06-16T06:53:52.000Z | visualizer/__init__.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
]
| null | null | null | visualizer/__init__.py | AndreasMadsen/bachelor-code | 115fd2b955de07f34cdec998ba2a7f103ae253e3 | [
"MIT"
]
| null | null | null |
from graph.graph_server import GraphServer
__all__ = ['GraphServer']
| 14.2 | 42 | 0.788732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.183099 |
81cfb18746180392d2ab217e02dc844bfc9a910e | 4,485 | py | Python | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
]
| null | null | null | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
]
| 1 | 2021-10-20T00:11:16.000Z | 2021-10-20T00:17:51.000Z | djangoplicity/blog/migrations/0001_initial.py | djangoplicity/djangoplicity-blog | 2465b34228d794db9f746e314fa04657cbf18d38 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-15 16:23
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import djangoplicity.archives.base
import djangoplicity.archives.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('media', '0021_auto_20170207_1749'),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('biography', models.TextField(blank=True)),
('photo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='media.Image')),
],
),
migrations.CreateModel(
name='AuthorDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(blank=True, help_text='Optional description, e.g.: "Author: ", or "Interview with"', max_length=100)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Author')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('footer', models.TextField(blank=True, help_text='Optional footer added to the bottom of posts')),
],
),
migrations.CreateModel(
name='Post',
fields=[
('slug', models.SlugField(help_text='Used for the URL', primary_key=True, serialize=False)),
('title', models.CharField(max_length=255)),
('subtitle', models.CharField(blank=True, help_text='Optional subtitle', max_length=255)),
('lede', models.TextField()),
('body', models.TextField()),
('discover_box', models.TextField(blank=True)),
('numbers_box', models.TextField(blank=True)),
('links', models.TextField(blank=True)),
('release_date', djangoplicity.archives.fields.ReleaseDateTimeField(blank=True, db_index=True, null=True)),
('embargo_date', djangoplicity.archives.fields.ReleaseDateTimeField(blank=True, db_index=True, null=True)),
('published', models.BooleanField(db_index=True, default=False, verbose_name='Published')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last modified')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('release_task_id', models.CharField(blank=True, max_length=64, null=True)),
('embargo_task_id', models.CharField(blank=True, max_length=64, null=True)),
('checksums', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('authors', models.ManyToManyField(through='blog.AuthorDescription', to='blog.Author')),
('banner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='media.Image', verbose_name='Banner Image')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category')),
],
options={
'ordering': ('-release_date',),
},
bases=(djangoplicity.archives.base.ArchiveModel, models.Model),
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='blog.Tag'),
),
migrations.AddField(
model_name='authordescription',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Post'),
),
]
| 48.75 | 151 | 0.599331 | 4,178 | 0.93155 | 0 | 0 | 0 | 0 | 0 | 0 | 819 | 0.182609 |
81d02a1b1d708206e4f47e1ceb3bcbc7a7b7f3aa | 4,386 | py | Python | picklesize/test_picklesize.py | pydron/picklesize | c524ccae7beecfada663fbcf251c4166158f2995 | [
"MIT"
]
| null | null | null | picklesize/test_picklesize.py | pydron/picklesize | c524ccae7beecfada663fbcf251c4166158f2995 | [
"MIT"
]
| null | null | null | picklesize/test_picklesize.py | pydron/picklesize | c524ccae7beecfada663fbcf251c4166158f2995 | [
"MIT"
]
| null | null | null | '''
Created on 20.07.2015
@author: stefan
'''
import unittest
import pickle
import picklesize
import copy_reg
class TestEstimator(unittest.TestCase):
def setUp(self):
self.target = picklesize.PickleSize()
def compare(self, obj):
data = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
expected = len(data)
actual = self.target.picklesize(obj, pickle.HIGHEST_PROTOCOL)
self.assertEqual(expected, actual, "Wrong estimate (%s instead of %s) for %r." %
(actual, expected, obj))
def test_None(self):
self.compare(None)
def test_True(self):
self.compare(True)
def test_False(self):
self.compare(False)
def test_int(self):
self.compare(0)
self.compare(1)
self.compare(0xFF-1)
self.compare(0xFF)
self.compare(0xFF+1)
self.compare(0xFFFF-1)
self.compare(0xFFFF)
self.compare(0xFFFF+1)
self.compare(-0xFF-1)
self.compare(-0xFF)
self.compare(-0xFF+1)
self.compare(-0xFFFF-1)
self.compare(-0xFFFF)
self.compare(-0xFFFF+1)
def test_long(self):
self.compare(0L)
self.compare(1L)
self.compare(10L**100)
self.compare(10L**1000)
def test_float(self):
self.compare(0.0)
self.compare(-42.42)
def test_string(self):
self.compare("")
self.compare(255*"x")
self.compare(256*"x")
self.compare(257*"x")
def test_unicode(self):
self.compare(u"")
self.compare(255*u"x")
self.compare(256*u"x")
self.compare(257*u"x")
def test_tuple(self):
self.compare(tuple())
self.compare((1,))
self.compare((1,2))
self.compare((1,2,3))
self.compare((1,2,3,4))
def test_list(self):
self.compare([])
self.compare([1])
self.compare(999*[1])
self.compare(1000*[1])
self.compare(1001*[1])
self.compare(1002*[1])
self.compare(5412*[1])
def test_dict(self):
self.compare({})
self.compare({1:2})
self.compare({1:1, 2:2})
def test_instance(self):
self.compare(OldStyle_WithAttribs())
self.compare(OldStyle_WithInit())
def test_Type(self):
self.compare(long)
self.compare(OldStyle_WithAttribs)
self.compare(global_function)
self.compare(max)
def test_Ref(self):
x = "abc"
self.compare([x,x])
def test_Reducer(self):
self.compare(NewStyle_Reducer())
def test_NewStyleInstance(self):
self.compare(NewStyle_WithAttribs())
def test_numpy(self):
import numpy as np
self.compare(np.ones((10,10)))
self.compare(np.ones((10,10))[0:5,:])
self.compare(np.ones((10,10))[:,0:5])
def test_numpy_multiple_arrays(self):
import numpy as np
self.compare([np.ones((10,10)), np.ones((10,10))])
def test_numpy_large(self):
import numpy as np
self.compare(np.ones(1024*1024))
class TestFast(TestEstimator):
def setUp(self):
self.target = picklesize.FastPickleSize()
def compare(self, obj):
data = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
expected = len(data)
actual = self.target.picklesize(obj, pickle.HIGHEST_PROTOCOL)
self.assertLessEqual(actual, 2*expected+100, "Over estimate (%s instead of %s) for %r." %
(actual, expected, obj))
self.assertGreaterEqual(actual, 0.5*expected-100, "Gross under estimate (%s instead of %s) for %r." %
(actual, expected, obj))
class OldStyle_WithAttribs():
def __init__(self):
self.a = 12
self.b = 42
class OldStyle_WithInit():
def __getinitargs__(self):
return (1,2,3)
class NewStyle_Reducer(object):
pass
class NewStyle_WithAttribs(object):
def __init__(self):
self.a = 12
self.b = 42
def tuple_reducer(obj):
return (NewStyle_Reducer, tuple())
copy_reg.pickle(NewStyle_Reducer, tuple_reducer)
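# Note (added): copy_reg.pickle registers tuple_reducer as the reduction function for
# NewStyle_Reducer, so its instances are pickled via the REDUCE path (callable + empty
# argument tuple) rather than by instance dict -- the case test_Reducer above exercises.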
def global_function():
pass | 24.779661 | 110 | 0.564979 | 4,083 | 0.930917 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.048108 |
81d18ed12d722ad07b48dba575ac241b64bc31f0 | 1,691 | py | Python | setup.py | extensive-nlp/ttc_nlp | b84892b4f6856baf99d9a5975cdcbf2fe3b19b7a | [
"Apache-2.0"
]
| null | null | null | setup.py | extensive-nlp/ttc_nlp | b84892b4f6856baf99d9a5975cdcbf2fe3b19b7a | [
"Apache-2.0"
]
| null | null | null | setup.py | extensive-nlp/ttc_nlp | b84892b4f6856baf99d9a5975cdcbf2fe3b19b7a | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Setup process."""
from io import open
from os import path
from setuptools import find_packages, setup
with open(
path.join(path.abspath(path.dirname(__file__)), "README.md"), encoding="utf-8"
) as f:
long_description = f.read()
setup(
# Basic project information
name="ttctext",
version="0.0.1",
# Authorship and online reference
author="Satyajit Ghana",
author_email="[email protected]",
url="https://github.com/extensive-nlp/ttc_nlp",
# Detailled description
description="TTC NLP Module",
long_description=long_description,
long_description_content_type="text/markdown",
keywords="sample setuptools development",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
# Package configuration
packages=find_packages(exclude=("tests",)),
include_package_data=True,
python_requires=">= 3.6",
install_requires=[
"torch>=1.9.0",
"torchtext>=0.10.0",
"torchmetrics>=0.4.1",
"omegaconf>=2.1.0",
"pytorch-lightning>=1.3.8",
"gdown>=3.13.0",
"spacy>=3.1.0",
"pandas~=1.1.0",
"seaborn>=0.11.1",
"matplotlib>=3.1.3",
"tqdm>=4.61.2",
"scikit-learn~=0.24.2",
],
# Licensing and copyright
license="Apache 2.0",
)
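# Typical install commands for this package (added note; any pip-based workflow works):
#   pip install .            # regular install
#   pip install -e .         # editable install for development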
| 28.661017 | 82 | 0.607333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 912 | 0.539326 |
81d3e9a297bdf6007923e315c9b06917f0723c4c | 216 | py | Python | auxein/fitness/__init__.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
]
| 1 | 2019-05-08T14:53:27.000Z | 2019-05-08T14:53:27.000Z | auxein/fitness/__init__.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
]
| 2 | 2020-08-26T09:16:47.000Z | 2020-10-30T16:47:03.000Z | auxein/fitness/__init__.py | auxein/auxein | 5388cb572b65aecc282f915515c35dc3b987154c | [
"Apache-2.0"
]
| null | null | null | # flake8: noqa
from .core import Fitness
from .kernel_based import GlobalMinimum
from .observation_based import ObservationBasedFitness, MultipleLinearRegression, SimplePolynomialRegression, MultipleLinearRegression | 43.2 | 134 | 0.87963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.064815 |
81d542769cfc331b3bf5ee9b379987289db08efb | 1,071 | py | Python | steelpy/codes/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
]
| 4 | 2021-09-28T12:52:01.000Z | 2022-02-24T22:30:22.000Z | steelpy/codes/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
]
| null | null | null | steelpy/codes/main.py | svortega/steelpy | bef35eb8ab8728fc29f57b7070b5f3bac0b0e840 | [
"MIT"
]
| null | null | null | # Copyright (c) 2019-2020 steelpy
# Python stdlib imports
# package imports
#from steelpy.codes.aisc.aisc360 import AISC_360_16
#from steelpy.codes.aisc.aisc335 import AISC_335_89
#from steelpy.codes.iso.ISO19902 import ISOCodeCheck
from steelpy.codes.piping.pipeline import Pipeline_Assessment
#from steelpy.codes.api.wsd_22ed import APIwsd22ed
from steelpy.codes.dnv.pannel import CodeCheckPanel
#
#from steelpy.process.units.main import Units
#from steelpy.material.material import Material
#from steelpy.sections.tubular import Tubular
from steelpy.codes.api.main import API_design
class CodeCheck:
"""
"""
def __init__(self):
""""""
#self._units = Units()
pass
#@property
#def units(self):
# """
# """
# return self._units
#
@property
def API(self):
"""
"""
return API_design()
#
@property
def pipe(self):
""" """
return Pipeline_Assessment()
#
def DNV_pannel(self):
""" """
return CodeCheckPanel()
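# Illustrative usage of this facade (added example, not part of the original module):
#   check = CodeCheck()
#   api = check.API             # returns an API_design() instance
#   pipe = check.pipe           # returns a Pipeline_Assessment() instance
#   panel = check.DNV_pannel()  # returns a CodeCheckPanel() instance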
| 22.3125 | 61 | 0.644258 | 479 | 0.447246 | 0 | 0 | 162 | 0.151261 | 0 | 0 | 556 | 0.519141 |
81d63e7ca9da71f50fffa4b00a77a421574650e4 | 347 | py | Python | main.py | soyoung97/MixText | 22993cd028a4223a54e138a89b53cd7978a5e38b | [
"MIT"
]
| null | null | null | main.py | soyoung97/MixText | 22993cd028a4223a54e138a89b53cd7978a5e38b | [
"MIT"
]
| null | null | null | main.py | soyoung97/MixText | 22993cd028a4223a54e138a89b53cd7978a5e38b | [
"MIT"
]
| null | null | null | import os
os.system("pip install pytorch_transformers")
import nsml
print(nsml.DATASET_PATH)
os.system('python ./code/train.py --n-labeled 10 --data-path '+ nsml.DATASET_PATH + '/train/ --batch-size 4 --batch-size-u 8 --epochs 20 --val-iteration 1000 --lambda-u 1 --T 0.5 --alpha 16 --mix-layers-set 7 9 12 --lrmain 0.000005 --lrlast 0.00005'
)
| 38.555556 | 249 | 0.706052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.723343 |
81d742485fceccd1810f61f429cd089c6e0b112d | 1,126 | py | Python | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
]
| null | null | null | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
]
| null | null | null | test.py | IldusTim/QAStudy | f2f5e9c673259e7e1c8d0ab2887f28326300abe3 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import math
from selenium.webdriver.support.ui import Select
import os
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
link = "http://suninjuly.github.io/explicit_wait2.html"
opt = webdriver.ChromeOptions()
opt.add_experimental_option('w3c', False)
browser = webdriver.Chrome(chrome_options=opt)
browser.implicitly_wait(5, 0.5)
browser.get(link)
button = browser.find_element_by_id("book")
price = WebDriverWait(browser, 12).until(EC.text_to_be_present_in_element((By.ID, "price"),"10000 RUR"))
button.click()
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
browser.find_element_by_class_name("btn-primary").click()
# new_window = browser.window_handles[1]
# browser.switch_to.window(new_window)
x_element = browser.find_element_by_id("input_value")
x = x_element.text
y = calc(x)
browser.find_element_by_id("answer").click()
browser.find_element_by_id("answer").send_keys(y)
browser.find_element_by_id("solve").click() | 31.277778 | 104 | 0.785968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 227 | 0.201599 |
81d761dcf0b173ad97a22e411c04701a33909ebc | 1,224 | py | Python | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
]
| null | null | null | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
]
| 4 | 2022-01-13T03:56:36.000Z | 2022-03-12T01:01:24.000Z | django_backend/product/migrations/0002_product.py | itsmahadi007/E-Commerce-VueJS-Django | 4fc298f2181fd22c6aeb74439edef78a397d5447 | [
"MIT"
]
| null | null | null | # Generated by Django 3.2.7 on 2021-09-01 17:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('product', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField()),
('description', models.TextField(blank=True, null=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('image', models.ImageField(blank=True, null=True, upload_to='uploads/')),
('thumbnail', models.ImageField(blank=True, null=True, upload_to='uploads/')),
('data_added', models.DateTimeField(auto_now_add=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product', to='product.category')),
],
options={
'ordering': ('-data_added',),
},
),
]
| 38.25 | 140 | 0.580882 | 1,098 | 0.897059 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.187092 |
81d9ca381791e7e9b4231c86815a9f9bf5fa4886 | 40,400 | py | Python | kraken/ketos.py | zjsteyn/kraken | eaa9f4290db5425ddf80d0aebfa3944713558ab5 | [
"Apache-2.0"
]
| null | null | null | kraken/ketos.py | zjsteyn/kraken | eaa9f4290db5425ddf80d0aebfa3944713558ab5 | [
"Apache-2.0"
]
| null | null | null | kraken/ketos.py | zjsteyn/kraken | eaa9f4290db5425ddf80d0aebfa3944713558ab5 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import json
import glob
import uuid
import click
import logging
import unicodedata
from click import open_file
from bidi.algorithm import get_display
from typing import cast, Set, List, IO, Any
from kraken.lib import log
from kraken.lib.exceptions import KrakenCairoSurfaceException
from kraken.lib.exceptions import KrakenEncodeException
from kraken.lib.exceptions import KrakenInputException
APP_NAME = 'kraken'
logger = logging.getLogger('kraken')
def message(msg, **styles):
if logger.getEffectiveLevel() >= 30:
click.secho(msg, **styles)
@click.group()
@click.version_option()
@click.option('-v', '--verbose', default=0, count=True)
@click.option('-s', '--seed', default=None, type=click.INT,
help='Seed for numpy\'s and torch\'s RNG. Set to a fixed value to '
              'ensure reproducible random splits of data')
def cli(verbose, seed):
if seed:
import numpy.random
numpy.random.seed(seed)
from torch import manual_seed
manual_seed(seed)
log.set_logger(logger, level=30-min(10*verbose, 20))
def _validate_manifests(ctx, param, value):
images = []
for manifest in value:
for entry in manifest.readlines():
im_p = entry.rstrip('\r\n')
if os.path.isfile(im_p):
images.append(im_p)
else:
logger.warning('Invalid entry "{}" in {}'.format(im_p, manifest.name))
return images
def _expand_gt(ctx, param, value):
images = []
for expression in value:
images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)])
return images
@cli.command('train')
@click.pass_context
@click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '
'padding around lines')
@click.option('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')
@click.option('-s', '--spec', show_default=True,
default='[1,48,0,1 Cr3,3,32 Do0.1,2 Mp2,2 Cr3,3,64 Do0.1,2 Mp2,2 S1(1x12)1,3 Lbx100 Do]',
help='VGSL spec of the network to train. CTC layer will be added automatically.')
@click.option('-a', '--append', show_default=True, default=None, type=click.INT,
help='Removes layers before argument and then appends spec. Only works when loading an existing model')
@click.option('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')
@click.option('-F', '--freq', show_default=True, default=1.0, type=click.FLOAT,
help='Model saving and report generation frequency in epochs during training')
@click.option('-q', '--quit', show_default=True, default='early', type=click.Choice(['early', 'dumb']),
              help='Stop condition for training. Set to `early` for early stopping or `dumb` for fixed number of epochs')
@click.option('-N', '--epochs', show_default=True, default=-1, help='Number of epochs to train for')
@click.option('--lag', show_default=True, default=5, help='Number of evaluation cycles (--freq) to wait before stopping training without improvement')
@click.option('--min-delta', show_default=True, default=None, type=click.FLOAT, help='Minimum improvement between epochs to reset early stopping. Default is scales the delta by the best loss')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('--optimizer', show_default=True, default='Adam', type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer')
@click.option('-r', '--lrate', show_default=True, default=2e-3, help='Learning rate')
@click.option('-m', '--momentum', show_default=True, default=0.9, help='Momentum')
@click.option('-w', '--weight-decay', show_default=True, default=0.0, help='Weight decay')
@click.option('--schedule', show_default=True, type=click.Choice(['constant', '1cycle']), default='constant',
help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--epoch` option.')
@click.option('-p', '--partition', show_default=True, default=0.9, help='Ground truth data partition ratio between train/validation set')
@click.option('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),
default=None, help='Ground truth normalization')
@click.option('-n', '--normalize-whitespace/--no-normalize-whitespace',
show_default=True, default=True, help='Normalizes unicode whitespace')
@click.option('-c', '--codec', show_default=True, default=None, type=click.File(mode='r', lazy=True),
help='Load a codec JSON definition (invalid if loading existing model)')
@click.option('--resize', show_default=True, default='fail', type=click.Choice(['add', 'both', 'fail']),
help='Codec/output layer resizing option. If set to `add` code '
'points will be added, `both` will set the layer to match exactly '
'the training data, `fail` will abort if training data and model '
'codec do not match.')
@click.option('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order')
@click.option('-t', '--training-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with additional paths to training data')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data. Overrides the `-p` parameter')
@click.option('--preload/--no-preload', show_default=True, default=None, help='Hard enable/disable for training data preloading')
@click.option('--threads', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.')
#@click.option('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,
# help='When loading an existing model, retrieve hyperparameters from the model')
@click.argument('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def train(ctx, pad, output, spec, append, load, freq, quit, epochs,
lag, min_delta, device, optimizer, lrate, momentum, weight_decay,
schedule, partition, normalization, normalize_whitespace, codec,
resize, reorder, training_files, evaluation_files, preload, threads,
ground_truth):
"""
Trains a model from image-text pairs.
"""
if not load and append:
raise click.BadOptionUsage('append', 'append option requires loading an existing model')
if resize != 'fail' and not load:
raise click.BadOptionUsage('resize', 'resize option requires loading an existing model')
import re
import torch
import shutil
import numpy as np
from torch.utils.data import DataLoader
from kraken.lib import models, vgsl, train
from kraken.lib.util import make_printable
from kraken.lib.train import EarlyStopping, EpochStopping, TrainStopper, TrainScheduler, add_1cycle
from kraken.lib.codec import PytorchCodec
from kraken.lib.dataset import GroundTruthDataset, generate_input_transforms
logger.info('Building ground truth set from {} line images'.format(len(ground_truth) + len(training_files)))
completed_epochs = 0
# load model if given. if a new model has to be created we need to do that
    # after data set initialization, otherwise the output size is still unknown.
nn = None
#hyper_fields = ['freq', 'quit', 'epochs', 'lag', 'min_delta', 'optimizer', 'lrate', 'momentum', 'weight_decay', 'schedule', 'partition', 'normalization', 'normalize_whitespace', 'reorder', 'preload', 'completed_epochs', 'output']
if load:
logger.info('Loading existing model from {} '.format(load))
message('Loading existing model from {}'.format(load), nl=False)
nn = vgsl.TorchVGSLModel.load_model(load)
#if nn.user_metadata and load_hyper_parameters:
# for param in hyper_fields:
# if param in nn.user_metadata:
# logger.info('Setting \'{}\' to \'{}\''.format(param, nn.user_metadata[param]))
# message('Setting \'{}\' to \'{}\''.format(param, nn.user_metadata[param]))
# locals()[param] = nn.user_metadata[param]
message('\u2713', fg='green', nl=False)
# preparse input sizes from vgsl string to seed ground truth data set
# sizes and dimension ordering.
if not nn:
spec = spec.strip()
if spec[0] != '[' or spec[-1] != ']':
raise click.BadOptionUsage('spec', 'VGSL spec {} not bracketed'.format(spec))
blocks = spec[1:-1].split(' ')
m = re.match(r'(\d+),(\d+),(\d+),(\d+)', blocks[0])
if not m:
raise click.BadOptionUsage('spec', 'Invalid input spec {}'.format(blocks[0]))
batch, height, width, channels = [int(x) for x in m.groups()]
else:
batch, channels, height, width = nn.input
try:
transforms = generate_input_transforms(batch, height, width, channels, pad)
except KrakenInputException as e:
raise click.BadOptionUsage('spec', str(e))
# disable automatic partition when given evaluation set explicitly
if evaluation_files:
partition = 1
ground_truth = list(ground_truth)
# merge training_files into ground_truth list
if training_files:
ground_truth.extend(training_files)
if len(ground_truth) == 0:
raise click.UsageError('No training data was provided to the train command. Use `-t` or the `ground_truth` argument.')
np.random.shuffle(ground_truth)
if len(ground_truth) > 2500 and not preload:
logger.info('Disabling preloading for large (>2500) training data set. Enable by setting --preload parameter')
preload = False
# implicit preloading enabled for small data sets
if preload is None:
preload = True
tr_im = ground_truth[:int(len(ground_truth) * partition)]
if evaluation_files:
logger.debug('Using {} lines from explicit eval set'.format(len(evaluation_files)))
te_im = evaluation_files
else:
te_im = ground_truth[int(len(ground_truth) * partition):]
logger.debug('Taking {} lines from training for evaluation'.format(len(te_im)))
# set multiprocessing tensor sharing strategy
if 'file_system' in torch.multiprocessing.get_all_sharing_strategies():
logger.debug('Setting multiprocessing tensor sharing strategy to file_system')
torch.multiprocessing.set_sharing_strategy('file_system')
gt_set = GroundTruthDataset(normalization=normalization,
whitespace_normalization=normalize_whitespace,
reorder=reorder,
im_transforms=transforms,
preload=preload)
with log.progressbar(tr_im, label='Building training set') as bar:
for im in bar:
logger.debug('Adding line {} to training set'.format(im))
try:
gt_set.add(im)
except FileNotFoundError as e:
logger.warning('{}: {}. Skipping.'.format(e.strerror, e.filename))
except KrakenInputException as e:
logger.warning(str(e))
val_set = GroundTruthDataset(normalization=normalization,
whitespace_normalization=normalize_whitespace,
reorder=reorder,
im_transforms=transforms,
preload=preload)
with log.progressbar(te_im, label='Building validation set') as bar:
for im in bar:
logger.debug('Adding line {} to validation set'.format(im))
try:
val_set.add(im)
except FileNotFoundError as e:
logger.warning('{}: {}. Skipping.'.format(e.strerror, e.filename))
except KrakenInputException as e:
logger.warning(str(e))
logger.info('Training set {} lines, validation set {} lines, alphabet {} symbols'.format(len(gt_set._images), len(val_set._images), len(gt_set.alphabet)))
alpha_diff_only_train = set(gt_set.alphabet).difference(set(val_set.alphabet))
alpha_diff_only_val = set(val_set.alphabet).difference(set(gt_set.alphabet))
if alpha_diff_only_train:
logger.warning('alphabet mismatch: chars in training set only: {} (not included in accuracy test during training)'.format(alpha_diff_only_train))
if alpha_diff_only_val:
logger.warning('alphabet mismatch: chars in validation set only: {} (not trained)'.format(alpha_diff_only_val))
logger.info('grapheme\tcount')
for k, v in sorted(gt_set.alphabet.items(), key=lambda x: x[1], reverse=True):
char = make_printable(k)
if char == k:
char = '\t' + char
logger.info(u'{}\t{}'.format(char, v))
logger.debug('Encoding training set')
# use model codec when given
if append:
# is already loaded
nn = cast(vgsl.TorchVGSLModel, nn)
gt_set.encode(codec)
message('Slicing and dicing model ', nl=False)
# now we can create a new model
spec = '[{} O1c{}]'.format(spec[1:-1], gt_set.codec.max_label()+1)
logger.info('Appending {} to existing model {} after {}'.format(spec, nn.spec, append))
nn.append(append, spec)
nn.add_codec(gt_set.codec)
message('\u2713', fg='green')
logger.info('Assembled model spec: {}'.format(nn.spec))
elif load:
# is already loaded
nn = cast(vgsl.TorchVGSLModel, nn)
# prefer explicitly given codec over network codec if mode is 'both'
codec = codec if (codec and resize == 'both') else nn.codec
try:
gt_set.encode(codec)
except KrakenEncodeException as e:
message('Network codec not compatible with training set')
alpha_diff = set(gt_set.alphabet).difference(set(codec.c2l.keys()))
if resize == 'fail':
logger.error('Training data and model codec alphabets mismatch: {}'.format(alpha_diff))
ctx.exit(code=1)
elif resize == 'add':
message('Adding missing labels to network ', nl=False)
logger.info('Resizing codec to include {} new code points'.format(len(alpha_diff)))
codec.c2l.update({k: [v] for v, k in enumerate(alpha_diff, start=codec.max_label()+1)})
nn.add_codec(PytorchCodec(codec.c2l))
logger.info('Resizing last layer in network to {} outputs'.format(codec.max_label()+1))
nn.resize_output(codec.max_label()+1)
gt_set.encode(nn.codec)
message('\u2713', fg='green')
elif resize == 'both':
message('Fitting network exactly to training set ', nl=False)
logger.info('Resizing network or given codec to {} code sequences'.format(len(gt_set.alphabet)))
gt_set.encode(None)
ncodec, del_labels = codec.merge(gt_set.codec)
logger.info('Deleting {} output classes from network ({} retained)'.format(len(del_labels), len(codec)-len(del_labels)))
gt_set.encode(ncodec)
nn.resize_output(ncodec.max_label()+1, del_labels)
message('\u2713', fg='green')
else:
raise click.BadOptionUsage('resize', 'Invalid resize value {}'.format(resize))
else:
gt_set.encode(codec)
logger.info('Creating new model {} with {} outputs'.format(spec, gt_set.codec.max_label()+1))
spec = '[{} O1c{}]'.format(spec[1:-1], gt_set.codec.max_label()+1)
nn = vgsl.TorchVGSLModel(spec)
# initialize weights
message('Initializing model ', nl=False)
nn.init_weights()
nn.add_codec(gt_set.codec)
# initialize codec
message('\u2713', fg='green')
# half the number of data loading processes if device isn't cuda and we haven't enabled preloading
if device == 'cpu' and not preload:
loader_threads = threads // 2
else:
loader_threads = threads
train_loader = DataLoader(gt_set, batch_size=1, shuffle=True, num_workers=loader_threads, pin_memory=True)
threads -= loader_threads
# don't encode validation set as the alphabets may not match causing encoding failures
val_set.training_set = list(zip(val_set._images, val_set._gt))
logger.debug('Constructing {} optimizer (lr: {}, momentum: {})'.format(optimizer, lrate, momentum))
    # set mode to training
nn.train()
# set number of OpenMP threads
logger.debug('Set OpenMP threads to {}'.format(threads))
nn.set_num_threads(threads)
logger.debug('Moving model to device {}'.format(device))
optim = getattr(torch.optim, optimizer)(nn.nn.parameters(), lr=0)
if 'accuracy' not in nn.user_metadata:
nn.user_metadata['accuracy'] = []
tr_it = TrainScheduler(optim)
if schedule == '1cycle':
add_1cycle(tr_it, int(len(gt_set) * epochs), lrate, momentum, momentum - 0.10, weight_decay)
else:
# constant learning rate scheduler
tr_it.add_phase(1, (lrate, lrate), (momentum, momentum), weight_decay, train.annealing_const)
if quit == 'early':
st_it = EarlyStopping(min_delta, lag)
elif quit == 'dumb':
st_it = EpochStopping(epochs - completed_epochs)
else:
raise click.BadOptionUsage('quit', 'Invalid training interruption scheme {}'.format(quit))
#for param in hyper_fields:
# logger.debug('Setting \'{}\' to \'{}\' in model metadata'.format(param, locals()[param]))
# nn.user_metadata[param] = locals()[param]
trainer = train.KrakenTrainer(model=nn,
optimizer=optim,
device=device,
filename_prefix=output,
event_frequency=freq,
train_set=train_loader,
val_set=val_set,
stopper=st_it)
trainer.add_lr_scheduler(tr_it)
    with log.progressbar(label='stage {}/{}'.format(1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞'),
length=trainer.event_it, show_pos=True) as bar:
def _draw_progressbar():
bar.update(1)
def _print_eval(epoch, accuracy, chars, error):
message('Accuracy report ({}) {:0.4f} {} {}'.format(epoch, accuracy, chars, error))
# reset progress bar
            bar.label = 'stage {}/{}'.format(epoch+1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞')
bar.pos = 0
bar.finished = False
trainer.run(_print_eval, _draw_progressbar)
if quit == 'early':
message('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss))
logger.info('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss))
shutil.copy('{}_{}.mlmodel'.format(output, trainer.stopper.best_epoch), '{}_best.mlmodel'.format(output))
@cli.command('test')
@click.pass_context
@click.option('-m', '--model', show_default=True, type=click.Path(exists=True, readable=True),
multiple=True, help='Model(s) to evaluate')
@click.option('-e', '--evaluation-files', show_default=True, default=None, multiple=True,
callback=_validate_manifests, type=click.File(mode='r', lazy=True),
help='File(s) with paths to evaluation data.')
@click.option('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '
'padding around lines')
@click.option('--threads', show_default=True, default=1, help='Number of OpenMP threads when running on CPU.')
@click.argument('test_set', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))
def test(ctx, model, evaluation_files, device, pad, threads, test_set):
"""
Evaluate on a test set.
"""
if not model:
raise click.UsageError('No model to evaluate given.')
import numpy as np
from PIL import Image
from kraken.serialization import render_report
from kraken.lib import models
from kraken.lib.dataset import global_align, compute_confusions, generate_input_transforms
logger.info('Building test set from {} line images'.format(len(test_set) + len(evaluation_files)))
nn = {}
for p in model:
message('Loading model {}\t'.format(p), nl=False)
nn[p] = models.load_any(p)
message('\u2713', fg='green')
test_set = list(test_set)
# set number of OpenMP threads
logger.debug('Set OpenMP threads to {}'.format(threads))
next(iter(nn.values())).nn.set_num_threads(threads)
# merge training_files into ground_truth list
if evaluation_files:
test_set.extend(evaluation_files)
if len(test_set) == 0:
raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')
def _get_text(im):
with open(os.path.splitext(im)[0] + '.gt.txt', 'r') as fp:
return get_display(fp.read())
acc_list = []
for p, net in nn.items():
algn_gt: List[str] = []
algn_pred: List[str] = []
chars = 0
error = 0
message('Evaluating {}'.format(p))
logger.info('Evaluating {}'.format(p))
batch, channels, height, width = net.nn.input
ts = generate_input_transforms(batch, height, width, channels, pad)
with log.progressbar(test_set, label='Evaluating') as bar:
for im_path in bar:
i = ts(Image.open(im_path))
text = _get_text(im_path)
pred = net.predict_string(i)
chars += len(text)
c, algn1, algn2 = global_align(text, pred)
algn_gt.extend(algn1)
algn_pred.extend(algn2)
error += c
acc_list.append((chars-error)/chars)
confusions, scripts, ins, dels, subs = compute_confusions(algn_gt, algn_pred)
rep = render_report(p, chars, error, confusions, scripts, ins, dels, subs)
logger.info(rep)
message(rep)
logger.info('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
message('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))
@cli.command('extract')
@click.pass_context
@click.option('-b', '--binarize/--no-binarize', show_default=True, default=True,
help='Binarize color/grayscale images')
@click.option('-u', '--normalization', show_default=True,
type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
help='Normalize ground truth')
@click.option('-s', '--normalize-whitespace/--no-normalize-whitespace',
show_default=True, default=True, help='Normalizes unicode whitespace')
@click.option('-n', '--reorder/--no-reorder', default=False, show_default=True,
help='Reorder transcribed lines to display order')
@click.option('-r', '--rotate/--no-rotate', default=True, show_default=True,
help='Skip rotation of vertical lines')
@click.option('-o', '--output', type=click.Path(), default='training', show_default=True,
help='Output directory')
@click.option('--format', default='{idx:06d}', show_default=True, help='Format for extractor output. Valid fields are `src` (source file), `idx` (line number), and `uuid` (v4 uuid)')
@click.argument('transcriptions', nargs=-1, type=click.File(lazy=True))
def extract(ctx, binarize, normalization, normalize_whitespace, reorder,
rotate, output, format, transcriptions):
"""
Extracts image-text pairs from a transcription environment created using
``ketos transcribe``.
"""
import regex
import base64
from io import BytesIO
from PIL import Image
from lxml import html, etree
from kraken import binarization
try:
os.mkdir(output)
except Exception:
pass
text_transforms = []
if normalization:
text_transforms.append(lambda x: unicodedata.normalize(normalization, x))
if normalize_whitespace:
        text_transforms.append(lambda x: regex.sub(r'\s', ' ', x))
if reorder:
text_transforms.append(get_display)
idx = 0
manifest = []
with log.progressbar(transcriptions, label='Reading transcriptions') as bar:
for fp in bar:
logger.info('Reading {}'.format(fp.name))
doc = html.parse(fp)
etree.strip_tags(doc, etree.Comment)
td = doc.find(".//meta[@itemprop='text_direction']")
if td is None:
td = 'horizontal-lr'
else:
td = td.attrib['content']
im = None
dest_dict = {'output': output, 'idx': 0, 'src': fp.name, 'uuid': str(uuid.uuid4())}
for section in doc.xpath('//section'):
img = section.xpath('.//img')[0].get('src')
fd = BytesIO(base64.b64decode(img.split(',')[1]))
im = Image.open(fd)
if not im:
logger.info('Skipping {} because image not found'.format(fp.name))
break
if binarize:
im = binarization.nlbin(im)
for line in section.iter('li'):
if line.get('contenteditable') and (not u''.join(line.itertext()).isspace() and u''.join(line.itertext())):
dest_dict['idx'] = idx
dest_dict['uuid'] = str(uuid.uuid4())
logger.debug('Writing line {:06d}'.format(idx))
l_img = im.crop([int(x) for x in line.get('data-bbox').split(',')])
if rotate and td.startswith('vertical'):
im.rotate(90, expand=True)
l_img.save(('{output}/' + format + '.png').format(**dest_dict))
manifest.append((format + '.png').format(**dest_dict))
text = u''.join(line.itertext()).strip()
for func in text_transforms:
text = func(text)
with open(('{output}/' + format + '.gt.txt').format(**dest_dict), 'wb') as t:
t.write(text.encode('utf-8'))
idx += 1
logger.info('Extracted {} lines'.format(idx))
with open('{}/manifest.txt'.format(output), 'w') as fp:
fp.write('\n'.join(manifest))
@cli.command('transcribe')
@click.pass_context
@click.option('-d', '--text-direction', default='horizontal-lr',
type=click.Choice(['horizontal-lr', 'horizontal-rl', 'vertical-lr', 'vertical-rl']),
help='Sets principal text direction', show_default=True)
@click.option('--scale', default=None, type=click.FLOAT)
@click.option('--bw/--orig', default=True, show_default=True,
help="Put nonbinarized images in output")
@click.option('-m', '--maxcolseps', default=2, type=click.INT, show_default=True)
@click.option('-b/-w', '--black_colseps/--white_colseps', default=False, show_default=True)
@click.option('-f', '--font', default='',
help='Font family to use')
@click.option('-fs', '--font-style', default=None,
help='Font style to use')
@click.option('-p', '--prefill', default=None,
help='Use given model for prefill mode.')
@click.option('-p', '--pad', show_default=True, type=(int, int), default=(0, 0),
help='Left and right padding around lines')
@click.option('-l', '--lines', type=click.Path(exists=True), show_default=True,
help='JSON file containing line coordinates')
@click.option('-o', '--output', type=click.File(mode='wb'), default='transcription.html',
help='Output file', show_default=True)
@click.argument('images', nargs=-1, type=click.File(mode='rb', lazy=True))
def transcription(ctx, text_direction, scale, bw, maxcolseps,
black_colseps, font, font_style, prefill, pad, lines, output,
images):
"""
Creates transcription environments for ground truth generation.
"""
from PIL import Image
from kraken import rpred
from kraken import pageseg
from kraken import transcribe
from kraken import binarization
from kraken.lib import models
from kraken.lib.util import is_bitonal
ti = transcribe.TranscriptionInterface(font, font_style)
if len(images) > 1 and lines:
raise click.UsageError('--lines option is incompatible with multiple image files')
if prefill:
logger.info('Loading model {}'.format(prefill))
message('Loading RNN', nl=False)
prefill = models.load_any(prefill)
message('\u2713', fg='green')
with log.progressbar(images, label='Reading images') as bar:
for fp in bar:
logger.info('Reading {}'.format(fp.name))
im = Image.open(fp)
if im.mode not in ['1', 'L', 'P', 'RGB']:
logger.warning('Input {} is in {} color mode. Converting to RGB'.format(fp.name, im.mode))
im = im.convert('RGB')
logger.info('Binarizing page')
im_bin = binarization.nlbin(im)
im_bin = im_bin.convert('1')
logger.info('Segmenting page')
if not lines:
res = pageseg.segment(im_bin, text_direction, scale, maxcolseps, black_colseps, pad=pad)
else:
with open_file(lines, 'r') as fp:
try:
fp = cast(IO[Any], fp)
res = json.load(fp)
except ValueError as e:
raise click.UsageError('{} invalid segmentation: {}'.format(lines, str(e)))
if prefill:
it = rpred.rpred(prefill, im_bin, res)
preds = []
logger.info('Recognizing')
for pred in it:
logger.debug('{}'.format(pred.prediction))
preds.append(pred)
ti.add_page(im, res, records=preds)
else:
ti.add_page(im, res)
fp.close()
logger.info('Writing transcription to {}'.format(output.name))
message('Writing output', nl=False)
ti.write(output)
message('\u2713', fg='green')
@cli.command('linegen')
@click.pass_context
@click.option('-f', '--font', default='sans',
help='Font family to render texts in.')
@click.option('-n', '--maxlines', type=click.INT, default=0,
help='Maximum number of lines to generate')
@click.option('-e', '--encoding', default='utf-8',
help='Decode text files with given codec.')
@click.option('-u', '--normalization',
type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
help='Normalize ground truth')
@click.option('-ur', '--renormalize',
type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,
help='Renormalize text for rendering purposes.')
@click.option('--reorder/--no-reorder', default=False, help='Reorder code points to display order')
@click.option('-fs', '--font-size', type=click.INT, default=32,
help='Font size to render texts in.')
@click.option('-fw', '--font-weight', type=click.INT, default=400,
help='Font weight to render texts in.')
@click.option('-l', '--language',
help='RFC-3066 language tag for language-dependent font shaping')
@click.option('-ll', '--max-length', type=click.INT, default=None,
help="Discard lines above length (in Unicode codepoints).")
@click.option('--strip/--no-strip', help="Remove whitespace from start and end "
"of lines.")
@click.option('-d', '--disable-degradation', is_flag=True, help="Don't degrade "
'output lines.')
@click.option('-a', '--alpha', type=click.FLOAT, default=1.5,
help="Mean of folded normal distribution for sampling foreground pixel flip probability")
@click.option('-b', '--beta', type=click.FLOAT, default=1.5,
help="Mean of folded normal distribution for sampling background pixel flip probability")
@click.option('-d', '--distort', type=click.FLOAT, default=1.0,
help='Mean of folded normal distribution to take distortion values from')
@click.option('-ds', '--distortion-sigma', type=click.FLOAT, default=20.0,
help='Mean of folded normal distribution to take standard deviations for the '
'Gaussian kernel from')
@click.option('--legacy/--no-legacy', default=False,
help='Use ocropy-style degradations')
@click.option('-o', '--output', type=click.Path(), default='training_data',
help='Output directory')
@click.argument('text', nargs=-1, type=click.Path(exists=True))
def line_generator(ctx, font, maxlines, encoding, normalization, renormalize,
reorder, font_size, font_weight, language, max_length, strip,
disable_degradation, alpha, beta, distort, distortion_sigma,
legacy, output, text):
"""
Generates artificial text line training data.
"""
import errno
import numpy as np
from kraken import linegen
from kraken.lib.util import make_printable
lines: Set[str] = set()
if not text:
return
with log.progressbar(text, label='Reading texts') as bar:
for t in text:
with click.open_file(t, encoding=encoding) as fp:
logger.info('Reading {}'.format(t))
for l in fp:
lines.add(l.rstrip('\r\n'))
if normalization:
lines = set([unicodedata.normalize(normalization, line) for line in lines])
if strip:
lines = set([line.strip() for line in lines])
if max_length:
lines = set([line for line in lines if len(line) < max_length])
logger.info('Read {} lines'.format(len(lines)))
message('Read {} unique lines'.format(len(lines)))
if maxlines and maxlines < len(lines):
message('Sampling {} lines\t'.format(maxlines), nl=False)
llist = list(lines)
lines = set(llist[idx] for idx in np.random.randint(0, len(llist), maxlines))
message('\u2713', fg='green')
try:
os.makedirs(output)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# calculate the alphabet and print it for verification purposes
alphabet: Set[str] = set()
for line in lines:
alphabet.update(line)
chars = []
combining = []
for char in sorted(alphabet):
k = make_printable(char)
if k != char:
combining.append(k)
else:
chars.append(k)
    message('Σ (len: {})'.format(len(alphabet)))
message('Symbols: {}'.format(''.join(chars)))
if combining:
message('Combining Characters: {}'.format(', '.join(combining)))
lg = linegen.LineGenerator(font, font_size, font_weight, language)
with log.progressbar(lines, label='Writing images') as bar:
for idx, line in enumerate(bar):
logger.info(line)
try:
if renormalize:
im = lg.render_line(unicodedata.normalize(renormalize, line))
else:
im = lg.render_line(line)
except KrakenCairoSurfaceException as e:
logger.info('{}: {} {}'.format(e.message, e.width, e.height))
continue
if not disable_degradation and not legacy:
im = linegen.degrade_line(im, alpha=alpha, beta=beta)
im = linegen.distort_line(im, abs(np.random.normal(distort)), abs(np.random.normal(distortion_sigma)))
elif legacy:
im = linegen.ocropy_degrade(im)
im.save('{}/{:06d}.png'.format(output, idx))
with open('{}/{:06d}.gt.txt'.format(output, idx), 'wb') as fp:
if reorder:
fp.write(get_display(line).encode('utf-8'))
else:
fp.write(line.encode('utf-8'))
@cli.command('publish')
@click.pass_context
@click.option('-i', '--metadata', show_default=True,
type=click.File(mode='r', lazy=True), help='Metadata for the '
'model. Will be prompted from the user if not given')
@click.option('-a', '--access-token', prompt=True, help='Zenodo access token')
@click.argument('model', nargs=1, type=click.Path(exists=False, readable=True, dir_okay=False))
def publish(ctx, metadata, access_token, model):
"""
Publishes a model on the zenodo model repository.
"""
import json
import pkg_resources
from functools import partial
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from kraken import repo
from kraken.lib import models
with pkg_resources.resource_stream(__name__, 'metadata.schema.json') as fp:
schema = json.load(fp)
nn = models.load_any(model)
if not metadata:
author = click.prompt('author')
affiliation = click.prompt('affiliation')
summary = click.prompt('summary')
description = click.edit('Write long form description (training data, transcription standards) of the model here')
accuracy_default = None
# take last accuracy measurement in model metadata
if 'accuracy' in nn.nn.user_metadata and nn.nn.user_metadata['accuracy']:
accuracy_default = nn.nn.user_metadata['accuracy'][-1][1] * 100
accuracy = click.prompt('accuracy on test set', type=float, default=accuracy_default)
script = [click.prompt('script', type=click.Choice(sorted(schema['properties']['script']['items']['enum'])), show_choices=True)]
license = click.prompt('license', type=click.Choice(sorted(schema['properties']['license']['enum'])), show_choices=True)
metadata = {
'authors': [{'name': author, 'affiliation': affiliation}],
'summary': summary,
'description': description,
'accuracy': accuracy,
'license': license,
'script': script,
'name': os.path.basename(model),
'graphemes': ['a']
}
while True:
try:
validate(metadata, schema)
except ValidationError as e:
message(e.message)
metadata[e.path[-1]] = click.prompt(e.path[-1], type=float if e.schema['type'] == 'number' else str)
continue
break
else:
metadata = json.load(metadata)
validate(metadata, schema)
metadata['graphemes'] = [char for char in ''.join(nn.codec.c2l.keys())]
oid = repo.publish_model(model, metadata, access_token, partial(message, '.', nl=False))
print('\nmodel PID: {}'.format(oid))
if __name__ == '__main__':
cli()
| 47.251462 | 234 | 0.62146 | 0 | 0 | 0 | 0 | 38,606 | 0.955476 | 0 | 0 | 12,280 | 0.303923 |
81daacebc9755ed9fad67d0bb9146bb8f488fc5d | 2,728 | py | Python | util/visualize_loss.py | whq-hqw/detr_change | 142f75cc5e0b59ca6e07928ddcbed3e461816611 | [
"Apache-2.0"
]
| 2 | 2020-07-17T15:09:47.000Z | 2020-11-20T13:52:48.000Z | util/visualize_loss.py | whq-hqw/detr_change | 142f75cc5e0b59ca6e07928ddcbed3e461816611 | [
"Apache-2.0"
]
| null | null | null | util/visualize_loss.py | whq-hqw/detr_change | 142f75cc5e0b59ca6e07928ddcbed3e461816611 | [
"Apache-2.0"
]
| null | null | null | from os.path import *
import glob
import json
import numpy as np
from util.plot_utils import plot_curves, plot_multi_loss_distribution
TMPJPG = expanduser("~/Pictures/")
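# Gather the requested metrics from each experiment's log.txt under the detr
# experiment root and plot them side by side via plot_multi_loss_distribution.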
def plot_multi_logs(exp_name, keys, save_name, epoch, addition_len):
root_path = expanduser("/raid/dataset/detection/detr_exp")
folder_candidate = glob.glob(join(root_path, "*"))
folders = []
for name in exp_name:
for folder in folder_candidate:
if folder[-len(name):] == name:
folders.append(folder)
break
assert len(exp_name) == len(folders)
exp_data = np.stack(get_experiment_logs(folders, keys, epoch, addition_len)).transpose((1, 0, 2))
if len(addition_len) > 0 and "test_coco_eval_bbox" in keys:
idx = keys.index("test_coco_eval_bbox")
addition_len.extend(keys[idx + 1:])
keys = keys[:idx] + addition_len
plot_multi_loss_distribution(
multi_line_data=exp_data,
multi_line_labels=[exp_name] * len(keys),
save_path=TMPJPG, window=5, name=save_name,
titles=keys, fig_size=(12, 3 * len(keys)), legend_loc="upper left"
)
def get_experiment_logs(folders, keys, epoch, addition_len):
exp_data = []
for folder in folders:
print(folder)
contents = np.array(load_log(join(folder, "log.txt"), keys, addition_len))
if contents.shape[-1] >= epoch:
contents = contents[:, :epoch]
else:
zeros = np.zeros((contents.shape[0], epoch - contents.shape[1]), dtype=contents.dtype)
            contents = np.concatenate((contents, zeros), axis=1)
exp_data.append(contents)
return exp_data
def load_log(path, keys, addition=()):
if "test_coco_eval_bbox" in keys:
contents = [[] for _ in range(len(keys) + len(addition) - 1)]
else:
contents = [[] for _ in range(len(keys))]
with open(path, "r") as txt:
for line in txt.readlines():
data = json.loads(line)
j = 0
for i, key in enumerate(keys):
if key == "test_coco_eval_bbox":
for j in range(len(addition)):
contents[i + j].append(data[key][j])
else:
contents[i + j].append(data[key])
return contents
if __name__ == '__main__':
exp_name = ["be", "be_768", "be_1024", "be_mid_layer_only", "origin"]
keys = ["train_loss_bbox", "train_loss_ce", "train_loss_giou", "test_coco_eval_bbox"]
eval_name = ["AP", "AP50", "AP75", "AP_small", "AP_mid", "AP_Big",
"AR", "AR50", "AR75", "AR_small", "AR_mid", "AR_Big"]
plot_multi_logs(exp_name, keys, save_name="loss", epoch=50, addition_len=eval_name[:6])
| 38.422535 | 101 | 0.612903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 376 | 0.13783 |
81dab8323f10c78c0bf2886a1ab5569f40f742ad | 12,616 | py | Python | tower_cli/resources/job.py | kedark3/tower-cli | 487a1b9a8e96509798fee108e4f7d2c187177771 | [
"Apache-2.0"
]
| 363 | 2015-01-14T17:48:34.000Z | 2022-01-29T06:37:04.000Z | tower_cli/resources/job.py | kedark3/tower-cli | 487a1b9a8e96509798fee108e4f7d2c187177771 | [
"Apache-2.0"
]
| 703 | 2015-01-06T17:17:20.000Z | 2020-09-16T15:54:17.000Z | tower_cli/resources/job.py | kedark3/tower-cli | 487a1b9a8e96509798fee108e4f7d2c187177771 | [
"Apache-2.0"
]
| 203 | 2015-01-18T22:38:23.000Z | 2022-01-28T19:19:05.000Z | # Copyright 2015, Ansible, Inc.
# Luke Sneeringer <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
from getpass import getpass
from distutils.version import LooseVersion
import click
from tower_cli import models, get_resource, resources, exceptions as exc
from tower_cli.api import client
from tower_cli.cli import types
from tower_cli.utils import debug, parser
PROMPT_LIST = ['diff_mode', 'limit', 'tags', 'skip_tags', 'job_type', 'verbosity', 'inventory', 'credential']
class Resource(models.ExeResource):
"""A resource for jobs.
This resource has ordinary list and get methods,
but it does not have create or modify.
Instead of being created, a job is launched.
"""
cli_help = 'Launch or monitor jobs.'
endpoint = '/jobs/'
job_template = models.Field(
key='-J',
type=types.Related('job_template'), required=False, display=True
)
job_explanation = models.Field(required=False, display=False, read_only=True)
created = models.Field(required=False, display=True)
status = models.Field(required=False, display=True)
elapsed = models.Field(required=False, display=True, type=float)
@resources.command(
use_fields_as_options=('job_template',)
)
@click.option('--monitor', is_flag=True, default=False,
help='If sent, immediately calls `job monitor` on the newly '
'launched job rather than exiting with a success.')
@click.option('--wait', is_flag=True, default=False,
help='Monitor the status of the job, but do not print '
'while job is in progress.')
@click.option('--timeout', required=False, type=int,
help='If provided with --monitor, this command (not the job)'
' will time out after the given number of seconds. '
'Does nothing if --monitor is not sent.')
@click.option('--no-input', is_flag=True, default=False,
help='Suppress any requests for input.')
@click.option('-e', '--extra-vars', required=False, multiple=True,
help='yaml format text that contains extra variables '
'to pass on. Use @ to get these from a file.')
@click.option('--diff-mode', type=bool, required=False, help='Specify diff mode for job template to run.')
@click.option('--limit', required=False, help='Specify host limit for job template to run.')
@click.option('--tags', required=False, help='Specify tagged actions in the playbook to run.')
@click.option('--skip-tags', required=False, help='Specify tagged actions in the playbook to omit.')
@click.option('--job-type', required=False, type=click.Choice(['run', 'check']),
help='Specify job type for job template to run.')
@click.option('--verbosity', type=int, required=False, help='Specify verbosity of the playbook run.')
@click.option('--inventory', required=False, type=types.Related('inventory'),
help='Specify inventory for job template to run.')
@click.option('--credential', required=False, multiple=True, type=types.Related('credential'),
help='Specify any type of credential(s) for job template to run.')
def launch(self, job_template=None, monitor=False, wait=False,
timeout=None, no_input=True, extra_vars=None, **kwargs):
"""Launch a new job based on a job template.
        Creates a new job in Ansible Tower, immediately starts it, and
        returns an ID so that its status can be monitored.
=====API DOCS=====
Launch a new job based on a job template.
:param job_template: Primary key or name of the job template to launch new job.
:type job_template: str
:param monitor: Flag that if set, immediately calls ``monitor`` on the newly launched job rather
than exiting with a success.
:type monitor: bool
:param wait: Flag that if set, monitor the status of the job, but do not print while job is in progress.
:type wait: bool
:param timeout: If provided with ``monitor`` flag set, this attempt will time out after the given number
of seconds.
:type timeout: int
:param no_input: Flag that if set, suppress any requests for input.
:type no_input: bool
:param extra_vars: yaml formatted texts that contains extra variables to pass on.
:type extra_vars: array of strings
:param diff_mode: Specify diff mode for job template to run.
:type diff_mode: bool
:param limit: Specify host limit for job template to run.
:type limit: str
:param tags: Specify tagged actions in the playbook to run.
:type tags: str
:param skip_tags: Specify tagged actions in the playbook to omit.
:type skip_tags: str
:param job_type: Specify job type for job template to run.
:type job_type: str
:param verbosity: Specify verbosity of the playbook run.
:type verbosity: int
:param inventory: Specify machine credential for job template to run.
:type inventory: str
:param credential: Specify machine credential for job template to run.
:type credential: str
:returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on; Result of subsequent
``wait`` call if ``wait`` flag is on; Result of subsequent ``status`` call if none of
the two flags are on.
:rtype: dict
=====API DOCS=====
"""
# Get the job template from Ansible Tower.
# This is used as the baseline for starting the job.
jt_resource = get_resource('job_template')
jt = jt_resource.get(job_template)
# Update the job data for special treatment of certain fields
# Special case for job tags, historically just called --tags
tags = kwargs.get('tags', None)
data = {}
if tags:
data['job_tags'] = tags
# Special case for cross-version compatibility with credentials
cred_arg = kwargs.pop('credential', ())
if isinstance(cred_arg, (list, tuple)):
credentials = cred_arg
else:
credentials = [cred_arg]
if credentials:
if 'credentials' in jt['related']:
# Has Tower 3.3 / multi-cred support
# combine user-provided credentials with JT credentials
jt_creds = set(
c['id'] for c in jt['summary_fields']['credentials']
)
kwargs['credentials'] = list(set(credentials) | jt_creds)
else:
if len(credentials) > 1:
raise exc.UsageError(
'Providing multiple credentials on launch can only be '
'done with Tower version 3.3 and higher or recent AWX.'
)
kwargs['credential'] = credentials[0]
# Initialize an extra_vars list that starts with the job template
# preferences first, if they exist
extra_vars_list = []
if 'extra_vars' in data and len(data['extra_vars']) > 0:
            # But only do this for versions before 2.4
debug.log('Getting version of Tower.', header='details')
r = client.get('/config/')
if LooseVersion(r.json()['version']) < LooseVersion('2.4'):
extra_vars_list = [data['extra_vars']]
# Add the runtime extra_vars to this list
if extra_vars:
extra_vars_list += list(extra_vars) # accept tuples
# If the job template requires prompting for extra variables,
# do so (unless --no-input is set).
if jt.get('ask_variables_on_launch', False) and not no_input \
and not extra_vars:
# If JT extra_vars are JSON, echo them to user as YAML
initial = parser.process_extra_vars(
[jt['extra_vars']], force_json=False
)
initial = '\n'.join((
'# Specify extra variables (if any) here as YAML.',
'# Lines beginning with "#" denote comments.',
initial,
))
extra_vars = click.edit(initial) or ''
if extra_vars != initial:
extra_vars_list = [extra_vars]
# Data is starting out with JT variables, and we only want to
# include extra_vars that come from the algorithm here.
data.pop('extra_vars', None)
# Replace/populate data fields if prompted.
modified = set()
for resource in PROMPT_LIST:
if jt.pop('ask_' + resource + '_on_launch', False) and not no_input:
resource_object = kwargs.get(resource, None)
if type(resource_object) == types.Related:
resource_class = get_resource(resource)
resource_object = resource_class.get(resource).pop('id', None)
if resource_object is None:
debug.log('{0} is asked at launch but not provided'.
format(resource), header='warning')
elif resource != 'tags':
data[resource] = resource_object
modified.add(resource)
# Dump extra_vars into JSON string for launching job
if len(extra_vars_list) > 0:
data['extra_vars'] = parser.process_extra_vars(
extra_vars_list, force_json=True
)
# Create the new job in Ansible Tower.
start_data = {}
endpoint = '/job_templates/%d/launch/' % jt['id']
if 'extra_vars' in data and len(data['extra_vars']) > 0:
start_data['extra_vars'] = data['extra_vars']
if tags:
start_data['job_tags'] = data['job_tags']
for resource in PROMPT_LIST:
if resource in modified:
start_data[resource] = data[resource]
# There's a non-trivial chance that we are going to need some
# additional information to start the job; in particular, many jobs
# rely on passwords entered at run-time.
#
# If there are any such passwords on this job, ask for them now.
debug.log('Asking for information necessary to start the job.',
header='details')
job_start_info = client.get(endpoint).json()
for password in job_start_info.get('passwords_needed_to_start', []):
start_data[password] = getpass('Password for %s: ' % password)
# Actually start the job.
debug.log('Launching the job.', header='details')
self._pop_none(kwargs)
kwargs.update(start_data)
job_started = client.post(endpoint, data=kwargs)
# Get the job ID from the result.
job_id = job_started.json()['id']
        # If the returned JSON indicates any ignored fields, display them in
        # verbose mode.
if job_started.text == '':
ignored_fields = {}
else:
ignored_fields = job_started.json().get('ignored_fields', {})
has_ignored_fields = False
for key, value in ignored_fields.items():
if value and value != '{}':
if not has_ignored_fields:
debug.log('List of ignored fields on the server side:',
header='detail')
has_ignored_fields = True
debug.log('{0}: {1}'.format(key, value))
# Get some information about the running job to print
result = self.status(pk=job_id, detail=True)
result['changed'] = True
# If we were told to monitor the job once it started, then call
# monitor from here.
if monitor:
return self.monitor(job_id, timeout=timeout)
elif wait:
return self.wait(job_id, timeout=timeout)
return result
| 46.212454 | 112 | 0.611367 | 11,555 | 0.9159 | 0 | 0 | 10,872 | 0.861763 | 0 | 0 | 6,541 | 0.518469 |
81db0f62ce609e284136aef25e5f81fbdf1a0feb | 1,643 | py | Python | src/backend/expungeservice/models/charge_types/traffic_offense.py | april96415/recordexpungPDX | 43ec60ddfb7fe1ec7940b2a38c6e7d7f85286506 | [
"MIT"
]
| 38 | 2019-05-09T03:13:43.000Z | 2022-03-16T22:59:25.000Z | src/backend/expungeservice/models/charge_types/traffic_offense.py | april96415/recordexpungPDX | 43ec60ddfb7fe1ec7940b2a38c6e7d7f85286506 | [
"MIT"
]
| 938 | 2019-05-02T15:13:21.000Z | 2022-02-27T20:59:00.000Z | src/backend/expungeservice/models/charge_types/traffic_offense.py | april96415/recordexpungPDX | 43ec60ddfb7fe1ec7940b2a38c6e7d7f85286506 | [
"MIT"
]
| 65 | 2019-05-09T03:28:12.000Z | 2022-03-21T00:06:39.000Z | from dataclasses import dataclass
from typing import Any
from expungeservice.models.charge import ChargeType
from expungeservice.models.charge import ChargeUtil
from expungeservice.models.expungement_result import TypeEligibility, EligibilityStatus
@dataclass(frozen=True)
class TrafficOffense(ChargeType):
type_name: str = "Traffic Offense"
expungement_rules: Any = (
"A conviction for a State or municipal traffic offense is not eligible for expungement under ORS 137.225(7)(a).",
"Common convictions under this category include:",
(
"ul",
(
"Reckless Driving",
"Driving While Suspended",
"Driving Under the Influence of Intoxicants",
"Failure to Perform Duties of a Driver",
"Giving False Information to a Police Officer (when in a car)",
"Fleeing/Attempting to Elude a Police Officer",
"Possession of a Stolen Vehicle",
),
),
"Notably, Unauthorized Use of a Vehicle is not considered a traffic offense.",
"A dismissed traffic offense that is of charge level misdemeanor or higher, other than a Diverted DUII, is identified as a Dismissed Criminal Charge, and is thus eligible.",
)
def type_eligibility(self, disposition):
if ChargeUtil.dismissed(disposition):
raise ValueError("Dismissed criminal charges should have been caught by another class.")
elif ChargeUtil.convicted(disposition):
return TypeEligibility(EligibilityStatus.INELIGIBLE, reason="Ineligible under 137.225(7)(a)")
| 45.638889 | 181 | 0.677419 | 1,366 | 0.831406 | 0 | 0 | 1,390 | 0.846013 | 0 | 0 | 799 | 0.486306 |
81dbffa128ea7c27541a642445edf3ebd5fd3197 | 8,918 | py | Python | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
]
| 35 | 2020-01-22T18:38:27.000Z | 2022-03-22T16:19:56.000Z | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
]
| 292 | 2019-12-09T11:15:26.000Z | 2022-03-31T14:37:52.000Z | os_migrate/plugins/modules/import_workload_create_instance.py | jbadiapa/os-migrate | 19b591a672bc9e4af72e62dbd96be94a238a6dc2 | [
"Apache-2.0"
]
| 32 | 2019-12-09T11:09:44.000Z | 2022-03-24T01:13:31.000Z | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: import_workload_create_instance
short_description: Create NBD exports of OpenStack volumes
extends_documentation_fragment: openstack
version_added: "2.9.0"
author: "OpenStack tenant migration tools (@os-migrate)"
description:
- "Take an instance from an OS-Migrate YAML structure, and export its volumes over NBD."
options:
auth:
description:
- Dictionary with parameters for chosen auth type on the destination cloud.
required: true
type: dict
auth_type:
description:
- Auth type plugin for destination OpenStack cloud. Can be omitted if using password authentication.
required: false
type: str
region_name:
description:
- Destination OpenStack region name. Can be omitted if using default region.
required: false
type: str
availability_zone:
description:
- Availability zone.
required: false
type: str
cloud:
description:
- Ignored. Present for backwards compatibility.
required: false
type: raw
validate_certs:
description:
- Validate HTTPS certificates when logging in to OpenStack.
required: false
type: bool
data:
description:
- Data structure with server parameters as loaded from OS-Migrate workloads YAML file.
required: true
type: dict
block_device_mapping:
description:
- A block_device_mapping_v2 structure from the transfer_volumes module.
- Used to attach destination volumes to the new instance in the right order.
required: true
type: list
elements: dict
'''
EXAMPLES = '''
main.yml:
- name: validate loaded resources
os_migrate.os_migrate.validate_resource_files:
paths:
- "{{ os_migrate_data_dir }}/workloads.yml"
register: workloads_file_validation
when: import_workloads_validate_file
- name: read workloads resource file
os_migrate.os_migrate.read_resources:
path: "{{ os_migrate_data_dir }}/workloads.yml"
register: read_workloads
- name: get source conversion host address
os_migrate.os_migrate.os_conversion_host_info:
auth:
auth_url: https://src-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-source
user_domain_id: default
server_id: ce4dda96-5d8e-4b67-aee2-9845cdc943fe
register: os_src_conversion_host_info
- name: get destination conversion host address
os_migrate.os_migrate.os_conversion_host_info:
auth:
auth_url: https://dest-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-destination
user_domain_id: default
server_id: 2d2afe57-ace5-4187-8fca-5f10f9059ba1
register: os_dst_conversion_host_info
- name: import workloads
include_tasks: workload.yml
loop: "{{ read_workloads.resources }}"
workload.yml:
- block:
- name: preliminary setup for workload import
os_migrate.os_migrate.import_workload_prelim:
auth:
auth_url: https://dest-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-destination
user_domain_id: default
validate_certs: False
src_conversion_host: "{{ os_src_conversion_host_info.openstack_conversion_host }}"
src_auth:
auth_url: https://src-osp:13000/v3
username: migrate
password: migrate
project_domain_id: default
project_name: migration-source
user_domain_id: default
src_validate_certs: False
data: "{{ item }}"
data_dir: "{{ os_migrate_data_dir }}"
register: prelim
- debug:
msg:
- "{{ prelim.server_name }} log file: {{ prelim.log_file }}"
- "{{ prelim.server_name }} progress file: {{ prelim.state_file }}"
when: prelim.changed
- name: expose source volumes
os_migrate.os_migrate.import_workload_export_volumes:
auth: "{{ os_migrate_src_auth }}"
auth_type: "{{ os_migrate_src_auth_type|default(omit) }}"
region_name: "{{ os_migrate_src_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_src_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_src_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_src_client_cert|default(omit) }}"
client_key: "{{ os_migrate_src_client_key|default(omit) }}"
conversion_host:
"{{ os_src_conversion_host_info.openstack_conversion_host }}"
data: "{{ item }}"
log_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log"
state_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state"
ssh_key_path: "{{ os_migrate_conversion_keypair_private_path }}"
register: exports
when: prelim.changed
- name: transfer volumes to destination
os_migrate.os_migrate.import_workload_transfer_volumes:
auth: "{{ os_migrate_dst_auth }}"
auth_type: "{{ os_migrate_dst_auth_type|default(omit) }}"
region_name: "{{ os_migrate_dst_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_dst_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_dst_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_dst_client_cert|default(omit) }}"
client_key: "{{ os_migrate_dst_client_key|default(omit) }}"
data: "{{ item }}"
conversion_host:
"{{ os_dst_conversion_host_info.openstack_conversion_host }}"
ssh_key_path: "{{ os_migrate_conversion_keypair_private_path }}"
transfer_uuid: "{{ exports.transfer_uuid }}"
src_conversion_host_address:
"{{ os_src_conversion_host_info.openstack_conversion_host.address }}"
volume_map: "{{ exports.volume_map }}"
state_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.state"
log_file: "{{ os_migrate_data_dir }}/{{ prelim.server_name }}.log"
register: transfer
when: prelim.changed
- name: create destination instance
os_migrate.os_migrate.import_workload_create_instance:
auth: "{{ os_migrate_dst_auth }}"
auth_type: "{{ os_migrate_dst_auth_type|default(omit) }}"
region_name: "{{ os_migrate_dst_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_dst_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_dst_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_dst_client_cert|default(omit) }}"
client_key: "{{ os_migrate_dst_client_key|default(omit) }}"
data: "{{ item }}"
block_device_mapping: "{{ transfer.block_device_mapping }}"
register: os_migrate_destination_instance
when: prelim.changed
rescue:
- fail:
msg: "Failed to import {{ item.params.name }}!"
'''
RETURN = '''
server_id:
description: The ID of the newly created server.
returned: On successful creation of migrated server on destination cloud.
type: str
sample: 059635b7-451f-4a64-978a-7c2e9e4c15ff
'''
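# The registered result can be consumed by a follow-up task. A minimal, purely
# illustrative playbook snippet (the task name and debug usage are assumptions,
# not part of this module):
#
#   - name: show created server id
#     debug:
#       msg: "Created server {{ os_migrate_destination_instance.server_id }}"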
from ansible.module_utils.basic import AnsibleModule
# Import openstack module utils from ansible_collections.openstack.cloud.plugins as per ansible 3+
try:
from ansible_collections.openstack.cloud.plugins.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
except ImportError:
# If this fails fall back to ansible < 3 imports
from ansible.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
from ansible_collections.os_migrate.os_migrate.plugins.module_utils import server
def run_module():
argument_spec = openstack_full_argument_spec(
auth=dict(type='dict', no_log=True, required=True),
data=dict(type='dict', required=True),
block_device_mapping=dict(type='list', required=True, elements='dict'),
)
result = dict(
changed=False,
)
module = AnsibleModule(
argument_spec=argument_spec,
)
sdk, conn = openstack_cloud_from_module(module)
block_device_mapping = module.params['block_device_mapping']
ser_server = server.Server.from_data(module.params['data'])
sdk_server = ser_server.create(conn, block_device_mapping)
# Some info (e.g. flavor ID) will only become available after the
    # server is in ACTIVE state, so we need to wait for it.
sdk_server = conn.compute.wait_for_server(sdk_server, failures=['ERROR'], wait=600)
dst_ser_server = server.Server.from_sdk(conn, sdk_server)
if sdk_server:
result['changed'] = True
result['server'] = dst_ser_server.data
result['server_id'] = sdk_server.id
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
| 33.152416 | 106 | 0.703185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,362 | 0.825521 |
81dc5fd1549c9df6ac26331817777ce1242a46e7 | 427 | py | Python | PythonExercicio/ex081.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
]
| null | null | null | PythonExercicio/ex081.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
]
| null | null | null | PythonExercicio/ex081.py | fotavio16/PycharmProjects | f5be49db941de69159ec543e8a6dde61f9f94d86 | [
"MIT"
]
| null | null | null |
valores = []
while True:
num = int(input('Digite um valor: '))
valores.append(num)
cont = str(input('Quer continuar? [S/N] ')).upper()
if cont == 'N':
break
print(f'Você digitou {len(valores)} elementos.')
valores.sort(reverse=True)
print(f'Os valores em ordem decrescente são {valores}')
if 5 in valores:
    print('O valor 5 faz parte da lista!')
else:
    print('O valor 5 não faz parte da lista.')
| 26.6875 | 55 | 0.641686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.474419 |
81dc7fd7e49eea8472a8c802075bd5a03af475ce | 10,670 | py | Python | huobi/client/margin.py | codemonkey89/huobi_Python | 92b96679f6e239c785df7c4354a0a94deda2768f | [
"Apache-2.0"
]
| 1 | 2021-09-06T00:09:11.000Z | 2021-09-06T00:09:11.000Z | huobi/client/margin.py | codemonkey89/huobi_Python | 92b96679f6e239c785df7c4354a0a94deda2768f | [
"Apache-2.0"
]
| null | null | null | huobi/client/margin.py | codemonkey89/huobi_Python | 92b96679f6e239c785df7c4354a0a94deda2768f | [
"Apache-2.0"
]
| null | null | null |
from huobi.utils.input_checker import *
class MarginClient(object):
def __init__(self, **kwargs):
"""
Create the request client instance.
:param kwargs: The option of request connection.
api_key: The public key applied from Huobi.
secret_key: The private key applied from Huobi.
url: The URL name like "https://api.huobi.pro".
init_log: to init logger
"""
self.__kwargs = kwargs
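    # Minimal usage sketch (the key values, URL and amounts below are placeholders /
    # assumptions, not values shipped with this client):
    #
    #   client = MarginClient(api_key="YOUR_KEY", secret_key="YOUR_SECRET",
    #                         url="https://api.huobi.pro")
    #   transfer_id = client.post_transfer_in_margin("btcusdt", "btc", 1.0)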
def post_transfer_in_margin(self, symbol: 'str', currency: 'str', amount: 'float') -> int:
"""
Transfer asset from spot account to margin account.
:param symbol: The symbol, like "btcusdt". (mandatory)
:param currency: The currency of transfer. (mandatory)
:param amount: The amount of transfer. (mandatory)
:return:
"""
check_symbol(symbol)
check_should_not_none(currency, "currency")
check_should_not_none(amount, "amount")
params = {
"symbol": symbol,
"currency": currency,
"amount": amount
}
from huobi.service.margin.post_transfer_in_margin import PostTransferInMarginService
return PostTransferInMarginService(params).request(**self.__kwargs)
def post_transfer_out_margin(self, symbol: 'str', currency: 'str', amount: 'float') -> int:
"""
Transfer asset from margin account to spot account.
:param symbol: The symbol, like "btcusdt". (mandatory)
:param currency: The currency of transfer. (mandatory)
:param amount: The amount of transfer. (mandatory)
:return:
"""
check_symbol(symbol)
check_should_not_none(currency, "currency")
check_should_not_none(amount, "amount")
params = {
"symbol": symbol,
"currency": currency,
"amount": amount
}
from huobi.service.margin.post_transfer_out_margin import PostTransferOutMarginService
return PostTransferOutMarginService(params).request(**self.__kwargs)
def get_margin_account_balance(self, symbol: 'str') -> list:
"""
Get the Balance of the Margin Loan Account.
:param symbol: The currency, like "btc". (mandatory)
:return: The margin loan account detail list.
"""
check_symbol(symbol)
params = {
"symbol": symbol
}
from huobi.service.margin.get_margin_account_balance import GetMarginAccountBalanceService
return GetMarginAccountBalanceService(params).request(**self.__kwargs)
def post_create_margin_order(self, symbol: 'str', currency: 'str', amount: 'float') -> int:
"""
Submit a request to borrow with margin account.
:param symbol: The trading symbol to borrow margin, e.g. "btcusdt", "bccbtc". (mandatory)
:param currency: The currency to borrow,like "btc". (mandatory)
:param amount: The amount of currency to borrow. (mandatory)
:return: The margin order id.
"""
check_symbol(symbol)
check_should_not_none(currency, "currency")
check_should_not_none(amount, "amount")
params = {
"symbol": symbol,
"currency" : currency,
"amount" : amount
}
from huobi.service.margin.post_create_margin_order import PostCreateMarginOrderService
return PostCreateMarginOrderService(params).request(**self.__kwargs)
def post_repay_margin_order(self, loan_id: 'int', amount: 'float') -> int:
"""
        Repay a margin loan order.
        :param loan_id: The previously returned order id when loan order was created. (mandatory)
:param amount: The amount of currency to repay. (mandatory)
:return: The margin order id.
"""
check_should_not_none(loan_id, "loan_id")
check_should_not_none(amount, "amount")
params = {
"loan_id": loan_id,
"amount": amount
}
from huobi.service.margin.post_repay_margin_order import PostRepayMarginOrderService
return PostRepayMarginOrderService(params).request(**self.__kwargs)
def get_margin_loan_orders(self, symbol: 'str', start_date: 'str' = None, end_date: 'str' = None,
states: 'LoanOrderState' = None, from_id: 'int' = None,
size: 'int' = None, direction: 'QueryDirection' = None) -> list:
"""
Get the margin loan records.
:param symbol: The symbol, like "btcusdt" (mandatory).
:param start_date: The search starts date in format yyyy-mm-dd. (optional).
:param end_date: The search end date in format yyyy-mm-dd.(optional, can be null).
:param states: The loan order states, it could be created, accrual, cleared or invalid. (optional)
:param from_id: Search order id to begin with. (optional)
:param size: The number of orders to return.. (optional)
:param direction: The query direction, prev or next. (optional)
:return: The list of the margin loan records.
"""
check_symbol(symbol)
start_date = format_date(start_date, "start_date")
end_date = format_date(end_date, "end_date")
params = {
"symbol" : symbol,
"start-date" : start_date,
"end-date" : end_date,
"states" : states,
"from" : from_id,
"size" : size,
"direct" : direction
}
from huobi.service.margin.get_margin_loan_orders import GetMarginLoanOrdersService
return GetMarginLoanOrdersService(params).request(**self.__kwargs)
def get_margin_loan_info(self, symbols: 'str'=None) -> list:
"""
        Request margin loan info; returns the currency loan info list.
:param symbols: The symbol, like "btcusdt,htusdt". (optional)
:return: The cross margin loan info.
"""
check_symbol(symbols)
params = {
"symbols" : symbols
}
from huobi.service.margin.get_margin_loan_info import GetMarginLoanInfoService
return GetMarginLoanInfoService(params).request(**self.__kwargs)
def get_cross_margin_loan_info(self) -> list:
"""
        Request the currency loan info list for the cross margin account.
:return: The cross margin loan info list.
"""
params = {}
from huobi.service.margin.get_cross_margin_loan_info import GetCrossMarginLoanInfoService
return GetCrossMarginLoanInfoService(params).request(**self.__kwargs)
def post_cross_margin_transfer_in(self, currency: 'str', amount:'float') -> int:
"""
transfer currency to cross account.
:param currency: currency name (mandatory)
:param amount: transfer amount (mandatory)
:return: return transfer id.
"""
check_should_not_none(currency, "currency")
check_should_not_none(amount, "amount")
params = {
"amount": amount,
"currency": currency
}
from huobi.service.margin.post_cross_margin_transfer_in import PostCrossMarginTransferInService
return PostCrossMarginTransferInService(params).request(**self.__kwargs)
def post_cross_margin_transfer_out(self, currency: 'str', amount:'float') -> int:
"""
        transfer currency out of the cross margin account.
:param currency: currency name (mandatory)
:param amount: transfer amount (mandatory)
:return: return transfer id.
"""
check_should_not_none(currency, "currency")
check_should_not_none(amount, "amount")
params = {
"amount": amount,
"currency": currency
}
from huobi.service.margin.post_cross_margin_transfer_out import PostCrossMarginTransferOutService
return PostCrossMarginTransferOutService(params).request(**self.__kwargs)
def post_cross_margin_create_loan_orders(self, currency:'str', amount: 'float') -> int:
"""
create cross margin loan orders
:param currency: currency name (mandatory)
:param amount: transfer amount (mandatory)
:return: return order id.
"""
check_should_not_none(currency, "currency")
check_should_not_none(amount, "amount")
params = {
"amount": amount,
"currency": currency
}
from huobi.service.margin.post_cross_margin_create_loan_orders import PostCrossMarginCreateLoanOrdersService
return PostCrossMarginCreateLoanOrdersService(params).request(**self.__kwargs)
def post_cross_margin_loan_order_repay(self, order_id: 'str', amount: 'float'):
"""
repay cross margin loan orders
:param order_id: order_id for loan (mandatory)
:param amount: transfer amount (mandatory)
:return: return order id.
"""
check_should_not_none(order_id, "order-id")
check_should_not_none(amount, "amount")
params = {
"amount": amount,
"order-id": order_id
}
from huobi.service.margin.post_cross_margin_loan_order_repay import PostCrossMarginLoanOrderRepayService
return PostCrossMarginLoanOrderRepayService(params).request(**self.__kwargs)
def get_cross_margin_loan_orders(self, currency: 'str' = None, state: 'str' = None,
start_date: 'str' = None, end_date: 'str' = None,
from_id: 'int' = None, size: 'int' = None, direct: 'str' = None,
sub_uid: 'int' = None) -> list:
"""
get cross margin loan orders
:return: return list.
"""
params = {
"currency": currency,
"state": state,
"start-date": start_date,
"end-date": end_date,
"from": from_id,
"size": size,
"direct": direct,
"sub-uid": sub_uid
}
from huobi.service.margin.get_cross_margin_loan_orders import GetCrossMarginLoanOrdersService
return GetCrossMarginLoanOrdersService(params).request(**self.__kwargs)
def get_cross_margin_account_balance(self, sub_uid:'int'=None):
"""
get cross margin account balance
:return: cross-margin account.
"""
params = {
"sub-uid": sub_uid
}
from huobi.service.margin.get_cross_margin_account_balance import GetCrossMarginAccountBalanceService
        return GetCrossMarginAccountBalanceService(params).request(**self.__kwargs)
| 36.541096 | 116 | 0.624649 | 10,627 | 0.99597 | 0 | 0 | 0 | 0 | 0 | 0 | 4,326 | 0.405436 |
81dd1a8439621b09316ab23b0da1c48479109ea1 | 2,297 | py | Python | vine/commit.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
]
| 4 | 2017-04-30T17:08:42.000Z | 2019-11-15T04:44:09.000Z | vine/commit.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
]
| 1 | 2016-02-12T07:51:30.000Z | 2016-02-12T07:51:30.000Z | vine/commit.py | robinson96/GRAPE | f6404ae6ee2933647e515a9480077ab01fb2c430 | [
"BSD-3-Clause"
]
| null | null | null |
import os
import option
import grapeGit as git
import grapeConfig
import utility
class Commit(option.Option):
"""
Usage: grape-commit [-m <message>] [-a | <filetree>]
Options:
-m <message> The commit message.
-a Commit modified files that have not been staged.
Arguments:
<filetree> The relative path of files to include in this commit.
"""
def __init__(self):
super(Commit,self).__init__()
self._key = "commit"
self._section = "Workspace"
def description(self):
return "runs git commit in all projects in this workspace"
def commit(self, commitargs, repo):
try:
git.commit(commitargs)
return True
except git.GrapeGitError as e:
utility.printMsg("Commit in %s failed. Perhaps there were no staged changes? Use -a to commit all modified files." % repo)
return False
def execute(self, args):
commitargs = ""
if args['-a']:
commitargs = commitargs + " -a"
elif args["<filetree>"]:
commitargs = commitargs + " %s"% args["<filetree>"]
if not args['-m']:
args["-m"] = utility.userInput("Please enter commit message:")
commitargs += " -m \"%s\"" % args["-m"]
wsDir = utility.workspaceDir()
os.chdir(wsDir)
submodules = [(True, x ) for x in git.getModifiedSubmodules()]
subprojects = [(False, x) for x in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()]
for stage,sub in submodules + subprojects:
os.chdir(os.path.join(wsDir,sub))
subStatus = git.status("--porcelain -uno")
if subStatus:
utility.printMsg("Committing in %s..." % sub)
if self.commit(commitargs, sub) and stage:
os.chdir(wsDir)
utility.printMsg("Staging committed change in %s..." % sub)
git.add(sub)
os.chdir(wsDir)
if submodules or git.status("--porcelain"):
utility.printMsg("Performing commit in outer level project...")
self.commit(commitargs, wsDir)
return True
def setDefaultConfig(self,config):
pass
| 33.289855 | 134 | 0.573792 | 2,214 | 0.963866 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.295168 |
81ddc6f0c5c1c51183abe91817be444c4671d793 | 2,743 | py | Python | allopy/optimize/regret/abstract.py | wangcj05/allopy | 0d97127e5132df1449283198143994b45fb11214 | [
"MIT"
]
| 1 | 2021-04-06T04:33:03.000Z | 2021-04-06T04:33:03.000Z | allopy/optimize/regret/abstract.py | wangcj05/allopy | 0d97127e5132df1449283198143994b45fb11214 | [
"MIT"
]
| null | null | null | allopy/optimize/regret/abstract.py | wangcj05/allopy | 0d97127e5132df1449283198143994b45fb11214 | [
"MIT"
]
| null | null | null |
from abc import ABC
from typing import List, Optional, Union
import numpy as np
from allopy import OptData
from allopy.penalty import NoPenalty, Penalty
__all__ = ["AbstractObjectiveBuilder", "AbstractConstraintBuilder"]
class AbstractObjectiveBuilder(ABC):
def __init__(self, data: List[OptData], cvar_data: List[OptData], rebalance: bool, time_unit):
self.data, self.cvar_data = format_inputs(data, cvar_data, time_unit)
self.rebalance = rebalance
self.num_scenarios = len(data)
assert self.num_scenarios > 0, "Provide data to the optimizer"
assert self.num_scenarios == len(cvar_data), "data and cvar data must have same number of scenarios"
self.num_assets = data[0].n_assets
assert all(d.n_assets == self.num_assets for d in data), \
f"number of assets in data should equal {self.num_assets}"
assert all(d.n_assets == self.num_assets for d in cvar_data), \
f"number of assets in cvar data should equal {self.num_assets}"
self._penalties = [NoPenalty(self.num_assets)] * self.num_scenarios
@property
def penalties(self):
return self._penalties
@penalties.setter
def penalties(self, penalties):
        assert penalties is None or isinstance(penalties, Penalty) or hasattr(penalties, "__iter__"), \
            "penalties can be None, a Penalty instance or an iterable of Penalty instances"
if penalties is None:
self._penalties = [NoPenalty(self.num_assets)] * self.num_scenarios
        elif isinstance(penalties, Penalty):
self._penalties = [penalties] * self.num_scenarios
else:
penalties = list(penalties)
assert len(penalties) == self.num_scenarios, "number of penalties given must match number of scenarios"
assert all(isinstance(p, Penalty) for p in penalties), "non-Penalty instance detected"
self._penalties = penalties
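    # The setter above accepts three shapes of input; an illustrative sketch
    # (builder, p1 and p2 are hypothetical names):
    #
    #   builder.penalties = None            # -> NoPenalty repeated for every scenario
    #   builder.penalties = p1              # -> the same Penalty reused for all scenarios
    #   builder.penalties = [p1, p2, ...]   # -> one Penalty per scenario (length must match)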
class AbstractConstraintBuilder(ABC):
def __init__(self, data: List[OptData], cvar_data: List[OptData], rebalance: bool, time_unit):
self.data, self.cvar_data = format_inputs(data, cvar_data, time_unit)
self.rebalance = rebalance
self.num_scenarios = len(self.data)
def format_inputs(data: List[Union[OptData, np.ndarray]],
cvar_data: Optional[List[Union[OptData, np.ndarray]]],
time_unit: int):
    data = [d if isinstance(d, OptData) else OptData(d, time_unit) for d in data]
    if cvar_data is None:
        # derive the cvar data from the (horizon-truncated) main data when it is not provided
        return data, [d.cut_by_horizon(3) for d in data]
else:
cvar_data = [c if isinstance(c, OptData) else OptData(c, time_unit) for c in cvar_data]
return data, cvar_data
| 40.338235 | 115 | 0.682829 | 2,048 | 0.746628 | 0 | 0 | 867 | 0.316077 | 0 | 0 | 461 | 0.168064 |
81de227f0a3f6458399634f490d77c2bd9c293a6 | 3,786 | py | Python | dataset-processor3.py | Pawel762/class5-homework | 8e48dcda1ed91b7a5e28bea6db13b2a82182e074 | [
"MIT"
]
| null | null | null | dataset-processor3.py | Pawel762/class5-homework | 8e48dcda1ed91b7a5e28bea6db13b2a82182e074 | [
"MIT"
]
| null | null | null | dataset-processor3.py | Pawel762/class5-homework | 8e48dcda1ed91b7a5e28bea6db13b2a82182e074 | [
"MIT"
]
| null | null | null |
import os
import pandas as pd
import matplotlib.pyplot as plt
wine_df = pd.read_csv(filepath_or_buffer='~/class5-homework/wine.data',
sep=',',
header=None)
wine_df.columns = ['Class','Alcohol','Malic_Acid','Ash','Alcalinity_of_Ash','Magnesium',
'Total_Phenols','Flavanoids','Nonflavanoid_Phenols','Proanthocyanins',
'Color_Intensity','Hue','OD280_OD315_of_Diluted_Wines','Proline']
wine_B = wine_df.drop(['Class'], axis = 1)
os.makedirs('graphs', exist_ok=True)
#Plotting line for alcohol
plt.plot(wine_B['Alcohol'], color='g')
plt.title('Alcohol by Index')
plt.xlabel('Index')
plt.ylabel('Alcohol')
plt.savefig(f'graphs/Alcohol_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Malic_Acid
plt.plot(wine_B['Malic_Acid'], color='g')
plt.title('Malic_Acid by Index')
plt.xlabel('Index')
plt.ylabel('Malic_Acid')
plt.savefig(f'graphs/Malic_Acid_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Ash
plt.plot(wine_B['Ash'], color='g')
plt.title('Ash by Index')
plt.xlabel('Index')
plt.ylabel('Ash')
plt.savefig(f'graphs/Ash_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Alcalinity_of_Ash
plt.plot(wine_B['Alcalinity_of_Ash'], color='g')
plt.title('Alcalinity_of_Ash by Index')
plt.xlabel('Index')
plt.ylabel('Alcalinity_of_Ash')
plt.savefig(f'graphs/Alcalinity_of_Ash_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Magnesium
plt.plot(wine_B['Magnesium'], color='g')
plt.title('Magnesium by Index')
plt.xlabel('Index')
plt.ylabel('Magnesium')
plt.savefig(f'graphs/Magnesium_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Total_Phenols
plt.plot(wine_B['Total_Phenols'], color='g')
plt.title('Total_Phenols by Index')
plt.xlabel('Index')
plt.ylabel('Total_Phenols')
plt.savefig(f'graphs/Total_Phenols_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Flavanoids
plt.plot(wine_B['Flavanoids'], color='g')
plt.title('Flavanoids by Index')
plt.xlabel('Index')
plt.ylabel('Flavanoids')
plt.savefig(f'graphs/Flavanoids_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Nonflavanoid_Phenols
plt.plot(wine_B['Nonflavanoid_Phenols'], color='g')
plt.title('Nonflavanoid_Phenols by Index')
plt.xlabel('Index')
plt.ylabel('Nonflavanoid_Phenols')
plt.savefig(f'graphs/Nonflavanoid_Phenols_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Proanthocyanins
plt.plot(wine_B['Proanthocyanins'], color='g')
plt.title('Proanthocyanins by Index')
plt.xlabel('Index')
plt.ylabel('Proanthocyanins')
plt.savefig(f'graphs/Proanthocyanins_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Color_Intensity
plt.plot(wine_B['Color_Intensity'], color='g')
plt.title('Color_Intensity by Index')
plt.xlabel('Index')
plt.ylabel('Color_Intensity')
plt.savefig(f'graphs/Color_Intensity_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Hue
plt.plot(wine_B['Hue'], color='g')
plt.title('Hue by Index')
plt.xlabel('Index')
plt.ylabel('Hue')
plt.savefig(f'graphs/Hue_by_index_plot.png', format='png')
plt.clf()
#Plotting line for OD280_OD315_of_Diluted_Wines
plt.plot(wine_B['OD280_OD315_of_Diluted_Wines'], color='g')
plt.title('OD280_OD315_of_Diluted_Wines by Index')
plt.xlabel('Index')
plt.ylabel('OD280_OD315_of_Diluted_Wines')
plt.savefig(f'graphs/OD280_OD315_of_Diluted_Wines_by_index_plot.png', format='png')
plt.clf()
#Plotting line for Proline
plt.plot(wine_B['Proline'], color='g')
plt.title('Proline by Index')
plt.xlabel('Index')
plt.ylabel('Proline')
plt.savefig(f'graphs/Proline_by_index_plot.png', format='png')
plt.clf()
#plt.plot(wine_B[i], color='green')
#plt.title(str(i)+' by Index')
#plt.xlabel('Index')
#plt.ylabel(i)
#plt.savefig(f'graphs/'+str(i)+'_by_index_plot.png', format='png')
#plt.clf()
| 29.578125 | 88 | 0.744057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,185 | 0.577126 |
81e0b2153d543771f9ccb08bc53b840667a38757 | 7,451 | py | Python | ares/attack/bim.py | KuanKuanQAQ/ares | 40dbefc18f6438e1812021fe6d6c3195f22ca295 | [
"MIT"
]
| 206 | 2020-12-31T09:43:11.000Z | 2022-03-30T07:02:41.000Z | ares/attack/bim.py | afoolboy/ares | 89610d41fdde194e4ad916d29961aaed73383692 | [
"MIT"
]
| 7 | 2021-01-26T06:45:44.000Z | 2022-02-26T05:25:48.000Z | ares/attack/bim.py | afoolboy/ares | 89610d41fdde194e4ad916d29961aaed73383692 | [
"MIT"
]
| 61 | 2020-12-29T14:02:41.000Z | 2022-03-26T14:21:10.000Z |
import tensorflow as tf
import numpy as np
from ares.attack.base import BatchAttack
from ares.attack.utils import get_xs_ph, get_ys_ph, maybe_to_array, get_unit
class BIM(BatchAttack):
''' Basic Iterative Method (BIM). A white-box iterative constraint-based method. Require a differentiable loss
function and a ``ares.model.Classifier`` model.
- Supported distance metric: ``l_2``, ``l_inf``.
- Supported goal: ``t``, ``tm``, ``ut``.
- References: https://arxiv.org/abs/1607.02533.
'''
def __init__(self, model, batch_size, loss, goal, distance_metric, session, iteration_callback=None):
''' Initialize BIM.
:param model: The model to attack. A ``ares.model.Classifier`` instance.
:param batch_size: Batch size for the ``batch_attack()`` method.
:param loss: The loss function to optimize. A ``ares.loss.Loss`` instance.
:param goal: Adversarial goals. All supported values are ``'t'``, ``'tm'``, and ``'ut'``.
:param distance_metric: Adversarial distance metric. All supported values are ``'l_2'`` and ``'l_inf'``.
:param session: The ``tf.Session`` to run the attack in. The ``model`` should be loaded into this session.
        :param iteration_callback: A function accepting a ``xs`` ``tf.Tensor`` (the original examples) and a ``xs_adv``
        ``tf.Tensor`` (the adversarial examples for ``xs``). During ``batch_attack()``, this callback function would
        be run after each iteration, and its return value would be yielded back to the caller. By default,
``iteration_callback`` is ``None``.
'''
self.model, self.batch_size, self._session = model, batch_size, session
self.loss, self.goal, self.distance_metric = loss, goal, distance_metric
# placeholder for batch_attack's input
self.xs_ph = get_xs_ph(model, batch_size)
self.ys_ph = get_ys_ph(model, batch_size)
# flatten shape of xs_ph
xs_flatten_shape = (batch_size, np.prod(self.model.x_shape))
# store xs and ys in variables to reduce memory copy between tensorflow and python
# variable for the original example with shape of (batch_size, D)
self.xs_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))
# variable for labels
self.ys_var = tf.Variable(tf.zeros(shape=(batch_size,), dtype=self.model.y_dtype))
# variable for the (hopefully) adversarial example with shape of (batch_size, D)
self.xs_adv_var = tf.Variable(tf.zeros(shape=xs_flatten_shape, dtype=self.model.x_dtype))
# magnitude
self.eps_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))
self.eps_var = tf.Variable(tf.zeros((self.batch_size,), dtype=self.model.x_dtype))
# step size
self.alpha_ph = tf.placeholder(self.model.x_dtype, (self.batch_size,))
self.alpha_var = tf.Variable(tf.zeros((self.batch_size,), dtype=self.model.x_dtype))
# expand dim for easier broadcast operations
eps = tf.expand_dims(self.eps_var, 1)
alpha = tf.expand_dims(self.alpha_var, 1)
# calculate loss' gradient with relate to the adversarial example
# grad.shape == (batch_size, D)
self.xs_adv_model = tf.reshape(self.xs_adv_var, (batch_size, *self.model.x_shape))
self.loss = loss(self.xs_adv_model, self.ys_var)
grad = tf.gradients(self.loss, self.xs_adv_var)[0]
if goal == 't' or goal == 'tm':
grad = -grad
elif goal != 'ut':
raise NotImplementedError
# update the adversarial example
if distance_metric == 'l_2':
grad_unit = get_unit(grad)
xs_adv_delta = self.xs_adv_var - self.xs_var + alpha * grad_unit
# clip by max l_2 magnitude of adversarial noise
xs_adv_next = self.xs_var + tf.clip_by_norm(xs_adv_delta, eps, axes=[1])
elif distance_metric == 'l_inf':
xs_lo, xs_hi = self.xs_var - eps, self.xs_var + eps
grad_sign = tf.sign(grad)
# clip by max l_inf magnitude of adversarial noise
xs_adv_next = tf.clip_by_value(self.xs_adv_var + alpha * grad_sign, xs_lo, xs_hi)
else:
raise NotImplementedError
# clip by (x_min, x_max)
xs_adv_next = tf.clip_by_value(xs_adv_next, self.model.x_min, self.model.x_max)
self.update_xs_adv_step = self.xs_adv_var.assign(xs_adv_next)
self.config_eps_step = self.eps_var.assign(self.eps_ph)
self.config_alpha_step = self.alpha_var.assign(self.alpha_ph)
self.setup_xs = [self.xs_var.assign(tf.reshape(self.xs_ph, xs_flatten_shape)),
self.xs_adv_var.assign(tf.reshape(self.xs_ph, xs_flatten_shape))]
self.setup_ys = self.ys_var.assign(self.ys_ph)
self.iteration = None
self.iteration_callback = None
if iteration_callback is not None:
xs_model = tf.reshape(self.xs_var, (self.batch_size, *self.model.x_shape))
self.iteration_callback = iteration_callback(xs_model, self.xs_adv_model)
def config(self, **kwargs):
''' (Re)config the attack.
:param magnitude: Max distortion, could be either a float number or a numpy float number array with shape of
(batch_size,).
:param alpha: Step size for each iteration, could be either a float number or a numpy float number array with
shape of (batch_size,).
:param iteration: Iteration count. An integer.
'''
if 'magnitude' in kwargs:
eps = maybe_to_array(kwargs['magnitude'], self.batch_size)
self._session.run(self.config_eps_step, feed_dict={self.eps_ph: eps})
if 'alpha' in kwargs:
alpha = maybe_to_array(kwargs['alpha'], self.batch_size)
self._session.run(self.config_alpha_step, feed_dict={self.alpha_ph: alpha})
if 'iteration' in kwargs:
self.iteration = kwargs['iteration']
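    # Rough end-to-end usage sketch (model, loss_fn, sess, xs and ys are assumed to
    # be created elsewhere; the numeric settings are illustrative only):
    #
    #   attack = BIM(model, batch_size=32, loss=loss_fn, goal='ut',
    #                distance_metric='l_inf', session=sess)
    #   attack.config(magnitude=8.0 / 255, alpha=2.0 / 255, iteration=10)
    #   xs_adv = attack.batch_attack(xs, ys=ys)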
def _batch_attack_generator(self, xs, ys, ys_target):
''' Attack a batch of examples. It is a generator which yields back ``iteration_callback()``'s return value
after each iteration if the ``iteration_callback`` is not ``None``, and returns the adversarial examples.
'''
labels = ys if self.goal == 'ut' else ys_target
self._session.run(self.setup_xs, feed_dict={self.xs_ph: xs})
self._session.run(self.setup_ys, feed_dict={self.ys_ph: labels})
for _ in range(self.iteration):
self._session.run(self.update_xs_adv_step)
if self.iteration_callback is not None:
yield self._session.run(self.iteration_callback)
return self._session.run(self.xs_adv_model)
def batch_attack(self, xs, ys=None, ys_target=None):
''' Attack a batch of examples.
:return: When the ``iteration_callback`` is ``None``, return the generated adversarial examples. When the
``iteration_callback`` is not ``None``, return a generator, which yields back the callback's return value
after each iteration and returns the generated adversarial examples.
'''
g = self._batch_attack_generator(xs, ys, ys_target)
if self.iteration_callback is None:
try:
next(g)
except StopIteration as exp:
return exp.value
else:
return g
| 53.992754 | 120 | 0.65414 | 7,286 | 0.977855 | 757 | 0.101597 | 0 | 0 | 0 | 0 | 3,009 | 0.403838 |
81e170cb1bf7f677e6d97334533f66e198b1aa28 | 3,148 | py | Python | parasite/resolver.py | SGevorg/parasite | 574b3992abeef03406524a94b6a8a2d662ca13e7 | [
"MIT"
]
| 9 | 2020-09-21T11:21:01.000Z | 2020-12-18T08:21:27.000Z | parasite/resolver.py | bittlingmayer/parasite | daac95eeaa19d5b05c0a3af076e364ca21708ff4 | [
"MIT"
]
| 1 | 2020-11-21T09:45:45.000Z | 2020-11-21T09:45:45.000Z | parasite/resolver.py | bittlingmayer/parasite | daac95eeaa19d5b05c0a3af076e364ca21708ff4 | [
"MIT"
]
| 4 | 2020-11-21T09:08:30.000Z | 2020-12-05T15:46:56.000Z |
import numpy as np
from functools import lru_cache
from typing import Tuple
class DynamicResolver:
def __init__(self,
matrix: np.ndarray,
*,
num_src_lines: int = None,
num_tgt_lines: int = None,
max_k: int = 3,
windows_importance: bool = False
):
self.matrix = 100 - matrix
self.max_k = max_k
self.windows_importance = windows_importance
self.n, self.m = matrix.shape
self.num_src_lines = num_src_lines or self.n
self.num_tgt_lines = num_tgt_lines or self.m
def __call__(self) -> Tuple[float, Tuple]:
best, path = self.resolve()
return best, path
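    # Illustrative call (the score matrix below is random, purely to show shapes and usage):
    #
    #   scores = np.random.uniform(0, 100, size=(20, 18))
    #   best_score, path = DynamicResolver(scores, max_k=3)()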
@lru_cache(maxsize=None)
def offset(self,
begin: int,
end: int,
num_lines: int) -> int:
if end - begin == 1:
return begin
num_window_elements = num_lines - (end - begin) + 2
prev_offset = self.offset(begin, end - 1, num_lines)
return prev_offset + num_window_elements
def extract_candidate(self,
i: int, src_window_size: int,
j: int, tgt_window_size: int,) -> Tuple[float, Tuple]:
from_i = i - src_window_size
from_j = j - tgt_window_size
if from_i < 0 or from_j < 0:
return 0, ()
candidate_score, candidate_path = self.resolve(from_i, from_j)
if src_window_size == 0 or tgt_window_size == 0:
return candidate_score, candidate_path
offset_i = self.offset(from_i, i, self.num_src_lines)
offset_j = self.offset(from_j, j, self.num_tgt_lines)
if offset_i >= self.n or offset_j >= self.m:
return 0, ()
added_score = self.matrix[offset_i, offset_j]
if self.windows_importance:
added_score *= (src_window_size + tgt_window_size)
candidate_score += added_score
candidate_path = ((offset_i, offset_j), candidate_path)
return candidate_score, candidate_path
@lru_cache(maxsize=None)
def resolve(self,
i: int = None, j: int = None) -> Tuple[float, Tuple]:
if i is None:
i = self.num_src_lines
if j is None:
j = self.num_tgt_lines
if i <= 0 or j <= 0:
return 0, ()
best_score: float = 0.0
best_path: Tuple = ()
for src_window_size in range(self.max_k + 1):
for tgt_window_size in range(self.max_k + 1):
if src_window_size == 0 and tgt_window_size == 0:
continue
if src_window_size > 1 and tgt_window_size > 1:
continue
candidate = self.extract_candidate(i, src_window_size,
j, tgt_window_size)
candidate_score, candidate_path = candidate
if candidate_score > best_score:
best_score = candidate_score
best_path = candidate_path
return best_score, best_path
| 32.453608 | 80 | 0.55432 | 3,067 | 0.974269 | 0 | 0 | 1,395 | 0.443139 | 0 | 0 | 0 | 0 |
81e2d167ec9fa89c74b62f2bf234fc1429ff2619 | 3,864 | py | Python | utils/preprocess.py | Deep-MI/3d-neuro-seg | 57cc1e16e5ecbef8caf9f6f1e735a0e7339d1152 | [
"Apache-2.0"
]
| null | null | null | utils/preprocess.py | Deep-MI/3d-neuro-seg | 57cc1e16e5ecbef8caf9f6f1e735a0e7339d1152 | [
"Apache-2.0"
]
| null | null | null | utils/preprocess.py | Deep-MI/3d-neuro-seg | 57cc1e16e5ecbef8caf9f6f1e735a0e7339d1152 | [
"Apache-2.0"
]
| null | null | null |
import numpy as np
"""
Contains preprocessing code for creating additional information based on MRI volumes and true segmentation maps (asegs).
Eg. weight masks for median frequency class weighing, edge weighing etc.
"""
def create_weight_mask(aseg):
"""
Main function for calculating weight mask of segmentation map for loss function. Currently only Median Frequency
Weighing is implemented. Other types can be additively added to the 'weights' variable
Args:
aseg (numpy.ndarray): Segmentation map with shape l x w x d
Returns:
numpy.ndarray: Weight Mask of same shape as aseg
"""
if len(aseg.shape)==4:
_, h,w,d = aseg.shape
elif len(aseg.shape)==3:
h,w,d = aseg.shape
weights = np.zeros((h,w,d), dtype=float) # Container ndarray of zeros for weights
weights += median_freq_class_weighing(aseg) # Add median frequency weights
# Further weights (eg. extra weights for region borders) can be added here
# Eg. weights += edge_weights(aseg)
return weights
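# Hypothetical call showing the expected shapes (a real aseg would come from a
# segmentation volume, not random data):
#
#   aseg = np.random.randint(0, 3, size=(64, 64, 64))
#   weights = create_weight_mask(aseg)   # -> float array, same shape as aseg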
def median_freq_class_weighing(aseg):
"""
Median Frequency Weighing. Guarded against class absence of certain classes.
Args:
aseg (numpy.ndarray): Segmentation map with shape l x w x d
Returns:
numpy.ndarray: Median frequency weighted mask of same shape as aseg
"""
# Calculates median frequency based weighing for classes
unique, counts = np.unique(aseg, return_counts=True)
if len(aseg.shape)==4:
_, h,w,d = aseg.shape
elif len(aseg.shape)==3:
h,w,d = aseg.shape
class_wise_weights = np.median(counts)/counts
aseg = aseg.astype(int)
# Guards against the absence of certain classes in sample
discon_guard_lut = np.zeros(int(max(unique))+1)-1
for idx, val in enumerate(unique):
discon_guard_lut[int(val)] = idx
discon_guard_lut = discon_guard_lut.astype(int)
# Assigns weights to w_mask and resets the missing classes
w_mask = np.reshape(class_wise_weights[discon_guard_lut[aseg.ravel()]], (h, w, d))
return w_mask
# Label mapping functions (to aparc (eval) and to label (train))
def map_label2aparc_aseg(mapped_aseg):
"""
Function to perform look-up table mapping from label space to aparc.DKTatlas+aseg space
:param np.ndarray mapped_aseg: label space segmentation (aparc.DKTatlas + aseg)
    :return: np.ndarray: segmentation volume mapped back to aparc.DKTatlas+aseg label codes, same shape as the input
"""
aseg = np.zeros_like(mapped_aseg)
labels = np.array([0, 2, 4, 5, 7, 8, 10, 11, 12, 13, 14,
15, 16, 17, 18, 24, 26, 28, 31, 41, 43, 44,
46, 47, 49, 50, 51, 52, 53, 54, 58, 60, 63,
77, 1002, 1003, 1005, 1006, 1007, 1008, 1009, 1010, 1011,
1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022,
1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1034, 1035,
2002, 2005, 2010, 2012, 2013, 2014, 2016, 2017, 2021, 2022, 2023,
2024, 2025, 2028])
h, w, d = aseg.shape
aseg = labels[mapped_aseg.ravel()]
aseg = aseg.reshape((h, w, d))
return aseg
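# For example, a voxel carrying label index 5 is mapped back to FreeSurfer code 8,
# because labels[5] == 8 in the look-up array above.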
# if __name__ == "__main__":
# #a = np.random.randint(0, 5, size=(10,10,10))
# #b = np.random.randint(5, 10, size=(10000))
#
# #map_masks_into_5_classes(np.random.randint(0, 250, size=(256, 256, 256)))
#
# import nibabel as nib
# from data_utils.process_mgz_into_hdf5 import map_aparc_aseg2label, map_aseg2label
# path = r"abide_ii/sub-28675/mri/aparc.DKTatlas+aseg.mgz"
# aseg = nib.load(path).get_data()
# labels_full, _ = map_aparc_aseg2label(aseg) # only for 79 classes case
# # labels_full, _ = map_aseg2label(aseg) # only for 37 classes case
# aseg = labels_full
# # print(aseg.shape)
# median_freq_class_weighing(aseg)
# # print(edge_weighing(aseg, 1.5))
| 35.777778 | 120 | 0.646222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,152 | 0.556936 |
81e2ef476be7e9c25d158962fb2d60491bb44e8a | 4,381 | py | Python | test/test_oneview_hypervisor_cluster_profile_facts.py | nabhajit-ray/oneview-ansible | b31af8a696013bac7a1900748a2fa5ba491fe8e2 | [
"Apache-2.0"
]
| 108 | 2016-06-28T18:14:08.000Z | 2022-02-21T09:16:06.000Z | test/test_oneview_hypervisor_cluster_profile_facts.py | HPE-Japan-Presales/oneview-ansible | 26eb13354333d862d9e80f07e3fe9bbe2eb59af3 | [
"Apache-2.0"
]
| 248 | 2016-07-14T12:50:17.000Z | 2022-02-06T18:57:16.000Z | test/test_oneview_hypervisor_cluster_profile_facts.py | HPE-Japan-Presales/oneview-ansible | 26eb13354333d862d9e80f07e3fe9bbe2eb59af3 | [
"Apache-2.0"
]
| 88 | 2016-06-29T15:52:44.000Z | 2022-03-10T12:34:41.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
import mock
from copy import deepcopy
from hpe_test_utils import OneViewBaseFactsTest
from oneview_module_loader import HypervisorClusterProfileFactsModule
PROFILE_URI = '/rest/hypervisor-cluster-profiles/57d3af2a-b6d2-4446-8645-f38dd808ea4d'
PARAMS_GET_ALL = dict(
config='config.json'
)
PARAMS_GET_BY_NAME = dict(
config='config.json',
name="Test Cluster Profile"
)
PARAMS_GET_BY_URI = dict(
config='config.json',
uri="/rest/test/123"
)
PARAMS_WITH_OPTIONS = dict(
config='config.json',
name="Test Cluster Profile",
options=[
'compliancePreview',
]
)
@pytest.mark.resource(TestHypervisorClusterProfileFactsModule='hypervisor_cluster_profiles')
class TestHypervisorClusterProfileFactsModule(OneViewBaseFactsTest):
"""
FactsParamsTestCase has common tests for the parameters support.
"""
def test_should_get_all_cluster_profiles(self):
cluster_profiles = [
{"name": "Cluster Profile Name 1"},
{"name": "Cluster Profile Name 2"}
]
self.mock_ov_client.hypervisor_cluster_profiles.get_all.return_value = cluster_profiles
self.mock_ansible_module.params = deepcopy(PARAMS_GET_ALL)
HypervisorClusterProfileFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(hypervisor_cluster_profiles=cluster_profiles)
)
def test_should_get_by_name(self):
profile = {"name": "Test Cluster Profile", 'uri': '/rest/test/123'}
obj = mock.Mock()
obj.data = profile
self.mock_ov_client.hypervisor_cluster_profiles.get_by_name.return_value = obj
self.mock_ansible_module.params = deepcopy(PARAMS_GET_BY_NAME)
HypervisorClusterProfileFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(hypervisor_cluster_profiles=[profile])
)
def test_should_get_by_uri(self):
cluster_profile = {"name": "Test Cluster Profile", 'uri': '/rest/test/123'}
obj = mock.Mock()
obj.data = cluster_profile
self.mock_ov_client.hypervisor_cluster_profiles.get_by_uri.return_value = obj
self.mock_ansible_module.params = deepcopy(PARAMS_GET_BY_URI)
HypervisorClusterProfileFactsModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(hypervisor_cluster_profiles=[cluster_profile])
)
def test_should_get_cluster_profile_by_name_with_all_options(self):
mock_option_return = {'subresource': 'value'}
self.mock_ov_client.hypervisor_cluster_profiles.data = {"name": "Test Cluster Profile", "uri": PROFILE_URI}
self.mock_ov_client.hypervisor_cluster_profiles.get_by_name.return_value = \
self.mock_ov_client.hypervisor_cluster_profiles
self.mock_ov_client.hypervisor_cluster_profiles.get_compliance_preview.return_value = mock_option_return
self.mock_ansible_module.params = deepcopy(PARAMS_WITH_OPTIONS)
HypervisorClusterProfileFactsModule().run()
self.mock_ov_client.hypervisor_cluster_profiles.get_compliance_preview.assert_called_once_with()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts={'hypervisor_cluster_profiles': [{'name': 'Test Cluster Profile', 'uri': PROFILE_URI}],
'hypervisor_cluster_profile_compliance_preview': mock_option_return,
}
)
if __name__ == '__main__':
pytest.main([__file__])
| 35.048 | 115 | 0.71947 | 2,953 | 0.674047 | 0 | 0 | 3,046 | 0.695275 | 0 | 0 | 1,283 | 0.292856 |
81e5c39849311e6837ffa50cd43accfe28aa75bf | 1,339 | py | Python | utils/predictions.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
]
| 19 | 2018-06-08T05:33:47.000Z | 2021-04-26T16:19:32.000Z | utils/predictions.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
]
| null | null | null | utils/predictions.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
]
| 13 | 2018-09-24T21:52:06.000Z | 2021-02-26T10:40:25.000Z |
import os
import scipy
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
def predict_batch(net, inputs):
v = Variable(inputs.cuda(), volatile=True)
return net(v).data.cpu().numpy()
def get_probabilities(model, loader):
model.eval()
    return np.vstack([predict_batch(model, data[0]) for data in loader])
def get_predictions(probs, thresholds):
preds = np.copy(probs)
preds[preds >= thresholds] = 1
preds[preds < thresholds] = 0
return preds.astype('uint8')
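# Toy illustration of the thresholding helper above (values are made up):
#
#   probs = np.array([[0.2, 0.9], [0.7, 0.4]])
#   get_predictions(probs, 0.5)   # -> [[0, 1], [1, 0]] as uint8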
def get_argmax(output):
val,idx = torch.max(output, dim=1)
return idx.data.cpu().view(-1).numpy()
def get_targets(loader):
targets = None
for data in loader:
if targets is None:
shape = list(data[1].size())
shape[0] = 0
targets = np.empty(shape)
target = data[1]
if len(target.size()) == 1:
target = target.view(-1,1)
target = target.numpy()
targets = np.vstack([targets, target])
return targets
def ensemble_with_method(arr, method):
    # NOTE: `c` is not imported in this module; it is assumed to be a constants
    # module (or similar) that defines MEAN, GMEAN and VOTE.
    if method == c.MEAN:
return np.mean(arr, axis=0)
elif method == c.GMEAN:
return scipy.stats.mstats.gmean(arr, axis=0)
elif method == c.VOTE:
return scipy.stats.mode(arr, axis=0)[0][0]
raise Exception("Operation not found") | 25.264151 | 70 | 0.630321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.020911 |
81e5f55f1de69308bb6ff205c3967683e8097ccc | 3,806 | py | Python | gammapy/data/tests/test_pointing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
]
| 155 | 2015-02-25T12:38:02.000Z | 2022-03-13T17:54:30.000Z | gammapy/data/tests/test_pointing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
]
| 3,131 | 2015-01-06T15:36:23.000Z | 2022-03-31T17:30:57.000Z | gammapy/data/tests/test_pointing.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
]
| 158 | 2015-03-16T20:36:44.000Z | 2022-03-30T16:05:37.000Z |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.time import Time
from gammapy.data import FixedPointingInfo, PointingInfo
from gammapy.utils.testing import assert_time_allclose, requires_data
@requires_data()
class TestFixedPointingInfo:
@classmethod
def setup_class(cls):
filename = "$GAMMAPY_DATA/tests/pointing_table.fits.gz"
cls.fpi = FixedPointingInfo.read(filename)
def test_location(self):
lon, lat, height = self.fpi.location.geodetic
assert_allclose(lon.deg, 16.5002222222222)
assert_allclose(lat.deg, -23.2717777777778)
assert_allclose(height.value, 1834.999999999783)
def test_time_ref(self):
expected = Time(51910.00074287037, format="mjd", scale="tt")
assert_time_allclose(self.fpi.time_ref, expected)
def test_time_start(self):
time = self.fpi.time_start
expected = Time(53025.826414166666, format="mjd", scale="tt")
assert_time_allclose(time, expected)
def test_time_stop(self):
time = self.fpi.time_stop
expected = Time(53025.844770648146, format="mjd", scale="tt")
assert_time_allclose(time, expected)
def test_duration(self):
duration = self.fpi.duration
assert_allclose(duration.sec, 1586.0000000044238)
def test_radec(self):
pos = self.fpi.radec
assert_allclose(pos.ra.deg, 83.633333333333)
assert_allclose(pos.dec.deg, 24.51444444)
assert pos.name == "icrs"
def test_altaz(self):
pos = self.fpi.altaz
assert_allclose(pos.az.deg, 7.48272)
assert_allclose(pos.alt.deg, 41.84191)
assert pos.name == "altaz"
@requires_data()
class TestPointingInfo:
@classmethod
def setup_class(cls):
filename = "$GAMMAPY_DATA/tests/pointing_table.fits.gz"
cls.pointing_info = PointingInfo.read(filename)
def test_str(self):
ss = str(self.pointing_info)
assert "Pointing info" in ss
def test_location(self):
lon, lat, height = self.pointing_info.location.geodetic
assert_allclose(lon.deg, 16.5002222222222)
assert_allclose(lat.deg, -23.2717777777778)
assert_allclose(height.value, 1834.999999999783)
def test_time_ref(self):
expected = Time(51910.00074287037, format="mjd", scale="tt")
assert_time_allclose(self.pointing_info.time_ref, expected)
def test_table(self):
assert len(self.pointing_info.table) == 100
def test_time(self):
time = self.pointing_info.time
assert len(time) == 100
expected = Time(53025.826414166666, format="mjd", scale="tt")
assert_time_allclose(time[0], expected)
def test_duration(self):
duration = self.pointing_info.duration
assert_allclose(duration.sec, 1586.0000000044238)
def test_radec(self):
pos = self.pointing_info.radec[0]
assert_allclose(pos.ra.deg, 83.633333333333)
assert_allclose(pos.dec.deg, 24.51444444)
assert pos.name == "icrs"
def test_altaz(self):
pos = self.pointing_info.altaz[0]
assert_allclose(pos.az.deg, 11.45751357)
assert_allclose(pos.alt.deg, 41.34088901)
assert pos.name == "altaz"
def test_altaz_from_table(self):
pos = self.pointing_info.altaz_from_table[0]
assert_allclose(pos.az.deg, 11.20432353385406)
assert_allclose(pos.alt.deg, 41.37921408774436)
assert pos.name == "altaz"
def test_altaz_interpolate(self):
time = self.pointing_info.time[0]
pos = self.pointing_info.altaz_interpolate(time)
assert_allclose(pos.az.deg, 11.45751357)
assert_allclose(pos.alt.deg, 41.34088901)
assert pos.name == "altaz"
| 34.6 | 69 | 0.679453 | 3,503 | 0.920389 | 0 | 0 | 3,537 | 0.929322 | 0 | 0 | 251 | 0.065949 |
81e620b1dfd869927a5135342a7294ba02276c08 | 1,183 | py | Python | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
]
| null | null | null | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
]
| null | null | null | src/config.py | BRAVO68WEB/architus | 21b9f94a64b142ee6e9b5efd79bd872a13ce8f6a | [
"MIT"
]
| null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# from src.commands import *
# import src.commands as command_modules
secret_token = None
db_user = None
db_pass = None
sessions = {}
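# The .secret_token file is read positionally below; expected layout (one value per
# line): bot token, db user, db password, client id, client secret, twitter consumer
# key/secret, twitter access token key/secret, scraper token.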
try:
lines = [line.rstrip('\n') for line in open('.secret_token')]
secret_token = lines[0]
db_user = lines[1]
db_pass = lines[2]
client_id = lines[3]
client_secret = lines[4]
twitter_consumer_key = lines[5]
twitter_consumer_secret = lines[6]
twitter_access_token_key = lines[7]
twitter_access_token_secret = lines[8]
scraper_token = lines[9]
except Exception as e:
print(e)
print('error reading .secret_token, make it you aut')
def get_session(pid=None):
if pid in sessions:
return sessions[pid]
print("creating postgres session")
try:
engine = create_engine("postgresql://{}:{}@localhost/autbot".format(db_user, db_pass))
Session = sessionmaker(bind=engine)
session = Session()
sessions[pid] = session
except Exception as e:
session = None
print('failed to connect to database')
print(e)
return session
session = get_session()
| 25.170213 | 94 | 0.674556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.19273 |
81e6447d74e137ba6ed7fb43a3550f34c92da3a7 | 2,876 | py | Python | aict_tools/scripts/plot_regressor_performance.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
]
| null | null | null | aict_tools/scripts/plot_regressor_performance.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
]
| null | null | null | aict_tools/scripts/plot_regressor_performance.py | LukasBeiske/aict-tools | ccf61c051c58040cf4b676180ae7184021d1b81b | [
"MIT"
]
| null | null | null | import click
import logging
import matplotlib
import matplotlib.pyplot as plt
import joblib
import fact.io
from ..configuration import AICTConfig
from ..plotting import (
plot_regressor_confusion,
plot_bias_resolution,
plot_feature_importances,
)
if matplotlib.get_backend() == 'pgf':
from matplotlib.backends.backend_pgf import PdfPages
else:
from matplotlib.backends.backend_pdf import PdfPages
@click.command()
@click.argument('configuration_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('performance_path', type=click.Path(exists=True, dir_okay=False))
@click.argument('model_path', type=click.Path(exists=True, dir_okay=False))
@click.option('-o', '--output', type=click.Path(exists=False, dir_okay=False))
@click.option('-k', '--key', help='HDF5 key for hdf5', default='data')
def main(configuration_path, performance_path, model_path, output, key):
''' Create some performance evaluation plots for the separator '''
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
log.info('Loading perfomance data')
df = fact.io.read_data(performance_path, key=key)
log.info('Loading model')
model = joblib.load(model_path)
config = AICTConfig.from_yaml(configuration_path)
model_config = config.energy
energy_unit = config.energy_unit
figures = []
# Plot confusion
figures.append(plt.figure())
ax = figures[-1].add_subplot(1, 1, 1)
ax.set_title('Reconstructed vs. True Energy (log color scale)')
plot_regressor_confusion(
df, ax=ax,
label_column=model_config.target_column,
prediction_column=model_config.output_name,
energy_unit=energy_unit,
)
# Plot confusion
figures.append(plt.figure())
ax = figures[-1].add_subplot(1, 1, 1)
ax.set_title('Reconstructed vs. True Energy (linear color scale)')
plot_regressor_confusion(
df, log_z=False, ax=ax,
label_column=model_config.target_column,
prediction_column=model_config.output_name,
energy_unit=energy_unit,
)
# Plot bias/resolution
figures.append(plt.figure())
ax = figures[-1].add_subplot(1, 1, 1)
ax.set_title('Bias and Resolution')
plot_bias_resolution(
df, bins=15, ax=ax,
label_column=model_config.target_column,
prediction_column=model_config.output_name,
energy_unit=energy_unit,
)
if hasattr(model, 'feature_importances_'):
# Plot feature importances
figures.append(plt.figure())
ax = figures[-1].add_subplot(1, 1, 1)
features = model_config.features
plot_feature_importances(model, features, ax=ax)
if output is None:
plt.show()
else:
with PdfPages(output) as pdf:
for fig in figures:
fig.tight_layout(pad=0)
pdf.savefig(fig)
| 30.924731 | 83 | 0.691586 | 0 | 0 | 0 | 0 | 2,454 | 0.853268 | 0 | 0 | 435 | 0.151252 |
c48c8a45a8bc31ea98b3b0eb49ac12298185c634 | 2,426 | py | Python | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
]
| 435 | 2019-11-04T22:35:50.000Z | 2022-03-29T20:15:07.000Z | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
]
| 331 | 2021-11-02T00:30:56.000Z | 2022-03-08T16:48:13.000Z | kenlm_training/cc_net/tokenizer.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
]
| 66 | 2019-11-06T01:28:12.000Z | 2022-03-01T09:18:32.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from typing import Dict, Optional
import sacremoses # type: ignore
from cc_net import jsonql, text_normalizer
class RobustTokenizer(jsonql.Transformer):
"""Moses tokenizer with the expected preprocessing."""
LANG_WITHOUT_ACCENT = {"en", "my"}
def __init__(self, lang: str):
super().__init__()
self.lang = lang
self.moses = sacremoses.MosesTokenizer(lang)
self.rm_accent = lang in self.LANG_WITHOUT_ACCENT
self.ready = True
def do(self, text: str):
text = text_normalizer.normalize(
text, accent=self.rm_accent, case=False, numbers=False, punct=True
)
text = text_normalizer.normalize_spacing_for_tok(text, language=self.lang)
return self.moses.tokenize(text, return_str=True, escape=False)
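    # Standalone sketch (output shown is approximate; exact tokenization depends on
    # the sacremoses version installed):
    #
    #   tok = RobustTokenizer("en")
    #   tok.do("Hello, world!")   # -> roughly "Hello , world !"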
class DocTokenizer(jsonql.Transformer):
"""Tokenize the text found in `output_field and store the result in `output_field`."""
def __init__(
self,
field: str,
output_field: str = "tokenized",
language_field: str = "language",
):
super().__init__()
self.field = field
self.output_field = output_field
self.language_field = language_field
self.n_docs = 0
self.tokenizers: Dict[str, RobustTokenizer] = {}
def get_tokenizer(self, lang: str) -> Optional[RobustTokenizer]:
cache = self.tokenizers
if lang in cache:
return cache[lang]
if lang in ("th", "zh", "ja"):
# TODO find a tokenizer for those languages
return None
cache[lang] = RobustTokenizer(lang)
return cache[lang]
def do(self, document):
lang = document[self.language_field]
tok = self.get_tokenizer(lang)
if not tok:
return document
self.n_docs += 1
lines = document[self.field].split("\n")
tokenized = "\n".join(tok(l) for l in lines)
document[self.output_field] = tokenized
return document
def summary(self):
delay = (time.time() - self.start_time) / 3600
speed = self.n_docs / delay
return [
f"Tokenized {self.n_docs:_} documents in {delay:.2}h ({speed:.1} doc/s)."
]
| 30.325 | 90 | 0.626958 | 2,115 | 0.871805 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.203215 |
c48caf2d700cbc3c512434c652a6ac5a08e2206b | 346 | py | Python | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
]
| null | null | null | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
]
| null | null | null | scripts/exercicios/ex063.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
]
| null | null | null |
cont = 3
t1 = 0
t2 = 1
print('-----' * 12)
print('Sequência de Fibonacci')
print('-----' * 12)
valor = int(input('Quantos termos você quer mostrar ? '))
print('~~~~~' * 12)
print(f'{t1} → {t2} ', end='→ ')
while cont <= valor:
    t3 = t1 + t2
    print(f' {t3}', end=' → ')
    t1 = t2
    t2 = t3
    t3 = t1
    cont += 1
print(' F I M')
| 19.222222 | 57 | 0.482659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.370056 |
c48ce6625a976f83a24cccf09278351389aa811f | 3,991 | py | Python | CGAT/Sra.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
]
| null | null | null | CGAT/Sra.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
]
| null | null | null | CGAT/Sra.py | 861934367/cgat | 77fdc2f819320110ed56b5b61968468f73dfc5cb | [
"BSD-2-Clause",
"BSD-3-Clause"
]
| 1 | 2019-08-04T22:46:38.000Z | 2019-08-04T22:46:38.000Z | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
Sra.py - Methods for dealing with short read archive files
==========================================================
Utility functions for dealing with :term:`SRA` formatted files from
the Short Read Archive.
Requirements:
* fastq-dump >= 2.1.7
Code
----
'''
import os
import glob
import tempfile
import shutil
import CGAT.Experiment as E
import CGAT.Fastq as Fastq
import CGAT.IOTools as IOTools
def peek(sra, outdir=None):
"""return the full file names for all files which will be extracted
Parameters
----------
outdir : path
perform extraction in outdir. If outdir is None, the extraction
will take place in a temporary directory, which will be deleted
afterwards.
Returns
-------
files : list
A list of fastq formatted files that are contained in the archive.
format : string
The quality score format in the :term:`fastq` formatted files.
"""
if outdir is None:
workdir = tempfile.mkdtemp()
else:
workdir = outdir
# --split-files creates files called prefix_#.fastq.gz,
# where # is the read number.
# If file cotains paired end data:
# output = prefix_1.fastq.gz, prefix_2.fastq.gz
# *special case: unpaired reads in a paired end --> prefix.fastq.gz
# *special case: if paired reads are stored in a single read,
# fastq-dump will split. There might be a joining
# sequence. The output would thus be:
# prefix_1.fastq.gz, prefix_2.fastq.gz, prefix_3.fastq.gz
# You want files 1 and 3.
E.run("""fastq-dump --split-files --gzip -X 1000
--outdir %(workdir)s %(sra)s""" % locals())
f = sorted(glob.glob(os.path.join(workdir, "*.fastq.gz")))
ff = [os.path.basename(x) for x in f]
if len(f) == 1:
# sra file contains one read: output = prefix.fastq.gz
pass
elif len(f) == 2:
# sra file contains read pairs:
# output = prefix_1.fastq.gz, prefix_2.fastq.gz
assert ff[0].endswith(
"_1.fastq.gz") and ff[1].endswith("_2.fastq.gz")
elif len(f) == 3:
if ff[2].endswith("_3.fastq.gz"):
f = glob.glob(os.path.join(workdir, "*_[13].fastq.gz"))
else:
f = glob.glob(os.path.join(workdir, "*_[13].fastq.gz"))
# check format of fastqs in .sra
fastq_format = Fastq.guessFormat(IOTools.openFile(f[0], "r"), raises=False)
fastq_datatype = Fastq.guessDataType(IOTools.openFile(f[0], "r"), raises=True)
if outdir is None:
shutil.rmtree(workdir)
return f, fastq_format, fastq_datatype
def extract(sra, outdir, tool="fastq-dump"):
"""return statement for extracting the SRA file in `outdir`.
possible tools are fastq-dump and abi-dump. Use abi-dump for colorspace"""
if tool == "fastq-dump":
tool += " --split-files"
statement = """%(tool)s --gzip --outdir %(outdir)s %(sra)s""" % locals()
return statement
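# Illustrative usage sketch (not part of the original module). The file and
# directory names are hypothetical, and it assumes fastq-dump is on the PATH.
if __name__ == "__main__":
    fastq_files, fastq_format, fastq_datatype = peek("sample.sra", outdir="extracted")
    print(fastq_files, fastq_format, fastq_datatype)
    print(extract("sample.sra", "extracted"))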
| 32.447154 | 82 | 0.607367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,825 | 0.707843 |
c48d9b9b2d55aa3083a0ad90f19c76032b967b27 | 11,902 | py | Python | LipidFinder/LFDataFrame.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | ["MIT"] | null | null | null | LipidFinder/LFDataFrame.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | ["MIT"] | null | null | null | LipidFinder/LFDataFrame.py | s-andrews/LipidFinder | c91d6caa8008e0a67188914e48f30913deff888d | ["MIT"] | null | null | null |
# Copyright (c) 2019 J. Alvarez-Jarreta and C.J. Brasher
#
# This file is part of the LipidFinder software tool and governed by the
# 'MIT License'. Please see the LICENSE file that should have been
# included as part of this software.
"""Represent a DataFrame to be processed with LipidFinder's workflow."""
import glob
import logging
import os
import pandas
class LFDataFrame(pandas.core.frame.DataFrame):
"""A LFDataFrame object stores a dataframe to be used as input data
in LipidFinder.
The input data file(s) must comply with the following requirements:
- The format must be: CSV, TSV, XLS or XLSX. For the last two the
user can also specify the sheet to be read (or the list of
sheets if a folder is given as 'src').
- The first column contains an identifier for each row that is
unique throughout every file.
- There is one column named as "mzCol" parameter and another one
as "rtCol" parameter.
- Starting from the column index in "firstSampleIndex" parameter,
every intensity column must follow. For instance, for 2 samples
with 2 technical replicates, 1 quality control sample and 2
solvents, the columns would be as follows:
sample11 , sample12 , sample21 , sample22 , QC1 , sol1, sol2
Ensure that samples with multiple technical replicates are given
names in the format name1, name2, etc. such that each name is
unique for each column. Replicates should be suffixed 1, 2, etc.
Attributes:
src (Public[str])
Source path where the data was loaded from.
_resolution (Private[int])
Number of digits after the radix point in floats.
Examples:
LFDataFrame objects can be created in two different ways:
>>> from Configuration import LFParameters
>>> from LFDataFrame import LFDataFrame
>>> params = LFParameters(module='peakfilter')
>>> csvData = LFDataFrame('input_data.csv', params)
>>> xlsData = LFDataFrame('input_data.xls', params, sheet=2)
>>> folderData = LFDataFrame('/home/user/data/', params)
After loading the required set of parameters, the data can be
loaded from a single file ('csvData' and 'xlsData' examples) or
from multiple files located in the same folder ('folderData'
example). The latter is meant to be used to merge multiple files
split by time ranges that represent a single run. The first and
last retention time (RT) minutes of every file are trimmed as
they are considered unreliable (except for the first and last
minutes of the first and last files, respectively). The method
supports overlap (after trimming), and the frames retained will
be those from the file with the most frames for each overlapping
minute.
The number of decimal places to keep from the input m/z column
can be changed assigning a value to 'resolution' variable. It
has been predefined to 6, a standard value in high-resolution
liquid-chromatography coupled to mass-spectrometry.
"""
def __init__(self, src, parameters, resolution=6, sheet=0):
# type: (str, LFParameters, int, object) -> LFDataFrame
"""Constructor of the class LFDataFrame.
Keyword Arguments:
src -- source path where to load the data from
parameters -- LipidFinder's parameters instance (can be for
any module)
resolution -- number of decimal places to keep from m/z
column [default: 6]
sheet -- sheet number or list of sheet numbers to read
when input file(s) have XLS or XLSX extension
(zero-indexed position) [default: 0]
"""
rtCol = parameters['rtCol']
if (not os.path.isdir(src)):
data = self._read_file(src, parameters, sheet)
else:
# Create a list of the input files in the source folder (in
# alphabetical order)
fileList = sorted(glob.iglob(os.path.join(src, '*.*')))
if (len(fileList) == 0):
raise FileNotFoundError("No files found in '{0}'".format(src))
data = self._read_file(fileList[0], parameters, sheet[0])
if (len(fileList) > 1):
# Sort first dataframe by RT
data.sort_values([rtCol], inplace=True, kind='mergesort')
# Append "minute" column to the dataframe with the
# integer part of the float values of its RT column
timeCol = 'minute'
data = data.assign(minute=data[rtCol].astype(int))
# Since it is the first file, remove the frames
# corresponding to the last minute
data = data[data[timeCol] != data.iloc[-1][timeCol]]
for index, filePath in enumerate(fileList[1:], start=1):
chunk = self._read_file(filePath, parameters, sheet[index])
# Sort next chunk dataframe by RT
chunk.sort_values([rtCol], inplace=True, kind='mergesort')
# Append "minute" column to the dataframe with the
# integer part of the float values of its RT column
chunk = chunk.assign(minute=chunk[rtCol].astype(int))
# Remove the frames of the first minute
chunk = chunk[chunk[timeCol] != chunk.iloc[0][timeCol]]
if (index < (len(fileList) - 1)):
# Since it is not the last file, remove the
# frames corresponding to the last minute
chunk = chunk[chunk[timeCol] != chunk.iloc[-1][timeCol]]
# Create a dataframe with the number of frames per
# minute for both the dataframe and the next chunk
overlap = pandas.DataFrame(
{'data': data.groupby(timeCol).size(),
'chunk': chunk.groupby(timeCol).size()}
).fillna(0)
# Keep the minutes where the number of frames in the
# next chunk is higher than in the current dataframe
overlap = overlap[overlap['chunk'] > overlap['data']]
minutesToReplace = overlap.index.tolist()
if (minutesToReplace):
# Remove the dataframe frames to be replaced
data = data[~data[timeCol].isin(minutesToReplace)]
# Append chunk frames preserving the column
# order of the main dataframe
data = data.append(
chunk[chunk[timeCol].isin(minutesToReplace)],
ignore_index=True
)[data.columns.tolist()]
# Drop "minute" column as it will be no longer necessary
data.drop(timeCol, axis=1, inplace=True)
# Rename first column if no name was given in the input file(s)
data.rename(columns={'Unnamed: 0': 'id'}, inplace=True)
# Sort dataframe by m/z and RT, and reset the indexing
mzCol = parameters['mzCol']
data.sort_values([mzCol, rtCol], inplace=True, kind='mergesort')
data.reset_index(drop=True, inplace=True)
# Adjust m/z column values to the machine's maximum float
# resolution
data[mzCol] = data[mzCol].apply(round, ndigits=resolution)
super(LFDataFrame, self).__init__(data=data)
self.src = src
self._resolution = resolution
def drop_empty_frames(self, module, parameters, means=False):
# type: (str, LFParameters, bool) -> None
"""Remove empty frames from the dataframe and reset the index.
An empty frame is a row for which every sample replicate or
sample mean has a zero intensity.
Keyword Arguments:
module -- module name to write in the logging file
parameters -- LipidFinder's parameters instance (can be for
any module)
means -- check sample means instead of each sample
replicate? [default: False]
"""
if (means):
meanColIndexes = [i for i, col in enumerate(self.columns)
if col.endswith('_mean')]
if (parameters['numSolventReps'] > 0):
# The first mean column is for the solvents
firstIndex = meanColIndexes[1]
else:
firstIndex = meanColIndexes[0]
lastIndex = meanColIndexes[-1]
else:
firstIndex = parameters['firstSampleIndex'] - 1
lastIndex = firstIndex \
+ (parameters['numSamples'] * parameters['numTechReps'])
# Get the indices of all empty frames
emptyFrames = self.iloc[:, firstIndex : lastIndex].eq(0).all(axis=1)
indices = self[emptyFrames].index.tolist()
if (indices):
# Drop empty frames and reset the index
self.drop(module, labels=indices, axis=0, inplace=True)
self.reset_index(drop=True, inplace=True)
def drop(self, module, **kwargs):
# type: (str, ...) -> LFDataFrame
"""Wrapper of pandas.DataFrame.drop() with logging report.
The report will be updated only if the labels correspond to
rows, i.e. kwargs['axis'] == 0 (default value).
Keyword Arguments:
module -- module name to write in the logging file
*kwargs -- arguments to pass to pandas.DataFrame.drop()
"""
# Create logger to print message to the log file
logger = logging.getLogger(module)
logger.setLevel(logging.INFO)
if ((len(kwargs['labels']) > 0) and (kwargs.get('axis', 0) == 0)):
idCol = self.columns[0]
idList = [str(x) for x in sorted(self.loc[kwargs['labels'], idCol])]
logger.info('%s: removed %d rows. IDs: %s', module, len(idList),
','.join(idList))
return super(LFDataFrame, self).drop(**kwargs)
@staticmethod
def _read_file(src, parameters, sheet):
# type: (str, LFParameters, int) -> pandas.core.frame.DataFrame
"""Return a dataframe with the same content as the source file,
but with retention time in minutes.
The read function will be configured based on the file's
extension. Accepted extensions: CSV, TSV, XLS, XLSX.
Keyword Arguments:
src -- source file path
parameters -- LipidFinder's parameters instance (can be for
any module)
sheet -- sheet number to read when the input file has
XLS or XLSX extension (zero-indexed position)
"""
extension = os.path.splitext(src)[1].lower()[1:]
# Load file based on its extension
if (extension == 'csv'):
data = pandas.read_csv(src, float_precision='high')
elif (extension == 'tsv'):
data = pandas.read_csv(src, sep='\t', float_precision='high')
elif (extension in ['xls', 'xlsx']):
data = pandas.read_excel(src, sheet_name=sheet)
else:
raise IOError(("Unknown file extension '{0}'. Expected: csv, tsv, "
"xls, xlsx").format(extension))
if (('timeUnit' in parameters) and
(parameters['timeUnit'] == 'Seconds')):
rtCol = parameters['rtCol']
data[rtCol] = data[rtCol].apply(lambda x: round(x / 60.0, 2))
return data
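# Illustrative usage sketch (not part of the original module), mirroring the examples
# in the class docstring above; the input file name and module choice are hypothetical.
if __name__ == "__main__":
    from Configuration import LFParameters
    params = LFParameters(module='peakfilter')
    data = LFDataFrame('input_data.csv', params)
    data.drop_empty_frames('PeakFilter', params)
    print(data.head())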
| 50.008403 | 80 | 0.584272 | 11,537 | 0.969333 | 0 | 0 | 1,501 | 0.126113 | 0 | 0 | 6,969 | 0.585532 |
c48f04379334e4d1150bc95e2f72b0aa259025e8 | 4,836 | py | Python | tensorflow/python/ops/fused_embedding_ops.py | lixy9474/DeepRec-1 | dbfdf98af68505201a4f647348cce56ecbb652b2 | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/fused_embedding_ops.py | lixy9474/DeepRec-1 | dbfdf98af68505201a4f647348cce56ecbb652b2 | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/fused_embedding_ops.py | lixy9474/DeepRec-1 | dbfdf98af68505201a4f647348cce56ecbb652b2 | ["Apache-2.0"] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import gen_fused_embedding_ops
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_local_sparse_look_up_grad
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_local_sparse_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_pre_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_post_look_up
from tensorflow.python.ops.gen_fused_embedding_ops import fused_embedding_sparse_post_look_up_grad
from tensorflow.python.platform import tf_logging as logging  # used by the logging.warn call below
from tensorflow.python.util.tf_export import tf_export
def fused_embedding_lookup_sparse(embedding_weights,
sparse_ids,
combiner=None,
name=None,
max_norm=None):
if embedding_weights is None:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
# get underlying Variables.
embedding_weights = list(embedding_weights)
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
with ops.name_scope(name, "fused_embedding_lookup", embedding_weights +
[sparse_ids]) as scope:
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if not isinstance(sparse_ids, sparse_tensor.SparseTensor):
raise TypeError("sparse_ids must be SparseTensor")
partition_nums = len(embedding_weights)
# Local fused embedding lookup. Only support local look up and tf.Variable as
# embedding weight. So skip it for now.
#emb_vectors, _ = fused_embedding_local_sparse_look_up(sp_values=sparse_ids.values,
# sp_indices=sparse_ids.indices,
# sp_dense_shape=sparse_ids.dense_shape,
# emb_variable=embedding_weights[0],
# combiner=combiner,
# max_norm=max_norm)
partition_shapes = [w.shape for w in embedding_weights]
partitioned_values, partitioned_indices = fused_embedding_sparse_pre_look_up(
partition_shapes=partition_shapes,
sp_values=sparse_ids.values,
sp_indices=sparse_ids.indices,
)
emb_shards = []
for i in range(partition_nums):
embedding = embedding_weights[i]
sub_partition_values = partitioned_values[i]
with ops.colocate_with(embedding):
shard = array_ops.gather(embedding, sub_partition_values)
emb_shards.append(shard)
emb_vectors, _ = fused_embedding_sparse_post_look_up(
emb_shards=emb_shards, partitioned_indices=partitioned_indices,
sp_dense_shape=sparse_ids.dense_shape,
partitioned_values=partitioned_values,
combiner=combiner, max_norm=max_norm
)
return emb_vectors
@ops.RegisterGradient("FusedEmbeddingLocalSparseLookUp")
def fused_embedding_local_sparse_look_up_grad(op, top_grad_emb_vec, _):
grad_sp_values = gen_fused_embedding_ops.fused_embedding_local_sparse_look_up_grad(
top_grad=top_grad_emb_vec, emb_variable=op.inputs[3],
sp_values=op.inputs[0], sp_values_offset=op.outputs[1],
combiner=op.get_attr("combiner"),
max_norm=op.get_attr("max_norm")
)
grads = ops.IndexedSlices(values=grad_sp_values,
indices=op.inputs[0])
return [None, None, None, grads]
@ops.RegisterGradient("FusedEmbeddingSparsePostLookUp")
def fused_embedding_sparse_post_look_up_grad(op, top_grad_emb_vec, _):
num_partitions = op.get_attr("num_partitions")
grad_shards = gen_fused_embedding_ops.fused_embedding_sparse_post_look_up_grad(
top_grad=top_grad_emb_vec, emb_shards=[op.inputs[i] for i in range(0, num_partitions)],
partitioned_indices=[op.inputs[i] for i in range(num_partitions, 2 * num_partitions)],
feature_nums=op.outputs[1], combiner=op.get_attr("combiner"),
max_norm=op.get_attr("max_norm")
)
return grad_shards + [None for _ in range(0, 2 * num_partitions + 1)]
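# Illustrative usage sketch (not part of the original file). It assumes a DeepRec
# build where the fused embedding kernels are registered; the shapes, names and id
# values below are hypothetical.
if __name__ == "__main__":
    import tensorflow as tf
    emb = tf.Variable(tf.random.normal([1000, 64]))
    sp_ids = tf.sparse.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                                    values=tf.constant([3, 7, 42], dtype=tf.int64),
                                    dense_shape=[2, 4])
    print(fused_embedding_lookup_sparse([emb], sp_ids, combiner="mean"))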
| 49.346939 | 99 | 0.709884 | 0 | 0 | 0 | 0 | 1,166 | 0.241108 | 0 | 0 | 1,043 | 0.215674 |
c48f102d83062572178277d6397d5fa6395d6e36 | 705 | py | Python | docs/source/conf.py | deeplook/ipycanvas | c42a5540c55534f919da0fd462cef4593ac7d755 | ["BSD-3-Clause"] | null | null | null | docs/source/conf.py | deeplook/ipycanvas | c42a5540c55534f919da0fd462cef4593ac7d755 | ["BSD-3-Clause"] | null | null | null | docs/source/conf.py | deeplook/ipycanvas | c42a5540c55534f919da0fd462cef4593ac7d755 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import sphinx_rtd_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
# 'sphinx.ext.intersphinx',
# 'sphinx.ext.autosummary',
# 'sphinx.ext.viewcode',
# 'jupyter_sphinx.embed_widgets',
]
templates_path = ['_templates']
master_doc = 'index'
source_suffix = '.rst'
# General information about the project.
project = 'ipycanvas'
author = 'Martin Renou'
exclude_patterns = []
highlight_language = 'python'
pygments_style = 'sphinx'
# Output file base name for HTML help builder.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
htmlhelp_basename = 'ipycanvasdoc'
autodoc_member_order = 'bysource'
| 21.363636 | 58 | 0.721986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.523404 |
c48f34eb08cb0b005af06224c4565e27b18c0cb3 | 672 | py | Python | pyTuplingUtils/io.py | umd-lhcb/pyTuplingUtils | dd2efe154f1418a70295eabd8919e16ace2785cc | ["BSD-2-Clause"] | null | null | null | pyTuplingUtils/io.py | umd-lhcb/pyTuplingUtils | dd2efe154f1418a70295eabd8919e16ace2785cc | ["BSD-2-Clause"] | 7 | 2020-04-20T17:25:45.000Z | 2021-06-13T21:05:14.000Z | pyTuplingUtils/io.py | umd-lhcb/pyTuplingUtils | dd2efe154f1418a70295eabd8919e16ace2785cc | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python3
#
# Author: Yipeng Sun
# License: BSD 2-clause
# Last Change: Sun May 09, 2021 at 02:52 AM +0200
import numpy as np
ARRAY_TYPE = 'np'
def read_branch(ntp, tree, branch, idx=None):
data = ntp[tree][branch].array(library=ARRAY_TYPE)
return data if not idx else data[idx]
def read_branches_dict(ntp, tree, branches):
return ntp[tree].arrays(branches, library=ARRAY_TYPE)
def read_branches(ntp, tree, branches, idx=None, transpose=False):
data = list(ntp[tree].arrays(branches, library=ARRAY_TYPE).values())
if idx is not None:
data = [d[idx] for d in data]
return np.column_stack(data) if transpose else data
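# Illustrative usage sketch (not part of the original module). The ROOT file, tree
# and branch names are hypothetical; it assumes the ntuple is opened with uproot.
if __name__ == '__main__':
    import uproot
    ntp = uproot.open('sample.root')
    pt = read_branch(ntp, 'tree', 'pt')
    kinematics = read_branches(ntp, 'tree', ['pt', 'eta'], transpose=True)
    print(pt.shape, kinematics.shape)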
| 23.172414 | 72 | 0.699405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.177083 |
c4900758f320c35b4e8aaebac80f973e8645fdc4 | 483 | py | Python | clinnotes/reminders/forms.py | mattnickerson993/clinnotes2 | bc44e516a5042e22de8c6618425966bd58919eff | ["MIT"] | null | null | null | clinnotes/reminders/forms.py | mattnickerson993/clinnotes2 | bc44e516a5042e22de8c6618425966bd58919eff | ["MIT"] | null | null | null | clinnotes/reminders/forms.py | mattnickerson993/clinnotes2 | bc44e516a5042e22de8c6618425966bd58919eff | ["MIT"] | null | null | null |
from django import forms
from .models import Reminder
from clinnotes.users.models import EpisodeOfCare
class ReminderForm(forms.ModelForm):
class Meta:
model = Reminder
fields = ['category', 'title', 'details', 'episode_of_care']
def __init__(self, *args, **kwargs):
user = kwargs.pop('user')
super(ReminderForm, self).__init__(*args, **kwargs)
        self.fields['episode_of_care'].queryset = EpisodeOfCare.objects.filter(clinician=user)
| 34.5 | 94 | 0.693582 | 378 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.136646 |
c491762e38ab524f8ee85b6bd6fa42008e8b092d | 3,583 | py | Python | AlgorithmB.py | tejaDhulipala/SnowflakeGen | effabafb790a1a407c5c27cf249806a4775127e4 | ["MIT"] | null | null | null | AlgorithmB.py | tejaDhulipala/SnowflakeGen | effabafb790a1a407c5c27cf249806a4775127e4 | ["MIT"] | null | null | null | AlgorithmB.py | tejaDhulipala/SnowflakeGen | effabafb790a1a407c5c27cf249806a4775127e4 | ["MIT"] | null | null | null |
import pygame as pg
from shapely.geometry import Point, Polygon
from time import perf_counter
# Vars
A = [(100, 600), (700, 600), (400, 80)]
triangles = [[(100, 600), (700, 600), (400, 80)]]
SQRT_3 = 3 ** (1 / 2)
WHITE = (255, 255, 255)
# Graphics part
pg.init()
screen = pg.display.set_mode((800, 800))
# Funcs
distance = lambda x, y: ((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2) ** 0.5
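# The two generatePoints* helpers below implement a single Koch edge split: the
# segment pt1-pt2 is trisected at (a, b) and (c, d), and the apex ptc of an
# equilateral triangle on the middle third is placed at height h = sqrt(3)/2 * dis,
# offset from the segment midpoint ptm along the perpendicular direction (slope
# -1/slope). Of the two candidate apexes, the one farther from the reference vertex
# (or, in generatePoints_2, the one outside the parent polygon) is kept so each new
# bump points outward.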
def generatePoints(pt1, pt2, reference):
slope = (pt1[1] - pt2[1]) / (pt1[0] - pt2[0])
a = pt1[0] + (pt2[0] - pt1[0]) / 3
b = pt1[1] + (pt2[1] - pt1[1]) / 3
c = pt1[0] + (pt2[0] - pt1[0]) * 2 / 3
d = pt1[1] + (pt2[1] - pt1[1]) * 2 / 3
ptm = (pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2
dis = distance((a, b), (c, d))
h = SQRT_3/2 * dis
if slope == 0:
ptc1 = ptm[0], ptm[1] - h
ptc2 = ptm[0], ptm[1] + h
ptc = ptc1 if distance(reference, ptc1) > distance(ptc2, reference) else ptc2
return (round(a), round(b)), (round(c), round(d)), ptc
perp = -1 / slope
x_c = h / (perp ** 2 + 1) ** 0.5
y_c = perp * x_c
ptc1 = round(ptm[0] - x_c), round(ptm[1] - y_c)
ptc2 = round(ptm[0] + x_c), round(ptm[1] + y_c)
ptc = ptc1 if distance(reference, ptc1) > distance(ptc2, reference) else ptc2
return (round(a), round(b)), (round(c), round(d)), ptc
def generatePoints_2(pt1, pt2, father: Polygon):
slope = (pt1[1] - pt2[1]) / (pt1[0] - pt2[0])
a = pt1[0] + (pt2[0] - pt1[0]) / 3
b = pt1[1] + (pt2[1] - pt1[1]) / 3
c = pt1[0] + (pt2[0] - pt1[0]) * 2 / 3
d = pt1[1] + (pt2[1] - pt1[1]) * 2 / 3
ptm = (pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2
dis = distance((a, b), (c, d))
h = SQRT_3/2 * dis
if slope == 0:
ptc1 = ptm[0], ptm[1] - h
ptc2 = ptm[0], ptm[1] + h
ptc = ptc1 if father.contains(Point(*ptc2)) else ptc2
return (round(a), round(b)), (round(c), round(d)), ptc
perp = -1 / slope
x_c = h / (perp ** 2 + 1) ** 0.5
y_c = perp * x_c
ptc1 = round(ptm[0] - x_c), round(ptm[1] - y_c)
ptc2 = round(ptm[0] + x_c), round(ptm[1] + y_c)
ptc = ptc1 if father.contains(Point(*ptc2)) else ptc2
return (round(a), round(b)), (round(c), round(d)), ptc
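# generateSnowflake applies `level` Koch iterations in place: for each edge it inserts
# the first trisection point, the apex, then the second trisection point before the
# edge's endpoint, so every pass replaces each edge with four shorter ones. The
# `triangles` list records each spawned triangle so later passes can look up the
# opposite vertex used as the outward reference; when no parent triangle is found,
# generatePoints_2 falls back to testing containment against the current outline.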
def generateSnowflake(array: list, level):
for i in range(level):
org = array.copy()
for j in range(len(org)):
pt1 = org[j]
pt2 = org[(j + 1) % (len(org))]
ref = None
for triangle in triangles:
if pt1 in triangle and pt2 in triangle:
b = triangle.copy()
b.remove(pt1)
b.remove(pt2)
ref = b[0]
if ref == None:
pta, ptb, ptc = generatePoints_2(pt1, pt2, Polygon(array))
else:
pta, ptb, ptc = generatePoints(pt1, pt2, ref)
index = array.index(pt2)
array.insert(index, ptb)
array.insert(index, ptc)
array.insert(index, pta)
triangles.append([pta, ptb, ptc])
start = perf_counter()
# Call Func
generateSnowflake(A, 6)
print(len(A))
# Game Loop
A.append(A[0])  # close the outline once, outside the loop, so A does not grow every frame
while True:
    screen.fill(WHITE)
for i in range(len(A) - 1):
pg.draw.line(screen, (0, 0, 0), A[i], A[i + 1])
# exit code
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
quit(0)
# Updating
pg.display.update()
print(perf_counter() - start)
| 32.87156 | 86 | 0.487301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.021769 |
c492c9c90d1fe8546ec965192035975153cc63a2 | 39,839 | py | Python | validator/testcases/javascript/actions.py | AutomatedTester/amo-validator | a063002497395ce04085a3940713b4467f12e9fd | ["BSD-3-Clause"] | null | null | null | validator/testcases/javascript/actions.py | AutomatedTester/amo-validator | a063002497395ce04085a3940713b4467f12e9fd | ["BSD-3-Clause"] | null | null | null | validator/testcases/javascript/actions.py | AutomatedTester/amo-validator | a063002497395ce04085a3940713b4467f12e9fd | ["BSD-3-Clause"] | null | null | null |
from copy import deepcopy
from functools import partial
import sys
import types
# Global import of predefinedentities will cause an import loop
import instanceactions
from validator.constants import (BUGZILLA_BUG, DESCRIPTION_TYPES, FENNEC_GUID,
FIREFOX_GUID, MAX_STR_SIZE, MDN_DOC)
from validator.decorator import version_range
from jstypes import JSArray, JSContext, JSLiteral, JSObject, JSWrapper
NUMERIC_TYPES = (int, long, float, complex)
# None of these operations (or their augmented assignment counterparts) should
# be performed on non-numeric data. Any time we get non-numeric data for these
# guys, we just return window.NaN.
NUMERIC_OPERATORS = ('-', '*', '/', '%', '<<', '>>', '>>>', '|', '^', '&')
NUMERIC_OPERATORS += tuple('%s=' % op for op in NUMERIC_OPERATORS)
def get_NaN(traverser):
# If we've cached the traverser's NaN instance, just use that.
ncache = getattr(traverser, 'NAN_CACHE', None)
if ncache is not None:
return ncache
# Otherwise, we need to import GLOBAL_ENTITIES and build a raw copy.
from predefinedentities import GLOBAL_ENTITIES
ncache = traverser._build_global('NaN', GLOBAL_ENTITIES[u'NaN'])
# Cache it so we don't need to do this again.
traverser.NAN_CACHE = ncache
return ncache
def _get_member_exp_property(traverser, node):
"""Return the string value of a member expression's property."""
if node['property']['type'] == 'Identifier' and not node.get('computed'):
return unicode(node['property']['name'])
else:
eval_exp = traverser._traverse_node(node['property'])
return _get_as_str(eval_exp.get_literal_value())
def _expand_globals(traverser, node):
"""Expands a global object that has a lambda value."""
if node.is_global and callable(node.value.get('value')):
result = node.value['value'](traverser)
if isinstance(result, dict):
output = traverser._build_global('--', result)
elif isinstance(result, JSWrapper):
output = result
else:
output = JSWrapper(result, traverser)
# Set the node context.
if 'context' in node.value:
traverser._debug('CONTEXT>>%s' % node.value['context'])
output.context = node.value['context']
else:
traverser._debug('CONTEXT>>INHERITED')
output.context = node.context
return output
return node
def trace_member(traverser, node, instantiate=False):
'Traces a MemberExpression and returns the appropriate object'
traverser._debug('TESTING>>%s' % node['type'])
if node['type'] == 'MemberExpression':
# x.y or x[y]
# x = base
base = trace_member(traverser, node['object'], instantiate)
base = _expand_globals(traverser, base)
identifier = _get_member_exp_property(traverser, node)
# Handle the various global entity properties.
if base.is_global:
# If we've got an XPCOM wildcard, return a copy of the entity.
if 'xpcom_wildcard' in base.value:
traverser._debug('MEMBER_EXP>>XPCOM_WILDCARD')
from predefinedentities import CONTRACT_ENTITIES
if identifier in CONTRACT_ENTITIES:
kw = dict(err_id=('js', 'actions', 'dangerous_contract'),
warning='Dangerous XPCOM contract ID')
kw.update(CONTRACT_ENTITIES[identifier])
traverser.warning(**kw)
base.value = base.value.copy()
del base.value['xpcom_wildcard']
return base
test_identifier(traverser, identifier)
traverser._debug('MEMBER_EXP>>PROPERTY: %s' % identifier)
output = base.get(
traverser=traverser, instantiate=instantiate, name=identifier)
output.context = base.context
if base.is_global:
# In the cases of XPCOM objects, methods generally
# remain bound to their parent objects, even when called
# indirectly.
output.parent = base
return output
elif node['type'] == 'Identifier':
traverser._debug('MEMBER_EXP>>ROOT:IDENTIFIER')
test_identifier(traverser, node['name'])
# If we're supposed to instantiate the object and it doesn't already
# exist, instantitate the object.
if instantiate and not traverser._is_defined(node['name']):
output = JSWrapper(JSObject(), traverser=traverser)
traverser.contexts[0].set(node['name'], output)
else:
output = traverser._seek_variable(node['name'])
return _expand_globals(traverser, output)
else:
traverser._debug('MEMBER_EXP>>ROOT:EXPRESSION')
# It's an expression, so just try your damndest.
return traverser._traverse_node(node)
def test_identifier(traverser, name):
'Tests whether an identifier is banned'
import predefinedentities
if name in predefinedentities.BANNED_IDENTIFIERS:
traverser.err.warning(
err_id=('js', 'actions', 'banned_identifier'),
warning='Banned or deprecated JavaScript Identifier',
description=predefinedentities.BANNED_IDENTIFIERS[name],
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
def _function(traverser, node):
'Prevents code duplication'
def wrap(traverser, node):
me = JSObject()
traverser.function_collection.append([])
# Replace the current context with a prototypeable JS object.
traverser._pop_context()
me.type_ = 'default' # Treat the function as a normal object.
traverser._push_context(me)
traverser._debug('THIS_PUSH')
traverser.this_stack.append(me) # Allow references to "this"
# Declare parameters in the local scope
params = []
for param in node['params']:
if param['type'] == 'Identifier':
params.append(param['name'])
elif param['type'] == 'ArrayPattern':
for element in param['elements']:
# Array destructuring in function prototypes? LOL!
if element is None or element['type'] != 'Identifier':
continue
params.append(element['name'])
local_context = traverser._peek_context(1)
for param in params:
var = JSWrapper(lazy=True, traverser=traverser)
# We can assume that the params are static because we don't care
# about what calls the function. We want to know whether the
# function solely returns static values. If so, it is a static
# function.
local_context.set(param, var)
traverser._traverse_node(node['body'])
# Since we need to manually manage the "this" stack, pop off that
# context.
traverser._debug('THIS_POP')
traverser.this_stack.pop()
# Call all of the function collection's members to traverse all of the
# child functions.
func_coll = traverser.function_collection.pop()
for func in func_coll:
func()
# Put the function off for traversal at the end of the current block scope.
traverser.function_collection[-1].append(partial(wrap, traverser, node))
return JSWrapper(traverser=traverser, callable=True, dirty=True)
def _define_function(traverser, node):
me = _function(traverser, node)
traverser._peek_context(2).set(node['id']['name'], me)
return me
def _func_expr(traverser, node):
'Represents a lambda function'
return _function(traverser, node)
def _define_with(traverser, node):
'Handles `with` statements'
object_ = traverser._traverse_node(node['object'])
if isinstance(object_, JSWrapper) and isinstance(object_.value, JSObject):
traverser.contexts[-1] = object_.value
traverser.contexts.append(JSContext('block'))
return
def _define_var(traverser, node):
'Creates a local context variable'
traverser._debug('VARIABLE_DECLARATION')
traverser.debug_level += 1
declarations = (node['declarations'] if 'declarations' in node
else node['head'])
kind = node.get('kind', 'let')
for declaration in declarations:
# It could be deconstruction of variables :(
if declaration['id']['type'] == 'ArrayPattern':
vars = []
for element in declaration['id']['elements']:
# NOTE : Multi-level array destructuring sucks. Maybe implement
# it someday if you're bored, but it's so rarely used and it's
# so utterly complex, there's probably no need to ever code it
# up.
if element is None or element['type'] != 'Identifier':
vars.append(None)
continue
vars.append(element['name'])
# The variables are not initialized
if declaration['init'] is None:
# Simple instantiation; no initialization
for var in vars:
if not var:
continue
traverser._declare_variable(var, None)
# The variables are declared inline
elif declaration['init']['type'] == 'ArrayPattern':
# TODO : Test to make sure len(values) == len(vars)
for value in declaration['init']['elements']:
if vars[0]:
traverser._declare_variable(
vars[0], JSWrapper(traverser._traverse_node(value),
traverser=traverser))
vars = vars[1:] # Pop off the first value
# It's being assigned by a JSArray (presumably)
elif declaration['init']['type'] == 'ArrayExpression':
assigner = traverser._traverse_node(declaration['init'])
for value in assigner.value.elements:
if vars[0]:
traverser._declare_variable(vars[0], value)
vars = vars[1:]
elif declaration['id']['type'] == 'ObjectPattern':
init = traverser._traverse_node(declaration['init'])
def _proc_objpattern(init_obj, properties):
for prop in properties:
# Get the name of the init obj's member
if prop['key']['type'] == 'Literal':
prop_name = prop['key']['value']
elif prop['key']['type'] == 'Identifier':
prop_name = prop['key']['name']
else:
continue
if prop['value']['type'] == 'Identifier':
traverser._declare_variable(
prop['value']['name'],
init_obj.get(traverser, prop_name))
elif prop['value']['type'] == 'ObjectPattern':
_proc_objpattern(init_obj.get(traverser, prop_name),
prop['value']['properties'])
if init is not None:
_proc_objpattern(init_obj=init,
properties=declaration['id']['properties'])
else:
var_name = declaration['id']['name']
traverser._debug('NAME>>%s' % var_name)
var_value = traverser._traverse_node(declaration['init'])
traverser._debug('VALUE>>%s' % (var_value.output()
if var_value is not None
else 'None'))
if not isinstance(var_value, JSWrapper):
var = JSWrapper(value=var_value,
const=kind == 'const',
traverser=traverser)
else:
var = var_value
var.const = kind == 'const'
traverser._declare_variable(var_name, var, type_=kind)
if 'body' in node:
traverser._traverse_node(node['body'])
traverser.debug_level -= 1
# The "Declarations" branch contains custom elements.
return True
def _define_obj(traverser, node):
'Creates a local context object'
var = JSObject()
for prop in node['properties']:
if prop['type'] == 'PrototypeMutation':
var_name = 'prototype'
else:
key = prop['key']
if key['type'] == 'Literal':
var_name = key['value']
elif isinstance(key['name'], basestring):
var_name = key['name']
else:
if 'property' in key['name']:
name = key['name']
else:
name = {'property': key['name']}
var_name = _get_member_exp_property(traverser, name)
var_value = traverser._traverse_node(prop['value'])
var.set(var_name, var_value, traverser)
# TODO: Observe "kind"
if not isinstance(var, JSWrapper):
return JSWrapper(var, lazy=True, traverser=traverser)
var.lazy = True
return var
def _define_array(traverser, node):
"""Instantiate an array object from the parse tree."""
arr = JSArray()
arr.elements = map(traverser._traverse_node, node['elements'])
return arr
def _define_template_strings(traverser, node):
"""Instantiate an array of raw and cooked template strings."""
cooked = JSArray()
cooked.elements = map(traverser._traverse_node, node['cooked'])
raw = JSArray()
raw.elements = map(traverser._traverse_node, node['raw'])
cooked.set('raw', raw, traverser)
return cooked
def _define_template(traverser, node):
"""Instantiate a template literal."""
elements = map(traverser._traverse_node, node['elements'])
return reduce(partial(_binary_op, '+', traverser=traverser), elements)
def _define_literal(traverser, node):
"""
Convert a literal node in the parse tree to its corresponding
interpreted value.
"""
value = node['value']
if isinstance(value, dict):
return JSWrapper(JSObject(), traverser=traverser, dirty=True)
wrapper = JSWrapper(value if value is not None else JSLiteral(None),
traverser=traverser)
test_literal(traverser, wrapper)
return wrapper
def test_literal(traverser, wrapper):
"""
Test the value of a literal, in particular only a string literal at the
moment, against possibly dangerous patterns.
"""
value = wrapper.get_literal_value()
if isinstance(value, basestring):
# Local import to prevent import loop.
from validator.testcases.regex import validate_string
validate_string(value, traverser, wrapper=wrapper)
def _call_expression(traverser, node):
args = node['arguments']
for arg in args:
traverser._traverse_node(arg, source='arguments')
member = traverser._traverse_node(node['callee'])
if (traverser.filename.startswith('defaults/preferences/') and
('name' not in node['callee'] or
node['callee']['name'] not in (u'pref', u'user_pref'))):
traverser.err.warning(
err_id=('testcases_javascript_actions',
'_call_expression',
'complex_prefs_defaults_code'),
warning='Complex code should not appear in preference defaults '
'files',
description="Calls to functions other than 'pref' and 'user_pref' "
'should not appear in defaults/preferences/ files.',
filename=traverser.filename,
line=traverser.line,
column=traverser.position,
context=traverser.context)
if member.is_global and callable(member.value.get('dangerous', None)):
result = member.value['dangerous'](a=args, t=traverser._traverse_node,
e=traverser.err)
name = member.value.get('name', '')
if result and name:
kwargs = {
'err_id': ('testcases_javascript_actions', '_call_expression',
'called_dangerous_global'),
'warning': '`%s` called in potentially dangerous manner' %
member.value['name'],
'description':
'The global `%s` function was called using a set '
'of dangerous parameters. Calls of this nature '
'are deprecated.' % member.value['name']}
if isinstance(result, DESCRIPTION_TYPES):
kwargs['description'] = result
elif isinstance(result, dict):
kwargs.update(result)
traverser.warning(**kwargs)
elif (node['callee']['type'] == 'MemberExpression' and
node['callee']['property']['type'] == 'Identifier'):
# If we can identify the function being called on any member of any
# instance, we can use that to either generate an output value or test
# for additional conditions.
identifier_name = node['callee']['property']['name']
if identifier_name in instanceactions.INSTANCE_DEFINITIONS:
result = instanceactions.INSTANCE_DEFINITIONS[identifier_name](
args, traverser, node, wrapper=member)
return result
if member.is_global and 'return' in member.value:
if 'object' in node['callee']:
member.parent = trace_member(traverser, node['callee']['object'])
return member.value['return'](wrapper=member, arguments=args,
traverser=traverser)
return JSWrapper(JSObject(), dirty=True, traverser=traverser)
def _call_settimeout(a, t, e):
"""
Handler for setTimeout and setInterval. Should determine whether a[0]
is a lambda function or a string. Strings are banned, lambda functions are
ok. Since we can't do reliable type testing on other variables, we flag
those, too.
"""
if not a:
return
if a[0]['type'] in ('FunctionExpression', 'ArrowFunctionExpression'):
return
if t(a[0]).callable:
return
return {'err_id': ('javascript', 'dangerous_global', 'eval'),
'description':
'In order to prevent vulnerabilities, the `setTimeout` '
'and `setInterval` functions should be called only with '
'function expressions as their first argument.',
'signing_help': (
'Please do not ever call `setTimeout` or `setInterval` with '
'string arguments. If you are passing a function which is '
'not being correctly detected as such, please consider '
'passing a closure or arrow function, which in turn calls '
'the original function.'),
'signing_severity': 'high'}
def _call_require(a, t, e):
"""
Tests for unsafe uses of `require()` in SDK add-ons.
"""
args, traverse, err = a, t, e
if not err.metadata.get('is_jetpack') and len(args):
return
module = traverse(args[0]).get_literal_value()
if not isinstance(module, basestring):
return
if module.startswith('sdk/'):
module = module[len('sdk/'):]
LOW_LEVEL = {
# Added from bugs 689340, 731109
'chrome', 'window-utils', 'observer-service',
# Added from bug 845492
'window/utils', 'sdk/window/utils', 'sdk/deprecated/window-utils',
'tab/utils', 'sdk/tab/utils',
'system/events', 'sdk/system/events',
}
if module in LOW_LEVEL:
err.metadata['requires_chrome'] = True
return {'warning': 'Usage of low-level or non-SDK interface',
'description': 'Your add-on uses an interface which bypasses '
'the high-level protections of the add-on SDK. '
'This interface should be avoided, and its use '
'may significantly complicate your review '
'process.'}
if module == 'widget':
return {'warning': 'Use of deprecated SDK module',
'description':
"The 'widget' module has been deprecated due to a number "
'of performance and usability issues, and has been '
'removed from the SDK as of Firefox 40. Please use the '
"'sdk/ui/button/action' or 'sdk/ui/button/toggle' module "
'instead. See '
'https://developer.mozilla.org/Add-ons/SDK/High-Level_APIs'
'/ui for more information.'}
def _call_create_pref(a, t, e):
"""
Handler for pref() and user_pref() calls in defaults/preferences/*.js files
to ensure that they don't touch preferences outside of the "extensions."
branch.
"""
# We really need to clean up the arguments passed to these functions.
traverser = t.im_self
if not traverser.filename.startswith('defaults/preferences/') or not a:
return
instanceactions.set_preference(JSWrapper(JSLiteral(None),
traverser=traverser),
a, traverser)
value = _get_as_str(t(a[0]))
return test_preference(value)
def test_preference(value):
for branch in 'extensions.', 'services.sync.prefs.sync.extensions.':
if value.startswith(branch) and value.rindex('.') > len(branch):
return
return ('Extensions should not alter preferences outside of the '
"'extensions.' preference branch. Please make sure that "
"all of your extension's preferences are prefixed with "
"'extensions.add-on-name.', where 'add-on-name' is a "
'distinct string unique to and indicative of your add-on.')
def _readonly_top(traverser, right, node_right):
"""Handle the readonly callback for window.top."""
traverser.notice(
err_id=('testcases_javascript_actions',
'_readonly_top'),
notice='window.top is a reserved variable',
description='The `top` global variable is reserved and cannot be '
'assigned any values starting with Gecko 6. Review your '
'code for any uses of the `top` global, and refer to '
'%s for more information.' % BUGZILLA_BUG % 654137,
for_appversions={FIREFOX_GUID: version_range('firefox',
'6.0a1', '7.0a1'),
FENNEC_GUID: version_range('fennec',
'6.0a1', '7.0a1')},
compatibility_type='warning',
tier=5)
def _expression(traverser, node):
"""
This is a helper method that allows node definitions to point at
`_traverse_node` without needing a reference to a traverser.
"""
return traverser._traverse_node(node['expression'])
def _get_this(traverser, node):
'Returns the `this` object'
if not traverser.this_stack:
from predefinedentities import GLOBAL_ENTITIES
return traverser._build_global('window', GLOBAL_ENTITIES[u'window'])
return traverser.this_stack[-1]
def _new(traverser, node):
'Returns a new copy of a node.'
# We don't actually process the arguments as part of the flow because of
# the Angry T-Rex effect. For now, we just traverse them to ensure they
# don't contain anything dangerous.
args = node['arguments']
if isinstance(args, list):
for arg in args:
traverser._traverse_node(arg, source='arguments')
else:
traverser._traverse_node(args)
elem = traverser._traverse_node(node['callee'])
if not isinstance(elem, JSWrapper):
elem = JSWrapper(elem, traverser=traverser)
if elem.is_global:
traverser._debug('Making overwritable')
elem.value = deepcopy(elem.value)
elem.value['overwritable'] = True
return elem
def _ident(traverser, node):
'Initiates an object lookup on the traverser based on an identifier token'
name = node['name']
# Ban bits like "newThread"
test_identifier(traverser, name)
if traverser._is_defined(name):
return traverser._seek_variable(name)
return JSWrapper(JSObject(), traverser=traverser, dirty=True)
def _expr_assignment(traverser, node):
"""Evaluate an AssignmentExpression node."""
traverser._debug('ASSIGNMENT_EXPRESSION')
traverser.debug_level += 1
traverser._debug('ASSIGNMENT>>PARSING RIGHT')
right = traverser._traverse_node(node['right'])
right = JSWrapper(right, traverser=traverser)
# Treat direct assignment different than augmented assignment.
if node['operator'] == '=':
from predefinedentities import GLOBAL_ENTITIES, is_shared_scope
global_overwrite = False
readonly_value = is_shared_scope(traverser)
node_left = node['left']
traverser._debug('ASSIGNMENT:DIRECT(%s)' % node_left['type'])
if node_left['type'] == 'Identifier':
# Identifiers just need the ID name and a value to push.
# Raise a global overwrite issue if the identifier is global.
global_overwrite = traverser._is_global(node_left['name'])
# Get the readonly attribute and store its value if is_global
if global_overwrite:
global_dict = GLOBAL_ENTITIES[node_left['name']]
if 'readonly' in global_dict:
readonly_value = global_dict['readonly']
traverser._declare_variable(node_left['name'], right, type_='glob')
elif node_left['type'] == 'MemberExpression':
member_object = trace_member(traverser, node_left['object'],
instantiate=True)
global_overwrite = (member_object.is_global and
not ('overwritable' in member_object.value and
member_object.value['overwritable']))
member_property = _get_member_exp_property(traverser, node_left)
traverser._debug('ASSIGNMENT:MEMBER_PROPERTY(%s)'
% member_property)
traverser._debug('ASSIGNMENT:GLOB_OV::%s' % global_overwrite)
# Don't do the assignment if we're facing a global.
if not member_object.is_global:
if member_object.value is None:
member_object.value = JSObject()
if not member_object.is_global:
member_object.value.set(member_property, right, traverser)
else:
# It's probably better to do nothing.
pass
elif 'value' in member_object.value:
member_object_value = _expand_globals(traverser,
member_object).value
if member_property in member_object_value['value']:
# If it's a global and the actual member exists, test
# whether it can be safely overwritten.
member = member_object_value['value'][member_property]
if 'readonly' in member:
global_overwrite = True
readonly_value = member['readonly']
traverser._debug('ASSIGNMENT:DIRECT:GLOB_OVERWRITE %s' %
global_overwrite)
traverser._debug('ASSIGNMENT:DIRECT:READONLY %r' %
readonly_value)
if callable(readonly_value):
readonly_value = readonly_value(traverser, right, node['right'])
if readonly_value and global_overwrite:
kwargs = dict(
err_id=('testcases_javascript_actions',
'_expr_assignment',
'global_overwrite'),
warning='Global variable overwrite',
description='An attempt was made to overwrite a global '
'variable in some JavaScript code.')
if isinstance(readonly_value, DESCRIPTION_TYPES):
kwargs['description'] = readonly_value
elif isinstance(readonly_value, dict):
kwargs.update(readonly_value)
traverser.warning(**kwargs)
return right
lit_right = right.get_literal_value()
traverser._debug('ASSIGNMENT>>PARSING LEFT')
left = traverser._traverse_node(node['left'])
traverser._debug('ASSIGNMENT>>DONE PARSING LEFT')
traverser.debug_level -= 1
if isinstance(left, JSWrapper):
if left.dirty:
return left
lit_left = left.get_literal_value()
token = node['operator']
# Don't perform an operation on None. Python freaks out
if lit_left is None:
lit_left = 0
if lit_right is None:
lit_right = 0
# Give them default values so we have them in scope.
gleft, gright = 0, 0
# All of the assignment operators
operators = {'=': lambda: right,
'+=': lambda: lit_left + lit_right,
'-=': lambda: gleft - gright,
'*=': lambda: gleft * gright,
'/=': lambda: 0 if gright == 0 else (gleft / gright),
'%=': lambda: 0 if gright == 0 else (gleft % gright),
'<<=': lambda: int(gleft) << int(gright),
'>>=': lambda: int(gleft) >> int(gright),
'>>>=': lambda: float(abs(int(gleft)) >> gright),
'|=': lambda: int(gleft) | int(gright),
'^=': lambda: int(gleft) ^ int(gright),
'&=': lambda: int(gleft) & int(gright)}
# If we're modifying a non-numeric type with a numeric operator, return
# NaN.
if (not isinstance(lit_left, NUMERIC_TYPES) and
token in NUMERIC_OPERATORS):
left.set_value(get_NaN(traverser), traverser=traverser)
return left
# If either side of the assignment operator is a string, both sides
# need to be casted to strings first.
if (isinstance(lit_left, types.StringTypes) or
isinstance(lit_right, types.StringTypes)):
lit_left = _get_as_str(lit_left)
lit_right = _get_as_str(lit_right)
gleft, gright = _get_as_num(left), _get_as_num(right)
traverser._debug('ASSIGNMENT>>OPERATION:%s' % token)
if token not in operators:
# We don't support that operator. (yet?)
traverser._debug('ASSIGNMENT>>OPERATOR NOT FOUND', 1)
return left
elif token in ('<<=', '>>=', '>>>=') and gright < 0:
# The user is doing weird bitshifting that will return 0 in JS but
# not in Python.
left.set_value(0, traverser=traverser)
return left
elif (token in ('<<=', '>>=', '>>>=', '|=', '^=', '&=') and
(abs(gleft) == float('inf') or abs(gright) == float('inf'))):
# Don't bother handling infinity for integer-converted operations.
left.set_value(get_NaN(traverser), traverser=traverser)
return left
traverser._debug('ASSIGNMENT::L-value global? (%s)' %
('Y' if left.is_global else 'N'), 1)
try:
new_value = operators[token]()
except Exception:
traverser.system_error(exc_info=sys.exc_info())
new_value = None
# Cap the length of analyzed strings.
if (isinstance(new_value, types.StringTypes) and
len(new_value) > MAX_STR_SIZE):
new_value = new_value[:MAX_STR_SIZE]
traverser._debug('ASSIGNMENT::New value >> %s' % new_value, 1)
left.set_value(new_value, traverser=traverser)
return left
# Though it would otherwise be a syntax error, we say that 4=5 should
# evaluate out to 5.
return right
def _expr_binary(traverser, node):
'Evaluates a BinaryExpression node.'
traverser.debug_level += 1
# Select the proper operator.
operator = node['operator']
traverser._debug('BIN_OPERATOR>>%s' % operator)
# Traverse the left half of the binary expression.
with traverser._debug('BIN_EXP>>l-value'):
if (node['left']['type'] == 'BinaryExpression' and
'__traversal' not in node['left']):
# Process the left branch of the binary expression directly. This
# keeps the recursion cap in line and speeds up processing of
# large chains of binary expressions.
left = _expr_binary(traverser, node['left'])
node['left']['__traversal'] = left
else:
left = traverser._traverse_node(node['left'])
# Traverse the right half of the binary expression.
with traverser._debug('BIN_EXP>>r-value'):
if (operator == 'instanceof' and
node['right']['type'] == 'Identifier' and
node['right']['name'] == 'Function'):
# We make an exception for instanceof's r-value if it's a
# dangerous global, specifically Function.
return JSWrapper(True, traverser=traverser)
else:
right = traverser._traverse_node(node['right'])
traverser._debug('Is dirty? %r' % right.dirty, 1)
return _binary_op(operator, left, right, traverser)
def _binary_op(operator, left, right, traverser):
"""Perform a binary operation on two pre-traversed nodes."""
# Dirty l or r values mean we can skip the expression. A dirty value
# indicates that a lazy operation took place that introduced some
# nondeterminacy.
# FIXME(Kris): We should process these as if they're strings anyway.
if left.dirty:
return left
elif right.dirty:
return right
# Binary expressions are only executed on literals.
left = left.get_literal_value()
right_wrap = right
right = right.get_literal_value()
# Coerce the literals to numbers for numeric operations.
gleft = _get_as_num(left)
gright = _get_as_num(right)
operators = {
'==': lambda: left == right or gleft == gright,
'!=': lambda: left != right,
'===': lambda: left == right, # Be flexible.
'!==': lambda: type(left) != type(right) or left != right,
'>': lambda: left > right,
'<': lambda: left < right,
'<=': lambda: left <= right,
'>=': lambda: left >= right,
'<<': lambda: int(gleft) << int(gright),
'>>': lambda: int(gleft) >> int(gright),
'>>>': lambda: float(abs(int(gleft)) >> int(gright)),
'+': lambda: left + right,
'-': lambda: gleft - gright,
'*': lambda: gleft * gright,
'/': lambda: 0 if gright == 0 else (gleft / gright),
'%': lambda: 0 if gright == 0 else (gleft % gright),
'in': lambda: right_wrap.contains(left),
# TODO : implement instanceof
# FIXME(Kris): Treat instanceof the same as `QueryInterface`
}
output = None
if (operator in ('>>', '<<', '>>>') and
(left is None or right is None or gright < 0)):
output = False
elif operator in operators:
# Concatenation can be silly, so always turn undefineds into empty
# strings and if there are strings, make everything strings.
if operator == '+':
if left is None:
left = ''
if right is None:
right = ''
if isinstance(left, basestring) or isinstance(right, basestring):
left = _get_as_str(left)
right = _get_as_str(right)
# Don't even bother handling infinity if it's a numeric computation.
if (operator in ('<<', '>>', '>>>') and
(abs(gleft) == float('inf') or abs(gright) == float('inf'))):
return get_NaN(traverser)
try:
output = operators[operator]()
except Exception:
traverser.system_error(exc_info=sys.exc_info())
output = None
# Cap the length of analyzed strings.
if (isinstance(output, types.StringTypes) and
len(output) > MAX_STR_SIZE):
output = output[:MAX_STR_SIZE]
wrapper = JSWrapper(output, traverser=traverser)
# Test the newly-created literal for dangerous values.
# This may cause duplicate warnings for strings which
# already match a dangerous value prior to concatenation.
test_literal(traverser, wrapper)
return wrapper
return JSWrapper(output, traverser=traverser)
def _expr_unary(traverser, node):
"""Evaluate a UnaryExpression node."""
expr = traverser._traverse_node(node['argument'])
expr_lit = expr.get_literal_value()
expr_num = _get_as_num(expr_lit)
operators = {'-': lambda: -1 * expr_num,
'+': lambda: expr_num,
'!': lambda: not expr_lit,
'~': lambda: -1 * (expr_num + 1),
'void': lambda: None,
'typeof': lambda: _expr_unary_typeof(expr),
'delete': lambda: None} # We never want to empty the context
if node['operator'] in operators:
output = operators[node['operator']]()
else:
output = None
if not isinstance(output, JSWrapper):
output = JSWrapper(output, traverser=traverser)
return output
def _expr_unary_typeof(wrapper):
"""Evaluate the "typeof" value for a JSWrapper object."""
if (wrapper.callable or
(wrapper.is_global and 'return' in wrapper.value and
'value' not in wrapper.value)):
return 'function'
value = wrapper.value
if value is None:
return 'undefined'
elif isinstance(value, JSLiteral):
value = value.value
if isinstance(value, bool):
return 'boolean'
elif isinstance(value, (int, long, float)):
return 'number'
elif isinstance(value, types.StringTypes):
return 'string'
return 'object'
def _get_as_num(value):
"""Return the JS numeric equivalent for a value."""
if isinstance(value, JSWrapper):
value = value.get_literal_value()
if value is None:
return 0
try:
if isinstance(value, types.StringTypes):
if value.startswith('0x'):
return int(value, 16)
else:
return float(value)
elif isinstance(value, (int, float, long)):
return value
else:
return int(value)
except (ValueError, TypeError):
return 0
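# Worked examples of the JS-style coercion implemented above: _get_as_num('0x10')
# returns 16, _get_as_num('3.5') returns 3.5, _get_as_num(None) returns 0, and
# _get_as_num('abc') returns 0 because the ValueError is swallowed.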
def _get_as_str(value):
"""Return the JS string equivalent for a literal value."""
if isinstance(value, JSWrapper):
value = value.get_literal_value()
if value is None:
return ''
if isinstance(value, bool):
return u'true' if value else u'false'
elif isinstance(value, (int, float, long)):
if value == float('inf'):
return u'Infinity'
elif value == float('-inf'):
return u'-Infinity'
# Try to see if we can shave off some trailing significant figures.
try:
if int(value) == value:
return unicode(int(value))
except ValueError:
pass
return unicode(value)
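# Worked examples: _get_as_str(True) returns u'true', _get_as_str(4.0) returns u'4',
# _get_as_str(float('inf')) returns u'Infinity', and _get_as_str(None) returns ''.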
| 37.407512 | 79 | 0.588192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12,717 | 0.31921 |
c4930d25761ee9d797224e253c155e8643ca0fdb | 14,588 | py | Python | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | ["BSD-3-Clause"] | null | null | null | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | ["BSD-3-Clause"] | null | null | null | geometry_utils/tests/test_bound_box.py | NOAA-ORR-ERD/geometry_utils | 0417a8c459fb17f101945f53d048191dc22e97c0 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
"""
Test code for the BBox Object
"""
import numpy as np
import pytest
from geometry_utils.bound_box import (BBox,
asBBox,
NullBBox,
InfBBox,
fromBBArray,
from_points,
)
class TestConstructors():
def test_creates(self):
B = BBox(((0, 0), (5, 5)))
assert isinstance(B, BBox)
def test_type(self):
B = np.array(((0, 0), (5, 5)))
assert not isinstance(B, BBox)
def testDataType(self):
B = BBox(((0, 0), (5, 5)))
assert B.dtype == np.float
def testShape(self):
B = BBox((0, 0, 5, 5))
assert B.shape == (2, 2)
def testShape2(self):
with pytest.raises(ValueError):
BBox((0, 0, 5))
def testShape3(self):
with pytest.raises(ValueError):
BBox((0, 0, 5, 6, 7))
def testArrayConstruction(self):
A = np.array(((4, 5), (10, 12)), np.float_)
B = BBox(A)
assert isinstance(B, BBox)
def testMinMax(self):
with pytest.raises(ValueError):
BBox((0, 0, -1, 6))
def testMinMax2(self):
with pytest.raises(ValueError):
BBox((0, 0, 1, -6))
def testMinMax3(self):
# OK to have a zero-sized BB
B = BBox(((0, 0), (0, 5)))
assert isinstance(B, BBox)
def testMinMax4(self):
# OK to have a zero-sized BB
B = BBox(((10., -34), (10., -34.0)))
assert isinstance(B, BBox)
def testMinMax5(self):
# OK to have a tiny BB
B = BBox(((0, 0), (1e-20, 5)))
assert isinstance(B, BBox)
def testMinMax6(self):
# Should catch tiny difference
with pytest.raises(ValueError):
BBox(((0, 0), (-1e-20, 5)))
class TestAsBBox():
def testPassThrough(self):
B = BBox(((0, 0), (5, 5)))
C = asBBox(B)
assert B is C
def testPassThrough2(self):
B = ((0, 0), (5, 5))
C = asBBox(B)
assert B is not C
def testPassArray(self):
# Different data type
A = np.array(((0, 0), (5, 5)))
C = asBBox(A)
assert A is not C
def testPassArray2(self):
# same data type -- should be a view
A = np.array(((0, 0), (5, 5)), np.float_)
C = asBBox(A)
A[0, 0] = -10
assert C[0, 0] == A[0, 0]
class TestIntersect():
def testSame(self):
B = BBox(((-23.5, 456), (56, 532.0)))
C = BBox(((-23.5, 456), (56, 532.0)))
assert B.Overlaps(C)
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((0, 12), (10, 32.0)))
assert B.Overlaps(C)
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 12), (25, 32.0)))
assert B.Overlaps(C)
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 5), (25, 15)))
assert B.Overlaps(C)
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 15)))
assert B.Overlaps(C)
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 9.2)))
assert not B.Overlaps(C)
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 25.001), (8.5, 32)))
assert not B.Overlaps(C)
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((4, 8), (4.95, 32)))
assert not B.Overlaps(C)
def testRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((17.1, 8), (17.95, 32)))
assert not B.Overlaps(C)
def testInside(self):
B = BBox(((-15, -25), (-5, -10)))
C = BBox(((-12, -22), (-6, -8)))
assert B.Overlaps(C)
def testOutside(self):
B = BBox(((-15, -25), (-5, -10)))
C = BBox(((-17, -26), (3, 0)))
assert B.Overlaps(C)
def testTouch(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 8), (17.95, 32)))
assert B.Overlaps(C)
def testCorner(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 25), (17.95, 32)))
assert B.Overlaps(C)
def testZeroSize(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((15, 25), (15, 25)))
assert B.Overlaps(C)
def testZeroSize2(self):
B = BBox(((5, 10), (5, 10)))
C = BBox(((15, 25), (15, 25)))
assert not B.Overlaps(C)
def testZeroSize3(self):
B = BBox(((5, 10), (5, 10)))
C = BBox(((0, 8), (10, 12)))
assert B.Overlaps(C)
def testZeroSize4(self):
B = BBox(((5, 1), (10, 25)))
C = BBox(((8, 8), (8, 8)))
assert B.Overlaps(C)
class TestEquality():
def testSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.)))
assert B == C
def testIdentical(self):
B = BBox(((1.0, 2.0), (5., 10.)))
assert B == B
def testNotSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.1)))
assert not B == C
def testWithArray(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.0, 2.0), (5., 10.)))
assert B == C
def testWithArray2(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.0, 2.0), (5., 10.)))
assert C == B
def testWithArray3(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = np.array(((1.01, 2.0), (5., 10.)))
assert not C == B
class TestInside():
def testSame(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((1.0, 2.0), (5., 10.)))
assert B.Inside(C)
def testPoint(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((3.0, 4.0), (3.0, 4.0)))
assert B.Inside(C)
def testPointOutside(self):
B = BBox(((1.0, 2.0), (5., 10.)))
C = BBox(((-3.0, 4.0), (0.10, 4.0)))
assert not B.Inside(C)
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((0, 12), (10, 32.0)))
assert not B.Inside(C)
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 12), (25, 32.0)))
assert not B.Inside(C)
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((12, 5), (25, 15)))
assert not B.Inside(C)
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 15)))
assert not (B.Inside(C))
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 5), (8.5, 9.2)))
assert not (B.Inside(C))
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((-10, 25.001), (8.5, 32)))
assert not (B.Inside(C))
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((4, 8), (4.95, 32)))
assert not (B.Inside(C))
def testRight(self):
B = BBox(((5, 10), (15, 25)))
C = BBox(((17.1, 8), (17.95, 32)))
assert not (B.Inside(C))
class TestPointInside():
def testPointIn(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 4.0)
assert (B.PointInside(P))
def testUpperLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (4, 30)
assert not (B.PointInside(P))
def testUpperRight(self):
B = BBox(((5, 10), (15, 25)))
P = (16, 30)
assert not (B.PointInside(P))
def testLowerRight(self):
B = BBox(((5, 10), (15, 25)))
P = (16, 4)
assert not (B.PointInside(P))
def testLowerLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (-10, 5)
assert not (B.PointInside(P))
def testBelow(self):
B = BBox(((5, 10), (15, 25)))
P = (10, 5)
assert not (B.PointInside(P))
def testAbove(self):
B = BBox(((5, 10), (15, 25)))
P = (10, 25.001)
assert not (B.PointInside(P))
def testLeft(self):
B = BBox(((5, 10), (15, 25)))
P = (4, 12)
assert not (B.PointInside(P))
def testRight(self):
B = BBox(((5, 10), (15, 25)))
P = (17.1, 12.3)
assert not (B.PointInside(P))
def testPointOnTopLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 10.)
assert (B.PointInside(P))
def testPointLeftTopLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (-3.0, 10.)
assert not (B.PointInside(P))
def testPointOnBottomLine(self):
B = BBox(((1.0, 2.0), (5., 10.)))
P = (3.0, 5.)
assert (B.PointInside(P))
def testPointOnLeft(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-10, -5.)
assert (B.PointInside(P))
def testPointOnRight(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-1, -5.)
assert (B.PointInside(P))
def testPointOnBottomRight(self):
B = BBox(((-10., -10.), (-1.0, -1.0)))
P = (-1, -10.)
assert (B.PointInside(P))
class Test_from_points():
def testCreate(self):
Pts = np.array(((5, 2), (3, 4), (1, 6)), np.float64)
B = from_points(Pts)
assert (B[0, 0] == 1.0 and
B[0, 1] == 2.0 and
B[1, 0] == 5.0 and
B[1, 1] == 6.0)
def testCreateInts(self):
Pts = np.array(((5, 2), (3, 4), (1, 6)))
B = from_points(Pts)
assert (B[0, 0] == 1.0 and
B[0, 1] == 2.0 and
B[1, 0] == 5.0 and
B[1, 1] == 6.0)
def testSinglePoint(self):
Pts = np.array((5, 2), np.float_)
B = from_points(Pts)
assert (B[0, 0] == 5. and
B[0, 1] == 2.0 and
B[1, 0] == 5. and
B[1, 1] == 2.0)
def testListTuples(self):
Pts = [(3, 6.5), (13, 43.2), (-4.32, -4), (65, -23), (-0.0001,
23.432)]
B = from_points(Pts)
assert (B[0, 0] == -4.32 and
B[0, 1] == -23.0 and
B[1, 0] == 65.0 and
B[1, 1] == 43.2)
class TestMerge():
A = BBox(((-23.5, 456), (56, 532.0)))
B = BBox(((-20.3, 460), (54, 465))) # B should be completely inside A
    C = BBox(((-23.5, 456), (58, 540.))) # up and to the right of A
D = BBox(((-26.5, 12), (56, 532.0)))
def testInside(self):
C = self.A.copy()
C.Merge(self.B)
assert (C == self.A)
def testFullOutside(self):
C = self.B.copy()
C.Merge(self.A)
assert (C == self.A)
def testUpRight(self):
A = self.A.copy()
A.Merge(self.C)
assert (A[0] == self.A[0] and A[1] == self.C[1])
def testDownLeft(self):
A = self.A.copy()
A.Merge(self.D)
assert (A[0] == self.D[0] and A[1] == self.A[1])
class TestWidthHeight():
B = BBox(((1.0, 2.0), (5., 10.)))
def testWidth(self):
assert (self.B.Width == 4.0)
def testWidth2(self):
assert (self.B.Height == 8.0)
def testSetW(self):
with pytest.raises(AttributeError):
self.B.Height = 6
def testSetH(self):
with pytest.raises(AttributeError):
self.B.Width = 6
class TestCenter():
B = BBox(((1.0, 2.0), (5., 10.)))
def testCenter(self):
assert ((self.B.Center == (3.0, 6.0)).all())
def testSetCenter(self):
with pytest.raises(AttributeError):
self.B.Center = (6, 5)
class TestBBarray():
BBarray = np.array((((-23.5, 456), (56, 532.0)), ((-20.3, 460),
(54, 465)), ((-23.5, 456), (58, 540.)), ((-26.5,
12), (56, 532.0))), dtype=np.float)
BB = asBBox(((-26.5, 12.), (58., 540.)))
def testJoin(self):
BB = fromBBArray(self.BBarray)
assert BB == self.BB
class TestNullBBox():
B1 = NullBBox()
B2 = NullBBox()
B3 = BBox(((1.0, 2.0), (5., 10.)))
def testValues(self):
assert (np.alltrue(np.isnan(self.B1)))
def testIsNull(self):
assert (self.B1.IsNull)
def testEquals(self):
assert ((self.B1 == self.B2) is True)
def testNotEquals(self):
assert not self.B1 == self.B3
def testNotEquals2(self):
assert not self.B3 == self.B1
def testMerge(self):
C = self.B1.copy()
C.Merge(self.B3)
assert C == self.B3, 'merge failed, got: %s' % C
def testOverlaps(self):
assert self.B1.Overlaps(self.B3) is False
def testOverlaps2(self):
assert self.B3.Overlaps(self.B1) is False
class TestInfBBox():
B1 = InfBBox()
B2 = InfBBox()
B3 = BBox(((1.0, 2.0), (5., 10.)))
NB = NullBBox()
def testValues(self):
assert (np.alltrue(np.isinf(self.B1)))
# def testIsNull(self):
# assert ( self.B1.IsNull )
def testEquals(self):
assert self.B1 == self.B2
def testNotEquals(self):
assert not self.B1 == self.B3
def testNotEquals2(self):
assert self.B1 != self.B3
def testNotEquals3(self):
assert not self.B3 == self.B1
def testMerge(self):
C = self.B1.copy()
C.Merge(self.B3)
assert C == self.B2, 'merge failed, got: %s' % C
def testMerge2(self):
C = self.B3.copy()
C.Merge(self.B1)
assert C == self.B1, 'merge failed, got: %s' % C
def testOverlaps(self):
assert (self.B1.Overlaps(self.B2) is True)
def testOverlaps2(self):
assert (self.B3.Overlaps(self.B1) is True)
def testOverlaps3(self):
assert (self.B1.Overlaps(self.B3) is True)
def testOverlaps4(self):
assert (self.B1.Overlaps(self.NB) is True)
def testOverlaps5(self):
assert (self.NB.Overlaps(self.B1) is True)
class TestSides():
B = BBox(((1.0, 2.0), (5., 10.)))
def testLeft(self):
assert self.B.Left == 1.0
def testRight(self):
assert self.B.Right == 5.0
def testBottom(self):
assert self.B.Bottom == 2.0
def testTop(self):
assert self.B.Top == 10.0
class TestAsPoly():
B = BBox(((5, 0), (10, 20)))
corners = np.array([(5., 0.), (5., 20.), (10., 20.), (10., 0.)],
dtype=np.float64)
def testCorners(self):
print(self.B.AsPoly())
assert np.array_equal(self.B.AsPoly(), self.corners)
| 25.151724 | 75 | 0.466822 | 14,120 | 0.967919 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.028174 |
c493107b1fd8b943176b6566abf9ca29701a3c9c | 4,789 | py | Python | cresi/net/augmentations/functional.py | ankshah131/cresi | 99328e065910c45a626e761cd308670e4a60f058 | [
"Apache-2.0"
]
| 117 | 2019-08-29T08:43:55.000Z | 2022-03-24T20:56:23.000Z | cresi/net/augmentations/functional.py | ankshah131/cresi | 99328e065910c45a626e761cd308670e4a60f058 | [
"Apache-2.0"
]
| 9 | 2019-11-23T10:55:13.000Z | 2021-06-22T12:26:21.000Z | cresi/net/augmentations/functional.py | ankshah131/cresi | 99328e065910c45a626e761cd308670e4a60f058 | [
"Apache-2.0"
]
| 33 | 2019-08-08T16:56:37.000Z | 2022-02-24T20:52:44.000Z | import cv2
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
import numpy as np
import math
from functools import wraps
def clip(img, dtype, maxval):
return np.clip(img, 0, maxval).astype(dtype)
def clipped(func):
"""
wrapper to clip results of transform to image dtype value range
"""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
dtype, maxval = img.dtype, np.max(img)
return clip(func(img, *args, **kwargs), dtype, maxval)
return wrapped_function
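# Illustrative sketch (added example, not one of the library's transforms):
# `clipped` keeps a simple brightness shift within the input image's value range.
# The name `add_brightness` and its behaviour are assumptions for this example.
@clipped
def add_brightness(img, value):
    # without @clipped the sum could exceed the original dtype's value range
    return img.astype(np.int32) + value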
def fix_shift_values(img, *args):
"""
shift values are normally specified in uint, but if your data is float - you need to remap values
"""
if img.dtype == np.float32:
return list(map(lambda x: x / 255, args))
return args
def vflip(img):
return cv2.flip(img, 0)
def hflip(img):
return cv2.flip(img, 1)
def flip(img, code):
return cv2.flip(img, code)
def transpose(img):
return img.transpose(1, 0, 2) if len(img.shape) > 2 else img.transpose(1, 0)
def rot90(img, times):
img = np.rot90(img, times)
return np.ascontiguousarray(img)
def rotate(img, angle):
"""
rotate image on specified angle
:param angle: angle in degrees
"""
height, width = img.shape[0:2]
mat = cv2.getRotationMatrix2D((width/2, height/2), angle, 1.0)
img = cv2.warpAffine(img, mat, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
def shift_scale_rotate(img, angle, scale, dx, dy):
"""
:param angle: in degrees
:param scale: relative scale
"""
height, width = img.shape[:2]
cc = math.cos(angle/180*math.pi) * scale
ss = math.sin(angle/180*math.pi) * scale
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width/2, height/2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width/2+dx*width, height/2+dy*height])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
img = cv2.warpPerspective(img, mat, (width, height),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_REFLECT_101)
return img
def center_crop(img, height, width):
h, w, c = img.shape
dy = (h-height)//2
dx = (w-width)//2
y1 = dy
y2 = y1 + height
x1 = dx
x2 = x1 + width
img = img[y1:y2, x1:x2, :]
return img
def shift_hsv(img, hue_shift, sat_shift, val_shift):
dtype = img.dtype
maxval = np.max(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.int32)
h, s, v = cv2.split(img)
h = cv2.add(h, hue_shift)
h = np.where(h < 0, maxval - h, h)
h = np.where(h > maxval, h - maxval, h)
h = h.astype(dtype)
s = clip(cv2.add(s, sat_shift), dtype, maxval)
v = clip(cv2.add(v, val_shift), dtype, maxval)
img = cv2.merge((h, s, v)).astype(dtype)
img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
return img
def shift_channels(img, r_shift, g_shift, b_shift):
img[...,0] = clip(img[...,0] + r_shift, np.uint8, 255)
img[...,1] = clip(img[...,1] + g_shift, np.uint8, 255)
img[...,2] = clip(img[...,2] + b_shift, np.uint8, 255)
return img
def clahe(img, clipLimit=2.0, tileGridSize=(8,8)):
img_yuv = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
img_output = cv2.cvtColor(img_yuv, cv2.COLOR_LAB2RGB)
return img_output
def blur(img, ksize):
return cv2.blur(img, (ksize, ksize))
def invert(img):
return 255 - img
def channel_shuffle(img):
ch_arr = [0, 1, 2]
np.random.shuffle(ch_arr)
img = img[..., ch_arr]
return img
def img_to_tensor(im, verbose=False):
'''AVE edit'''
im_out = np.moveaxis(im / (255. if im.dtype == np.uint8 else 1), -1, 0).astype(np.float32)
if verbose:
print ("augmentations.functiona.py.img_to_tensor(): im_out.shape:", im_out.shape)
print ("im_out.unique:", np.unique(im_out))
return im_out
def mask_to_tensor(mask, num_classes, verbose=False):
'''AVE edit'''
if num_classes > 1:
mask = img_to_tensor(mask)
else:
mask = np.expand_dims(mask / (255. if mask.dtype == np.uint8 else 1), 0).astype(np.float32)
if verbose:
print ("augmentations.functiona.py.img_to_tensor(): mask.shape:", mask.shape)
print ("mask.unique:", np.unique(mask))
return mask
| 28.005848 | 102 | 0.595114 | 0 | 0 | 0 | 0 | 173 | 0.036124 | 0 | 0 | 531 | 0.110879 |
c493ccaae899498f57b59dd3ded561a78518f5a9 | 417 | py | Python | regtestsWin_customBuildPy.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
]
| 29 | 2018-04-24T17:06:19.000Z | 2021-11-21T05:17:28.000Z | regtestsWin_customBuildPy.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
]
| 13 | 2018-04-05T08:34:27.000Z | 2021-10-04T14:24:41.000Z | regtestsWin_customBuildPy.py | greenwoodms/TRANSFORM-Library | dc152d4f0298d3f18385f2ea33645d87d7812915 | [
"Apache-2.0"
]
| 17 | 2018-08-06T22:18:01.000Z | 2022-01-29T21:38:17.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 14 09:49:13 2017
@author: vmg
"""
import os
import buildingspy.development.regressiontest as r
rt = r.Tester(check_html=False)#,tool="dymola")
LibPath = os.path.join("TRANSFORM")
ResPath = LibPath
rt.showGUI(True)
rt.setLibraryRoot(LibPath, ResPath)
rt.setNumberOfThreads(1)
#rt.TestSinglePackage('Media.Solids.Examples.Hastelloy_N_Haynes', SinglePack=True)
rt.run()
| 23.166667 | 82 | 0.748201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.453237 |
c49410af0e45267e29dfed5b9b7fc2e415dd48a2 | 2,039 | py | Python | src/deoxys/model/activations.py | huynhngoc/deoxys | b2e9936b723807e129fda36d8d6131ca00db558f | [
"MIT"
]
| 1 | 2021-12-28T15:48:45.000Z | 2021-12-28T15:48:45.000Z | src/deoxys/model/activations.py | huynhngoc/deoxys | b2e9936b723807e129fda36d8d6131ca00db558f | [
"MIT"
]
| 2 | 2020-06-26T11:03:53.000Z | 2020-06-26T11:05:09.000Z | src/deoxys/model/activations.py | huynhngoc/deoxys | b2e9936b723807e129fda36d8d6131ca00db558f | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
__author__ = "Ngoc Huynh Bao"
__email__ = "[email protected]"
from ..keras.layers import Activation
from ..keras.activations import deserialize
from ..utils import Singleton
class Activations(metaclass=Singleton):
"""
A singleton that contains all the registered customized activations
"""
def __init__(self):
self._activations = {}
def register(self, key, activation):
if not issubclass(activation, Activation):
raise ValueError(
"The customized activation has to be a subclass"
+ " of keras.activations.Activation"
)
if key in self._activations:
raise KeyError(
"Duplicated key, please use another key for this activation"
)
else:
self._activations[key] = activation
def unregister(self, key):
if key in self._activations:
del self._activations[key]
@property
def activations(self):
return self._activations
def register_activation(key, activation):
"""
Register the customized activation.
If the key name is already registered, it will raise a KeyError exception
Parameters
----------
key: str
The unique key-name of the activation
activation: tensorflow.keras.activations.Activation
The customized activation class
"""
Activations().register(key, activation)
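# Illustrative usage sketch (added example): registering a customized activation
# class under a new key. `MyActivation` is a hypothetical Activation subclass.
#     class MyActivation(Activation):
#         pass
#     register_activation('my_activation', MyActivation)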
def unregister_activation(key):
"""
Remove the registered activation with the key-name
Parameters
----------
key: str
The key-name of the activation to be removed
"""
Activations().unregister(key)
def activation_from_config(config):
if 'class_name' not in config:
raise ValueError('class_name is needed to define activation')
if 'config' not in config:
# auto add empty config for activation with only class_name
config['config'] = {}
return deserialize(config, custom_objects=Activations().activations)
| 26.141026 | 77 | 0.647376 | 840 | 0.411967 | 0 | 0 | 69 | 0.03384 | 0 | 0 | 896 | 0.439431 |
c4950cd78452abf9ab6be8b01808431c4aeef93d | 2,618 | py | Python | raspisump/reading.py | seanm/raspi-sump | 65456b5e0d1e93bb2574a46527f410a08f9f51ba | [
"MIT"
]
| 79 | 2015-03-11T12:13:35.000Z | 2022-03-30T04:35:09.000Z | raspisump/reading.py | seanm/raspi-sump | 65456b5e0d1e93bb2574a46527f410a08f9f51ba | [
"MIT"
]
| 72 | 2015-02-20T02:26:25.000Z | 2022-02-19T01:22:36.000Z | raspisump/reading.py | seanm/raspi-sump | 65456b5e0d1e93bb2574a46527f410a08f9f51ba | [
"MIT"
]
| 36 | 2015-01-11T12:11:43.000Z | 2022-01-07T01:35:30.000Z | """ Module to take a water_level reading."""
# Raspi-sump, a sump pump monitoring system.
# Al Audet
# http://www.linuxnorth.org/raspi-sump/
#
# All configuration changes should be done in raspisump.conf
# MIT License -- http://www.linuxnorth.org/raspi-sump/license.html
try:
import ConfigParser as configparser # Python2
except ImportError:
import configparser # Python3
from hcsr04sensor import sensor
from raspisump import log, alerts, heartbeat
config = configparser.RawConfigParser()
config.read("/home/pi/raspi-sump/raspisump.conf")
configs = {
"critical_water_level": config.getint("pit", "critical_water_level"),
"pit_depth": config.getint("pit", "pit_depth"),
"temperature": config.getint("pit", "temperature"),
"trig_pin": config.getint("gpio_pins", "trig_pin"),
"echo_pin": config.getint("gpio_pins", "echo_pin"),
"unit": config.get("pit", "unit"),
}
# If item in raspisump.conf add to configs dict. If not provide defaults.
try:
configs["alert_when"] = config.get("pit", "alert_when")
except configparser.NoOptionError:
configs["alert_when"] = "high"
try:
configs["heartbeat"] = config.getint("email", "heartbeat")
except configparser.NoOptionError:
configs["heartbeat"] = 0
def initiate_heartbeat():
"""Initiate the heartbeat email process if needed"""
if configs["heartbeat"] == 1:
heartbeat.determine_if_heartbeat()
else:
pass
def water_reading():
"""Initiate a water level reading."""
pit_depth = configs["pit_depth"]
trig_pin = configs["trig_pin"]
echo_pin = configs["echo_pin"]
temperature = configs["temperature"]
unit = configs["unit"]
value = sensor.Measurement(trig_pin, echo_pin, temperature, unit)
try:
raw_distance = value.raw_distance(sample_wait=0.3)
except SystemError:
log.log_errors(
"**ERROR - Signal not received. Possible cable or sensor problem."
)
exit(0)
return round(value.depth(raw_distance, pit_depth), 1)
def water_depth():
"""Determine the depth of the water, log result and generate alert
if needed.
"""
critical_water_level = configs["critical_water_level"]
water_depth = water_reading()
if water_depth < 0.0:
water_depth = 0.0
log.log_reading(water_depth)
if water_depth > critical_water_level and configs["alert_when"] == "high":
alerts.determine_if_alert(water_depth)
elif water_depth < critical_water_level and configs["alert_when"] == "low":
alerts.determine_if_alert(water_depth)
else:
pass
initiate_heartbeat()
| 29.088889 | 79 | 0.688312 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,026 | 0.391902 |
c49a318a7b8ef2bfca67b0d3b643cbd37094de2d | 2,830 | py | Python | pipelines/pancreas_pipeline.py | marvinquiet/RefConstruction_supervisedCelltyping | 7bdd02a0486c175785ec24461dc6356c4d172091 | [
"MIT"
]
| null | null | null | pipelines/pancreas_pipeline.py | marvinquiet/RefConstruction_supervisedCelltyping | 7bdd02a0486c175785ec24461dc6356c4d172091 | [
"MIT"
]
| null | null | null | pipelines/pancreas_pipeline.py | marvinquiet/RefConstruction_supervisedCelltyping | 7bdd02a0486c175785ec24461dc6356c4d172091 | [
"MIT"
]
| null | null | null | '''
Configuration generation for running Pancreas datasets
'''
import os, argparse
from pipelines import method_utils, dataloading_utils
from preprocess.process_train_test_data import *
if __name__ == "__main__":
data_dir = "~/gpu/data"
## parse arguments
import argparse
parser = argparse.ArgumentParser(description="Celltyping pipeline.")
parser.add_argument('data_source', help="Load which dataset",
choices=[
'pancreas', 'pancreas_seg_cond', 'pancreas_custom',
'pancreas_seg_mix', 'pancreas_multi_to_multi'
])
parser.add_argument('-m', '--method', help="Run which method",
choices=['MLP', 'MLP_GO', 'MLP_CP', 'GEDFN', 'ItClust', 'SVM_RBF', 'SVM_linear', 'RF'], ## remove DFN
required=True)
parser.add_argument('--select_on', help="Feature selection on train or test, or None of them",
choices=['train', 'test'])
parser.add_argument('--select_method', help="Feature selection method, Seurat/FEAST or None",
choices=['Seurat', 'FEAST', 'F-test'])
parser.add_argument('--n_features', help="Number of features selected",
default=1000, type=int)
parser.add_argument('--train', help="Specify which as train", required=True)
parser.add_argument('--test', help="Specify which as test", required=True)
parser.add_argument('--sample_seed', help="Downsample seed in combined individual effect",
default=0, type=int)
args = parser.parse_args()
pipeline_dir = "pipelines/result_Pancreas_collections"
result_prefix = pipeline_dir+os.sep+"result_"+args.data_source+'_'+\
args.train+'_to_'+args.test
os.makedirs(result_prefix, exist_ok=True)
## create file directory
if args.select_on is None and args.select_method is None:
result_dir = result_prefix+os.sep+"no_feature"
else:
result_dir = result_prefix+os.sep+args.select_method+'_'+\
str(args.n_features)+'_on_'+args.select_on
os.makedirs(result_dir, exist_ok=True)
load_ind, train_adata, test_adata = load_adata(result_dir)
if not load_ind:
train_adata, test_adata = dataloading_utils.load_Pancreas_adata(
data_dir, result_dir, args=args)
## whether to purify reference dataset
purify_method = ""
if "purify_dist" in args.data_source:
purify_method = "distance"
elif "purify_SVM" in args.data_source:
purify_method = "SVM"
train_adata, test_adata = dataloading_utils.process_loaded_data(
train_adata, test_adata, result_dir, args=args, purify_method=purify_method)
print("Train anndata: \n", train_adata)
print("Test anndata: \n", test_adata)
method_utils.run_pipeline(args, train_adata, test_adata, data_dir, result_dir)
| 41.617647 | 109 | 0.677032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 909 | 0.321201 |
c49b19da4180160b333ba71b17f93e31d23578f0 | 436 | py | Python | MachineLearning/StandardScaler/standardization.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
]
| null | null | null | MachineLearning/StandardScaler/standardization.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
]
| null | null | null | MachineLearning/StandardScaler/standardization.py | yexianyi/AI_Practice | 80499ab3a06ac055641aa069fe1e37864c9e41c4 | [
"Apache-2.0"
]
| null | null | null | import pandas as pd
from sklearn.preprocessing import StandardScaler
def stand_demo():
data = pd.read_csv("dating.txt")
print(data)
transfer = StandardScaler()
data = transfer.fit_transform(data[['milage', 'Liters', 'Consumtime']])
print("Standardization result: \n", data)
print("Mean of each figure: \n", transfer.mean_)
print("Variance of each figure: \n", transfer.var_)
return None
stand_demo()
| 25.647059 | 75 | 0.692661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.279817 |
c49cdbfcdbfea8b874ccc9f3311ef29e7510510a | 716 | py | Python | tests/test_primitive_roots.py | greysonDEV/rng | 5af76f5edff4de1b502c21ca1c1ce93243a618c8 | [
"MIT"
]
| null | null | null | tests/test_primitive_roots.py | greysonDEV/rng | 5af76f5edff4de1b502c21ca1c1ce93243a618c8 | [
"MIT"
]
| null | null | null | tests/test_primitive_roots.py | greysonDEV/rng | 5af76f5edff4de1b502c21ca1c1ce93243a618c8 | [
"MIT"
]
| null | null | null | from prng.util.util import primitive_roots
import pytest
def test_primitive_roots():
prim_roots_sets = [
[3, [2]],
[7, [3,5]],
[13, [2,6,7,11]],
[17, [3,5,6,7,10,11,12,14]],
[19, [2,3,10,13,14,15]],
[31, [3,11,12,13,17,21,22,24]],
[53, [2,3,5,8,12,14,18,19,20,21,22,26,27,31,32,33,34,35,39,41,45,48,50,51]],
[61, [2,6,7,10,17,18,26,30,31,35,43,44,51,54,55,59]],
[79, [3,6,7,28,29,30,34,35,37,39,43,47,48,53,54,59,60,63,66,68,70,74,75,77]],
[103, [5,6,11,12,20,21,35,40,43,44,45,48,51,53,54,62,65,67,70,71,74,75,77,78,84,85,86,87,88,96,99,101]],
]
assert all(sorted(primitive_roots(a)) == prs for a,prs in prim_roots_sets)
| 37.684211 | 112 | 0.544693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c49d07ec16361493f21d1cdf3590979db22f9935 | 383 | py | Python | hackerrank-python/xml-1-find-the-score.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
]
| 2 | 2021-09-06T22:13:12.000Z | 2021-11-22T08:50:04.000Z | hackerrank-python/xml-1-find-the-score.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
]
| null | null | null | hackerrank-python/xml-1-find-the-score.py | fmelihh/competitive-programming-solutions | c15c2f7d90153f35f9bd9ffcea20ac921564eacf | [
"MIT"
]
| null | null | null |
# https://www.hackerrank.com/challenges/xml-1-find-the-score/problem
import sys
import xml.etree.ElementTree as etree
def get_attr_number(node):
return etree.tostring(node).count(b'=')
if __name__ == '__main__':
sys.stdin.readline()
xml = sys.stdin.read()
tree = etree.ElementTree(etree.fromstring(xml))
root = tree.getroot()
print(get_attr_number(root))
| 23.9375 | 68 | 0.710183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.214099 |
c49d9514c95f15c6be6ba6695dcb54d27f071828 | 347 | py | Python | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
]
| 22 | 2020-01-03T17:32:00.000Z | 2021-11-07T09:31:44.000Z | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
]
| 10 | 2020-09-30T09:41:18.000Z | 2020-10-11T11:25:09.000Z | CodeChef/Contest/June Long/pricecon.py | GSri30/Competetive_programming | 0dc1681500a80b6f0979d0dc9f749357ee07bcb8 | [
"MIT"
]
| 25 | 2019-10-14T19:25:01.000Z | 2021-05-26T08:12:20.000Z | test = int(input())
while test > 0 :
n,k = map(int,input().split())
p = list(map(int,input().split()))
original = 0
later = 0
for i in p :
if i > k :
later += k
original += i
else :
later += i
original += i
print(original-later)
test -= 1 | 23.133333 | 39 | 0.414986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c49e67e8dbe87dd913b66006fd7f5daf6198c333 | 2,948 | py | Python | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
]
| 1 | 2022-02-28T18:10:29.000Z | 2022-02-28T18:10:29.000Z | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
]
| null | null | null | src/utils/Shell.py | vlab-cs-ucsb/quacky | c031577883550820e2586ce530e59eb30aeccc37 | [
"BSD-2-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 18 22:20:01 2014
@author: baki
"""
import shlex
from subprocess import Popen, PIPE
from .Log import Log
class Shell:
def __init__(self, TAG=""):
self.log = Log(TAG=TAG)
self.current_process = None
self.process_output = None
def setTag(self, tag):
self.log.setTag(tag)
def runcmd(self, cmd, cwd=None, shell=False):
# self.log.v("cmd: {}\n with params: cwd={}, shell={}".format(cmd, cwd, shell))
args = shlex.split(cmd)
p = Popen(args, stdout=PIPE, stderr=PIPE, cwd=cwd, shell=shell)
out, err = p.communicate()
if out:
out = out.decode("ascii")
# self.log.v("cmd output: {}\n".format(out))
if err:
err = err.decode("ascii")
# self.log.v("cmd error: {}\n".format(err))
return out, err
def runcmdBgrnd(self, cmd, out=PIPE, cwd=None, shell=False):
assert self.current_process == None, "currently, one shell object supports only one background process"
self.log.v("cmd: {}\n with params: out={}, cwd={}, shell={}".format(cmd, out, cwd, shell))
redirect_to = out
if out is not PIPE:
assert self.process_output == None, "currently, one shell object supports only one background process"
redirect_to = open(out, "w")
args = shlex.split(cmd)
p = Popen(args, stdout=redirect_to, stderr=redirect_to, cwd=cwd, shell=shell)
self.current_process = p
self.process_output = redirect_to
return p
def kill(self, process=None):
if process is None:
process = self.current_process
process and process.kill()
self.process_output and self.process_output.close()
def terminate(self, process=None):
if process is None:
process = self.current_process
process and process.terminate()
self.process_output and self.process_output.close()
def runGrep(self, search, subject, options):
cmd = "grep {} \"{}\" {}".format(options, search, subject)
return self.runcmd(cmd)
def rm(self, name):
cmd = "rm {}".format(name)
return self.runcmd(cmd)
def rmdir(self, name):
cmd = "rmdir {}".format(name)
return self.runcmd(cmd)
def rmrdir(self, name):
cmd = "rm -r {}".format(name)
return self.runcmd(cmd)
def mv(self, src, dst):
cmd = "mv {} {}".format(src, dst)
return self.runcmd(cmd)
def cp(self, src, dst):
cmd = "cp -r {} {}".format(src, dst)
return self.runcmd(cmd)
def mkdir(self, name):
cmd = "mkdir {} -p".format(name)
return self.runcmd(cmd)
def clean(self, name):
self.rmrdir(name)
self.mkdir(name)
| 32.043478 | 119 | 0.557327 | 2,792 | 0.947083 | 0 | 0 | 0 | 0 | 0 | 0 | 544 | 0.184532 |
c49f689d742bf5ec9d22f74b83fe32c9c62c281f | 4,458 | py | Python | scrapy_autounit/middleware.py | ogiaquino/scrapy-autounit | 97f00d2d62c1ad49bbac462018907abe6a20e4cd | [
"BSD-3-Clause"
]
| null | null | null | scrapy_autounit/middleware.py | ogiaquino/scrapy-autounit | 97f00d2d62c1ad49bbac462018907abe6a20e4cd | [
"BSD-3-Clause"
]
| null | null | null | scrapy_autounit/middleware.py | ogiaquino/scrapy-autounit | 97f00d2d62c1ad49bbac462018907abe6a20e4cd | [
"BSD-3-Clause"
]
| null | null | null | import os
import six
import copy
import pickle
import random
import logging
from scrapy.http import Request
from scrapy.exceptions import NotConfigured
from scrapy.commands.genspider import sanitize_module_name
from scrapy.spiders import CrawlSpider
from .utils import (
add_sample,
response_to_dict,
get_or_create_test_dir,
parse_request,
parse_object,
get_project_dir,
get_middlewares,
create_dir,
)
logger = logging.getLogger(__name__)
def _copy_settings(settings):
out = {}
for name in settings.getlist('AUTOUNIT_INCLUDED_SETTINGS', []):
out[name] = settings.get(name)
return out
class AutounitMiddleware:
def __init__(self, settings):
if not any(
self.__class__.__name__ in s
for s in settings.getwithbase('SPIDER_MIDDLEWARES').keys()
):
raise ValueError(
'%s must be in SPIDER_MIDDLEWARES' % (
self.__class__.__name__,))
if not settings.getbool('AUTOUNIT_ENABLED'):
raise NotConfigured('scrapy-autounit is not enabled')
if settings.getint('CONCURRENT_REQUESTS') > 1:
logger.warn(
'Recording with concurrency > 1! '
'Data races in shared object modification may create broken '
'tests.'
)
self.max_fixtures = settings.getint(
'AUTOUNIT_MAX_FIXTURES_PER_CALLBACK',
default=10
)
self.max_fixtures = \
self.max_fixtures if self.max_fixtures >= 10 else 10
self.base_path = settings.get(
'AUTOUNIT_BASE_PATH',
default=os.path.join(get_project_dir(), 'autounit')
)
create_dir(self.base_path, exist_ok=True)
self.fixture_counters = {}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def process_spider_input(self, response, spider):
filter_args = {'crawler', 'settings', 'start_urls'}
if isinstance(spider, CrawlSpider):
filter_args |= {'rules', '_rules'}
response.meta['_autounit'] = pickle.dumps({
'request': parse_request(response.request, spider),
'response': response_to_dict(response),
'spider_args': {
k: v for k, v in spider.__dict__.items()
if k not in filter_args
},
'middlewares': get_middlewares(spider),
})
return None
def process_spider_output(self, response, result, spider):
settings = spider.settings
processed_result = []
out = []
for elem in result:
out.append(elem)
is_request = isinstance(elem, Request)
if is_request:
_data = parse_request(elem, spider)
else:
_data = parse_object(copy.deepcopy(elem), spider)
processed_result.append({
'type': 'request' if is_request else 'item',
'data': _data
})
input_data = pickle.loads(response.meta.pop('_autounit'))
request = input_data['request']
callback_name = request['callback']
spider_attr_out = {
k: v for k, v in spider.__dict__.items()
if k not in ('crawler', 'settings', 'start_urls')
}
data = {
'spider_name': spider.name,
'request': request,
'response': input_data['response'],
'spider_args_out': spider_attr_out,
'result': processed_result,
'spider_args_in': input_data['spider_args'],
'settings': _copy_settings(settings),
'middlewares': input_data['middlewares'],
'python_version': 2 if six.PY2 else 3,
}
callback_counter = self.fixture_counters.setdefault(callback_name, 0)
self.fixture_counters[callback_name] += 1
test_dir, test_name = get_or_create_test_dir(
self.base_path,
sanitize_module_name(spider.name),
callback_name,
settings.get('AUTOUNIT_EXTRA_PATH'),
)
if callback_counter < self.max_fixtures:
add_sample(callback_counter + 1, test_dir, test_name, data)
else:
r = random.randint(0, callback_counter)
if r < self.max_fixtures:
add_sample(r + 1, test_dir, test_name, data)
return out
| 31.617021 | 77 | 0.593091 | 3,814 | 0.855541 | 0 | 0 | 85 | 0.019067 | 0 | 0 | 681 | 0.152759 |
c4a06bb562106e2918ecce48527f9b40a6d8d42c | 2,735 | py | Python | python_examples/create_tags/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
]
| 1 | 2021-12-20T16:49:00.000Z | 2021-12-20T16:49:00.000Z | python_examples/create_tags/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
]
| 2 | 2020-11-20T04:51:16.000Z | 2021-06-16T17:02:35.000Z | python_examples/create_tags/utils.py | kirank0220/api-examples | 9d6c51eeb2d4e38d95b0b7d88fd30fe96ef28d20 | [
"MIT"
]
| 1 | 2020-11-20T04:46:17.000Z | 2020-11-20T04:46:17.000Z | #########################################################################
# _________ ___. ______________________ ___
# \_ ___ \___.__.\_ |__ ___________ / _____/\______ \ \/ /
# / \ \< | | | __ \_/ __ \_ __ \/ \ ___ | _/\ /
# \ \___\___ | | \_\ \ ___/| | \/\ \_\ \| | \/ \
# \______ / ____| |___ /\___ >__| \______ /|____|_ /___/\ \
# \/\/ \/ \/ \/ \/ \_/
#
#
import os
import json
import requests
from collections import OrderedDict
from openpyxl import Workbook
from openpyxl.styles.fills import FILL_SOLID
from openpyxl.styles import Color, PatternFill, Font, Border, Side
from openpyxl.styles import colors
from openpyxl.cell import Cell
from tqdm import tqdm
from glom import glom
def _cell_value(cell):
return "{}".format(cell.value).strip() if cell and cell.value else ""
def columns_for_headers(row, header_map):
mapping = {}
for idx, col in enumerate(row):
column = _cell_value(col)
if column and header_map.get(column, None):
mapping[idx] = header_map.get(column, None)
return mapping
def process_companies(sheet, header_mapping):
companies = []
headers = {}
for _, row in enumerate(sheet.iter_rows()):
if not headers:
headers = columns_for_headers(row, header_mapping)
if headers and len(headers) != 2:
print(headers)
raise Exception("Need column headers for both company names and tags")
else:
company = OrderedDict()
for column_index, col in enumerate(row):
if column_index not in headers:
continue
if col.value is not None:
try:
company[headers[column_index]] = bytearray(col.value, "utf-8").decode("utf-8")
except:
company[headers[column_index]] = col.value
if not company:
continue
if "tags" not in company:
print("Company did not have any tags: ", company, " did you provide the correct column header?")
continue
if "name" not in company:
print("Company did not have a name: ", company, " did you provide the correct column header?")
continue
company["tags"] = [str(tag).strip() for tag in company["tags"].split(",") if tag and str(tag).strip()]
if not company["tags"]:
print("Company did not have any tags: ", company)
else:
companies.append(company)
return companies
| 35.064103 | 114 | 0.527971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.291773 |
c4a0dfed3531558a43bad867fbac20a6c63fe3e4 | 3,104 | py | Python | gpytorch/lazy/non_lazy_tensor.py | phumm/gpytorch | 4e8042bcecda049956f8f9e823d82ba6340766d5 | [
"MIT"
]
| 1 | 2019-09-30T06:51:03.000Z | 2019-09-30T06:51:03.000Z | gpytorch/lazy/non_lazy_tensor.py | phumm/gpytorch | 4e8042bcecda049956f8f9e823d82ba6340766d5 | [
"MIT"
]
| null | null | null | gpytorch/lazy/non_lazy_tensor.py | phumm/gpytorch | 4e8042bcecda049956f8f9e823d82ba6340766d5 | [
"MIT"
]
| 1 | 2020-09-16T16:35:27.000Z | 2020-09-16T16:35:27.000Z | #!/usr/bin/env python3
import torch
from .lazy_tensor import LazyTensor
class NonLazyTensor(LazyTensor):
def _check_args(self, tsr):
if not torch.is_tensor(tsr):
return "NonLazyTensor must take a torch.Tensor; got {}".format(tsr.__class__.__name__)
if tsr.dim() < 2:
return "NonLazyTensor expects a matrix (or batches of matrices) - got a Tensor of size {}.".format(
tsr.shape
)
def __init__(self, tsr):
"""
Not a lazy tensor
Args:
- tsr (Tensor: matrix) a Tensor
"""
super(NonLazyTensor, self).__init__(tsr)
self.tensor = tsr
def _expand_batch(self, batch_shape):
return self.__class__(self.tensor.expand(*batch_shape, *self.matrix_shape))
def _get_indices(self, row_index, col_index, *batch_indices):
# Perform the __getitem__
res = self.tensor[(*batch_indices, row_index, col_index)]
return res
def _getitem(self, row_index, col_index, *batch_indices):
# Perform the __getitem__
res = self.tensor[(*batch_indices, row_index, col_index)]
return self.__class__(res)
def _matmul(self, rhs):
return torch.matmul(self.tensor, rhs)
def _prod_batch(self, dim):
return self.__class__(self.tensor.prod(dim))
def _quad_form_derivative(self, left_vecs, right_vecs):
res = left_vecs.matmul(right_vecs.transpose(-1, -2))
return (res,)
def _size(self):
return self.tensor.size()
def _sum_batch(self, dim):
return self.__class__(self.tensor.sum(dim))
def _transpose_nonbatch(self):
return NonLazyTensor(self.tensor.transpose(-1, -2))
def _t_matmul(self, rhs):
return torch.matmul(self.tensor.transpose(-1, -2), rhs)
def diag(self):
if self.tensor.ndimension() < 3:
return self.tensor.diag()
else:
row_col_iter = torch.arange(0, self.matrix_shape[-1], dtype=torch.long, device=self.device)
return self.tensor[..., row_col_iter, row_col_iter].view(*self.batch_shape, -1)
def evaluate(self):
return self.tensor
def __add__(self, other):
if isinstance(other, NonLazyTensor):
return NonLazyTensor(self.tensor + other.tensor)
else:
return super(NonLazyTensor, self).__add__(other)
def mul(self, other):
if isinstance(other, NonLazyTensor):
return NonLazyTensor(self.tensor * other.tensor)
else:
return super(NonLazyTensor, self).mul(other)
def lazify(obj):
"""
A function which ensures that `obj` is a LazyTensor.
If `obj` is a LazyTensor, this function does nothing.
If `obj` is a (normal) Tensor, this function wraps it with a `NonLazyTensor`.
"""
if torch.is_tensor(obj):
return NonLazyTensor(obj)
elif isinstance(obj, LazyTensor):
return obj
else:
raise TypeError("object of class {} cannot be made into a LazyTensor".format(obj.__class__.__name__))
__all__ = ["NonLazyTensor", "lazify"]
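# Illustrative usage sketch (added example, not part of the original module):
#     mat = torch.randn(3, 3)
#     lazy_mat = lazify(mat)        # plain tensor -> NonLazyTensor
#     lazify(lazy_mat) is lazy_mat  # an existing LazyTensor passes through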
| 30.431373 | 111 | 0.634665 | 2,513 | 0.809601 | 0 | 0 | 0 | 0 | 0 | 0 | 585 | 0.188466 |
c4a0e5d601dd26ad6e285ee33bc0cea8cb5b622e | 1,956 | py | Python | aoc_wim/aoc2019/q19.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
]
| 20 | 2019-10-15T07:33:13.000Z | 2022-01-19T13:40:36.000Z | aoc_wim/aoc2019/q19.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
]
| 5 | 2019-02-01T23:31:27.000Z | 2021-12-03T06:55:58.000Z | aoc_wim/aoc2019/q19.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
]
| 8 | 2019-12-03T15:41:23.000Z | 2021-12-06T17:13:57.000Z | """
--- Day 19: Tractor Beam ---
https://adventofcode.com/2019/day/19
"""
from aocd import data
from aoc_wim.aoc2019 import IntComputer
from aoc_wim.zgrid import ZGrid
from aoc_wim.search import Bisect
import functools
@functools.lru_cache(maxsize=100**2)
def beam(z):
comp = IntComputer(data, inputs=[int(z.imag), int(z.real)])
comp.run(until=IntComputer.op_output)
[result] = comp.output
return result
def left_edge_of_beam(y, gradient, beam=beam):
x = int(y / gradient)
z = x + y*1j
if beam(z):
while beam(z - 1):
z -= 1
else:
while not beam(z + 1):
z += 1
z += 1
assert beam(z) and not beam(z - 1)
return z
def locate_square(beam, width, gradient_estimate=1., hi=None):
d = width - 1
def check(y):
z = left_edge_of_beam(y, gradient_estimate, beam)
val = beam(z + d * ZGrid.NE)
print(f"y={y}", "wide" if val else "narrow")
return val
bisect = Bisect(check, lo=d, hi=hi)
print("bisecting...")
y = bisect.run() + 1
z = left_edge_of_beam(y, gradient_estimate, beam) + d * ZGrid.N
return z
if __name__ == "__main__":
print("populating 50x50 zgrid...")
grid = ZGrid()
x0 = 0
for y in range(50):
on = False
for x in range(x0, 50):
z = x + y * 1j
val = grid[z] = beam(z)
if not on and val:
on = True
x0 = x
if x0:
m = y / x0
if on and not val:
break
print("part a", sum(grid.values()))
grid.translate({0: ".", 1: "#"})
grid.draw()
print("initial gradient is approx -->", m)
print("refining gradient estimate -->", end=" ")
z = left_edge_of_beam(2000, gradient=m)
m = z.imag/z.real
print(m)
z = locate_square(beam, width=100, gradient_estimate=m)
print("part b", int(z.real)*10000 + int(z.imag))
| 24.759494 | 67 | 0.552147 | 0 | 0 | 0 | 0 | 200 | 0.102249 | 0 | 0 | 235 | 0.120143 |
c4a20cea738e338abf0c6eb0710a2bbf72908f18 | 378 | py | Python | BlurDetection.py | samaritan-security/samaritan-backend | 3a4450e4a2e7a823d6d2fb1e982871ac0aa97744 | [
"WTFPL"
]
| null | null | null | BlurDetection.py | samaritan-security/samaritan-backend | 3a4450e4a2e7a823d6d2fb1e982871ac0aa97744 | [
"WTFPL"
]
| 59 | 2020-02-05T03:09:43.000Z | 2020-04-23T19:29:58.000Z | BlurDetection.py | samaritan-security/samaritan-backend | 3a4450e4a2e7a823d6d2fb1e982871ac0aa97744 | [
"WTFPL"
]
| null | null | null | import cv2
def variance_of_laplacian(image):
return cv2.Laplacian(image, cv2.CV_64F).var()
"""
checks if an image is blurry
returns True if blurry, False otherwise
"""
def detect_blurry_image(image, threshold):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = variance_of_laplacian(image)
if(blur < threshold):
return True
return False | 19.894737 | 50 | 0.708995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.201058 |
c4a251a0e7e64524dd68b799ffbb2a257e20933b | 894 | py | Python | python-essential-training/4_operators/main.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
]
| null | null | null | python-essential-training/4_operators/main.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
]
| null | null | null | python-essential-training/4_operators/main.py | alexprodan99/python-workspace | 8c805afc29fafe3916759d1cf07e597f945b8b45 | [
"MIT"
]
| null | null | null | def main():
# Arithmetic operators
a = 7
b = 2
print(f'{a} + {b} = {a+b}')
print(f'{a} - {b} = {a-b}')
print(f'{a} * {b} = {a*b}')
print(f'{a} / {b} = {a/b}')
print(f'{a} // {b} = {a//b}')
print(f'{a} % {b} = {a%b}')
print(f'{a} ^ {b} = {a**b}')
# Bitwise operators
# &, |, ^, <<, >>
print(f'{a} & {b} = {a&b}')
print(f'{a} | {b} = {a|b}')
print(f'{a} ^ {b} = {a^b}')
print(f'{a} << {b} = {a<<b}')
print(f'{a} >> {b} = {a>>b}')
a = 0xff
print(a) # 255
# fill with zeroes and second arg is the minimum number of bits that will be displayed
print(f'hex(a)={a:03x}') # 0ff
print(f'bin(a)={a:09b}')
# Comparison operators
# >,<,==,!=, >=, <=
# Boolean operators
# and, or, not, in, not in, is, is not
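    # Added examples (not in the original file): the comparison and boolean
    # operators listed above, shown in the same print style as the rest.
    print(f'{a} > {b} = {a > b}')
    print(f'{a} == {b} = {a == b}')
    print(f'{a} != {b} = {a != b}')
    print(f'True and False = {True and False}')
    print(f'not True = {not True}')
    print(f'2 in [1, 2, 3] = {2 in [1, 2, 3]}')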
if __name__ == '__main__':
main() | 21.804878 | 90 | 0.401566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 543 | 0.607383 |
c4a2a593c0a2d8ff337685d877ea3ffe9f0a0f35 | 2,689 | py | Python | UPGen/utils.py | HenryLiangzy/COMP9517_Group | 83be7304bee47d52781ea71f06838cd148dbd0bd | [
"Apache-2.0"
]
| 21 | 2020-04-24T01:14:30.000Z | 2021-11-26T09:44:00.000Z | UPGen/utils.py | HenryLiangzy/COMP9517_Group | 83be7304bee47d52781ea71f06838cd148dbd0bd | [
"Apache-2.0"
]
| null | null | null | UPGen/utils.py | HenryLiangzy/COMP9517_Group | 83be7304bee47d52781ea71f06838cd148dbd0bd | [
"Apache-2.0"
]
| 2 | 2020-05-18T11:43:17.000Z | 2020-06-19T13:13:14.000Z | """
Helper functions and utilities
"""
from datetime import datetime as dt
from mrcnn import visualize
import numpy as np
import os
import cv2
TIMESTAMP_FORMAT = "%d/%m/%Y %H:%M:%S"
class Logger(object):
"""
Log events and information to a file
"""
def __init__(self, savePath):
self.savePath = savePath
self.log_file = open(self.savePath, 'a')
self.log_line("Start of Log File")
def close(self):
self.log_line("End of Log File")
self.log_file.close()
def flush(self):
self.log_file.flush()
def time_stamp(self):
now = dt.now()
date_time = now.strftime(TIMESTAMP_FORMAT)
self.log_file.write(date_time + ': ')
def log_line(self, *args):
'''
Write each thing to the log file
'''
self.time_stamp()
for log_item in args:
self.log_file.write(str(log_item) + ' ')
self.log_file.write('\n')
self.flush()
def log(self, *args):
'''
Write each thing to the log file
'''
self.time_stamp()
for log_item in args:
self.log_file.write(str(log_item) + ' ')
self.flush()
def newline(self):
self.log_file.write("\n")
self.flush()
def mask_to_rgb(mask):
"""
Converts a mask to RGB Format
"""
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colours[i][c] * 255), rgb_mask[:, :, c])
return rgb_mask
def mask_to_outlined(mask):
"""
Converts a mask to RGB Format
"""
colours = visualize.random_colors(mask.shape[2])
rgb_mask = np.zeros((mask.shape[0], mask.shape[1], 3))
for i in range(mask.shape[2]):
for c in range(3):
rgb_mask[:, :, c] = np.where(mask[:, :, i] != 0, int(colours[i][c] * 255), rgb_mask[:, :, c])
# put edges over the top of the colours
for i in range(mask.shape[2]):
# Find the contour of the leaf
threshold = mask[:, :, i]
threshold[threshold != 0] = 255
_, contours, hierarchy = cv2.findContours(threshold.astype(np.uint8),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Draw outline on mask
if len(contours) > 0:
cv2.drawContours(rgb_mask, [contours[0]], 0, (255, 255, 255), thickness=1)
return rgb_mask
def check_create_dir(directory):
if not os.path.isdir(directory):
print("creating directory:", directory)
os.mkdir(directory)
return True
return False | 25.855769 | 115 | 0.579026 | 1,103 | 0.41019 | 0 | 0 | 0 | 0 | 0 | 0 | 480 | 0.178505 |
c4a31e4a9faadb779ad5e3539b89e160045375e9 | 108 | py | Python | lmctl/project/mutate/base.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
]
| 3 | 2021-07-19T09:46:01.000Z | 2022-03-07T13:51:25.000Z | lmctl/project/mutate/base.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
]
| 43 | 2019-08-27T12:36:29.000Z | 2020-08-27T14:50:40.000Z | lmctl/project/mutate/base.py | manojn97/lmctl | 844925cb414722351efac90cb97f10c1185eef7a | [
"Apache-2.0"
]
| 7 | 2020-09-22T20:32:17.000Z | 2022-03-29T12:25:51.000Z | import abc
class Mutator(abc.ABC):
def apply(self, original_content):
return original_content
| 15.428571 | 38 | 0.712963 | 95 | 0.87963 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c4a3253a85318d51afd9e6db7f79225a1972648a | 630 | py | Python | src/django_otp/conf.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
]
| 318 | 2019-08-27T15:57:05.000Z | 2022-03-30T08:38:29.000Z | src/django_otp/conf.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
]
| 77 | 2019-09-17T11:48:38.000Z | 2022-03-13T17:26:56.000Z | src/django_otp/conf.py | jaap3/django-otp | d7980bf516018319158570cc75353c905375a3ab | [
"BSD-2-Clause"
]
| 76 | 2019-08-30T20:29:40.000Z | 2022-03-30T09:14:36.000Z | import django.conf
class Settings:
"""
This is a simple class to take the place of the global settings object. An
instance will contain all of our settings as attributes, with default values
if they are not specified by the configuration.
"""
defaults = {
'OTP_LOGIN_URL': django.conf.settings.LOGIN_URL,
'OTP_ADMIN_HIDE_SENSITIVE_DATA': False,
}
def __getattr__(self, name):
if name in self.defaults:
return getattr(django.conf.settings, name, self.defaults[name])
else:
return getattr(django.conf.settings, name)
settings = Settings()
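# Illustrative behaviour sketch (added note, not part of the original module):
#     settings.OTP_LOGIN_URL                  # falls back to Django's LOGIN_URL
#     settings.OTP_ADMIN_HIDE_SENSITIVE_DATA  # defaults to False unless overridden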
| 27.391304 | 80 | 0.668254 | 584 | 0.926984 | 0 | 0 | 0 | 0 | 0 | 0 | 269 | 0.426984 |
c4a3547cec59bda0f54c29fe3708b9bf82715b42 | 544 | py | Python | Moderation/purge.py | DevFlock/Multis | 8332edddcbb957ad8fc47766d102295da8aef591 | [
"MIT"
]
| 3 | 2020-12-27T20:32:14.000Z | 2021-09-02T08:59:34.000Z | Moderation/purge.py | DevFlock/Multis | 8332edddcbb957ad8fc47766d102295da8aef591 | [
"MIT"
]
| 1 | 2021-05-09T21:44:42.000Z | 2022-03-01T22:36:53.000Z | Moderation/purge.py | DevFlock/Multis | 8332edddcbb957ad8fc47766d102295da8aef591 | [
"MIT"
]
| 1 | 2021-05-10T22:55:41.000Z | 2021-05-10T22:55:41.000Z | import asyncio
import discord
from discord.ext import commands
from discord.ext.commands.core import has_permissions
class cog(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=["clear"])
@has_permissions(ban_members=True)
    async def purge(self, ctx, count: int):
        # delete the requested messages plus the command invocation itself
        await ctx.channel.purge(limit=count + 1)
        message = await ctx.send(f"Deleted {count} messages.")
        # pause briefly so the confirmation is visible, then clean it up too
        await asyncio.sleep(2)
        await message.delete()
def setup(client):
client.add_cog(cog(client))
| 24.727273 | 62 | 0.693015 | 373 | 0.685662 | 0 | 0 | 281 | 0.516544 | 201 | 0.369485 | 35 | 0.064338 |
c4a3b7fd35e583f4df4df37c10b28021b5e84c76 | 184 | py | Python | tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
]
| 296 | 2020-03-16T19:55:00.000Z | 2022-01-10T19:46:05.000Z | tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
]
| 57 | 2020-03-17T11:15:57.000Z | 2021-07-10T14:42:27.000Z | tensorboard/acceptance/__init__.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
]
| 38 | 2020-03-17T21:06:05.000Z | 2022-02-08T03:19:34.000Z | from .test_tensorboard_rest_api import TestTensorboardRestAPI
from .test_tensorboard_server import TestTensorboardServer
from .test_tensorboard_endpoints import TestTensorboardEndpoint | 61.333333 | 63 | 0.923913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
c4a3dde4993c9a95e8e97065cdef59f7fea5aa64 | 3,988 | py | Python | tests/store/test_fetch_purchases_to_ship.py | yuzi-ziyu/alphasea-agent | a8ff803fd7dedd50c757630aa83b79fb624c0414 | [
"CC0-1.0"
]
| 1 | 2022-01-27T14:29:39.000Z | 2022-01-27T14:29:39.000Z | tests/store/test_fetch_purchases_to_ship.py | yuzi-ziyu/alphasea-agent | a8ff803fd7dedd50c757630aa83b79fb624c0414 | [
"CC0-1.0"
]
| null | null | null | tests/store/test_fetch_purchases_to_ship.py | yuzi-ziyu/alphasea-agent | a8ff803fd7dedd50c757630aa83b79fb624c0414 | [
"CC0-1.0"
]
| null | null | null | from unittest import TestCase
from ..helpers import (
create_web3,
create_contract,
get_future_execution_start_at_timestamp,
proceed_time,
get_prediction_time_shift,
get_purchase_time_shift,
get_shipping_time_shift,
get_publication_time_shift,
get_tournament_id,
get_chain_id,
create_store,
generate_redis_namespace,
BaseHardhatTestCase
)
from src.web3 import get_account_address
execution_start_at = get_future_execution_start_at_timestamp()
content = 'abc'.encode()
model_id = 'model1'
model_id_other = 'model_other'
class TestStoreFetchPurchasesToShip(BaseHardhatTestCase):
def setUp(self):
super().setUp()
w3 = create_web3()
contract = create_contract(w3)
store = create_store(w3, contract)
self.store = store
self.w3 = w3
w3_other = create_web3(account_index=1)
contract_other = create_contract(w3_other)
store_other = create_store(w3_other, contract_other)
w3_purchaser = create_web3(account_index=2)
contract_purchaser = create_contract(w3_purchaser)
store_purchaser = create_store(w3_purchaser, contract_purchaser)
self.store_purchaser = store_purchaser
self.w3_purchaser = w3_purchaser
# predict
proceed_time(w3, execution_start_at + get_prediction_time_shift())
store.create_models_if_not_exist([dict(
model_id=model_id,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store.create_predictions([dict(
model_id=model_id,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# other predict
store_other.create_models_if_not_exist([dict(
model_id=model_id_other,
tournament_id=get_tournament_id(),
prediction_license='CC0-1.0',
)])
store_other.create_predictions([dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
content=content,
price=1,
)])
# purchase
proceed_time(w3, execution_start_at + get_purchase_time_shift())
store_purchaser.create_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
), dict(
model_id=model_id_other,
execution_start_at=execution_start_at,
)])
def test_ok(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [{
**purchases[0],
'model_id': model_id,
'execution_start_at': execution_start_at,
'purchaser': get_account_address(self.w3_purchaser.eth.default_account),
}])
def test_different_tournament_id(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id='different',
execution_start_at=execution_start_at
)
self.assertEqual(purchases, [])
def test_different_execution_start_at(self):
purchases = self.store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at + 1,
)
self.assertEqual(purchases, [])
def test_already_shipped(self):
store = self.store
# ship
proceed_time(self.w3, execution_start_at + get_shipping_time_shift())
store.ship_purchases([dict(
model_id=model_id,
execution_start_at=execution_start_at,
purchaser=get_account_address(self.w3_purchaser.eth.default_account),
)])
purchases = store.fetch_purchases_to_ship(
tournament_id=get_tournament_id(),
execution_start_at=execution_start_at,
)
self.assertEqual(purchases, [])
| 31.904 | 84 | 0.654965 | 3,412 | 0.855567 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.034102 |
c4a4559c8dad0a9248c5e83d6827ea4d86bb7ecb | 579 | py | Python | tests/test_add_option_backtrace.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | [
"MIT"
]
| 11,391 | 2018-12-08T17:44:13.000Z | 2022-03-31T17:55:24.000Z | tests/test_add_option_backtrace.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | [
"MIT"
]
| 610 | 2018-12-08T18:03:03.000Z | 2022-03-31T22:28:14.000Z | tests/test_add_option_backtrace.py | ponponon/loguru | d38ced7539b888e9e9db7495f49f4499b3ee77e1 | [
"MIT"
]
| 601 | 2018-12-08T17:46:42.000Z | 2022-03-30T04:23:56.000Z | from loguru import logger
# See "test_catch_exceptions.py" for extended testing
def test_backtrace(writer):
logger.add(writer, format="{message}", backtrace=True)
try:
1 / 0
except Exception:
logger.exception("")
result_with = writer.read().strip()
logger.remove()
writer.clear()
logger.add(writer, format="{message}", backtrace=False)
try:
1 / 0
except Exception:
logger.exception("")
result_without = writer.read().strip()
assert len(result_with.splitlines()) > len(result_without.splitlines())
| 23.16 | 75 | 0.651123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.136442 |
c4a4edff90f57692413fd77c390b6d607d322a51 | 251 | py | Python | BasicPythonPrograms/PythonDestructor.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
]
| null | null | null | BasicPythonPrograms/PythonDestructor.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
]
| null | null | null | BasicPythonPrograms/PythonDestructor.py | Pushkar745/PythonProgramming | ea60e97b70d46fb63ef203913c8b3f9570232dd3 | [
"Apache-2.0"
]
| null | null | null | class Employee:
    # Initializing
def __init__(self):
print('Employee created ')
#Deleting (Calling destructor)
def __del__(self):
print('Destructor called,Employee deleted')
obj=Employee()
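# Dropping the last reference triggers __del__ (the destructor)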
del obj | 25.1 | 55 | 0.609562 | 216 | 0.860558 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.394422 |
c4a64cd498868ef1b6019445d7127a1f346b9fe4 | 13,670 | py | Python | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
]
| 1 | 2019-12-11T19:13:59.000Z | 2019-12-11T19:13:59.000Z | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | envi/registers.py | ConfusedMoonbear/vivisect | 8d6048037f85f745cd11923c6a8d662c150fe330 | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | """
Similar to the memory subsystem, this is a unified way to
access information about objects which contain registers
"""
import envi.bits as e_bits
from envi.const import *
class InvalidRegisterName(Exception):
pass
class RegisterContext:
def __init__(self, regdef=(), metas=(), pcindex=None, spindex=None, srindex=None):
"""
Hand in a register definition which consists of
a list of (<name>, <width>) tuples.
"""
self.loadRegDef(regdef)
self.loadRegMetas(metas)
self.setRegisterIndexes(pcindex, spindex, srindex=srindex)
self._rctx_dirty = False
def getRegisterSnap(self):
"""
Use this to bulk save off the register state.
"""
return list(self._rctx_vals)
def setRegisterSnap(self, snap):
"""
Use this to bulk restore the register state.
NOTE: This may only be used under the assumption that the
RegisterContext has been initialized the same way
        (like context switches in tracers, or emulation snaps)
"""
self._rctx_vals = list(snap)
def isDirty(self):
"""
        Returns true if registers in this context have been modified
since their import.
"""
return self._rctx_dirty
def setIsDirty(self, bool):
self._rctx_dirty = bool
def setRegisterIndexes(self, pcindex, spindex, srindex=None):
self._rctx_pcindex = pcindex
self._rctx_spindex = spindex
self._rctx_srindex = srindex
def loadRegDef(self, regdef, defval=0):
"""
Load a register definition. A register definition consists
of a list of tuples with the following format:
(regname, regwidth)
NOTE: All widths in envi RegisterContexts are in bits.
"""
self._rctx_regdef = regdef # Save this for snaps etc..
self._rctx_names = {}
self._rctx_ids = {}
self._rctx_widths = []
self._rctx_vals = []
self._rctx_masks = []
for i, (name, width) in enumerate(regdef):
self._rctx_names[name] = i
self._rctx_ids[i] = name
self._rctx_widths.append(width)
self._rctx_masks.append((2**width)-1)
self._rctx_vals.append(defval)
def getRegDef(self):
return self._rctx_regdef
def loadRegMetas(self, metas, statmetas=None):
"""
Load a set of defined "meta" registers for this architecture. Meta
        registers are defined as registers that exist as a subset of the bits
in some other "real" register. The argument metas is a list of tuples
with the following format:
(regname, regidx, reg_shift_offset, reg_width)
        The following example is for the AX register in the i386 subsystem:
        regname: "ax"
        regidx: the index of the containing "eax" register
        reg_shift_offset: 0
        reg_width: 16
Optionally a set of status meta registers can be loaded as well.
The argument is a list of tuples with the following format:
(regname, regidx, reg_shift_offset, reg_width, description)
"""
self._rctx_regmetas = metas
for name, idx, offset, width in metas:
self.addMetaRegister(name, idx, offset, width)
self._rctx_statmetas = statmetas
def addMetaRegister(self, name, idx, offset, width):
"""
Meta registers are registers which are really just directly
addressable parts of already existing registers (eax -> al).
To add a meta register, you give the name, the idx of the *real*
        register, the width of the meta reg, and its left-shifted (in bits)
offset into the real register value. The RegisterContext will take
care of accesses after that.
"""
newidx = (offset << 24) + (width << 16) + idx
self._rctx_names[name] = newidx
self._rctx_ids[newidx] = name
def isMetaRegister(self, index):
return (index & 0xffff) != index
def _rctx_Import(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, populate our values from it.
NOTE: This also clears the dirty flag
"""
# On import from a structure, we are clean again.
self._rctx_dirty = False
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
x = getattr(sobj, name, None)
if x != None:
self._rctx_vals[idx] = x
def _rctx_Export(self, sobj):
"""
Given an object with attributes with the same names as
registers in our context, set the ones he has to match
our values.
"""
for name,idx in self._rctx_names.items():
# Skip meta registers
if (idx & 0xffff) != idx:
continue
if hasattr(sobj, name):
setattr(sobj, name, self._rctx_vals[idx])
def getRegisterInfo(self, meta=False):
"""
Return an object which can be stored off, and restored
to re-initialize a register context. (much like snapshot
but it takes the definitions with it)
"""
regdef = self._rctx_regdef
regmeta = self._rctx_regmetas
pcindex = self._rctx_pcindex
spindex = self._rctx_spindex
snap = self.getRegisterSnap()
return (regdef, regmeta, pcindex, spindex, snap)
def setRegisterInfo(self, info):
regdef, regmeta, pcindex, spindex, snap = info
self.loadRegDef(regdef)
self.loadRegMetas(regmeta)
self.setRegisterIndexes(pcindex, spindex)
self.setRegisterSnap(snap)
def getRegisterName(self, index):
return self._rctx_ids.get(index,"REG%.8x" % index)
def getProgramCounter(self):
"""
Get the value of the program counter for this register context.
"""
return self.getRegister(self._rctx_pcindex)
def setProgramCounter(self, value):
"""
Set the value of the program counter for this register context.
"""
self.setRegister(self._rctx_pcindex, value)
def getStackCounter(self):
return self.getRegister(self._rctx_spindex)
def setStackCounter(self, value):
self.setRegister(self._rctx_spindex, value)
def hasStatusRegister(self):
'''
Returns True if this context is aware of a status register.
'''
if self._rctx_srindex == None:
return False
return True
def getStatusRegNameDesc(self):
'''
Return a list of status register names and descriptions.
'''
return [(name, desc) for name, idx, offset, width, desc in self._rctx_statmetas]
def getStatusRegister(self):
'''
Gets the status register for this register context.
'''
return self.getRegister(self._rctx_srindex)
def setStatusRegister(self, value):
'''
Sets the status register for this register context.
'''
self.setRegister(self._rctx_srindex, value)
def getStatusFlags(self):
'''
Return a dictionary of reg name and reg value for the meta registers
that are part of the status register.
'''
ret = {}
for name, idx, offset, width, desc in self._rctx_statmetas:
ret[name] = self.getRegisterByName(name)
return ret
def getRegisterByName(self, name):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
return self.getRegister(idx)
def setRegisterByName(self, name, value):
idx = self._rctx_names.get(name)
if idx == None:
raise InvalidRegisterName("Unknown Register: %s" % name)
self.setRegister(idx, value)
def getRegisterNames(self):
'''
Returns a list of the 'real' (non meta) registers.
'''
regs = [rname for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisterNameIndexes(self):
'''
Return a list of all the 'real' (non meta) registers and their indexes.
Example: for regname, regidx in x.getRegisterNameIndexes():
'''
regs = [(rname, ridx) for rname, ridx in self._rctx_names.items()
if not self.isMetaRegister(ridx)]
return regs
def getRegisters(self):
"""
Get all the *real* registers from this context as a dictionary of name
value pairs.
"""
ret = {}
for name,idx in self._rctx_names.items():
if (idx & 0xffff) != idx:
continue
ret[name] = self.getRegister(idx)
return ret
def setRegisters(self, regdict):
"""
For any name value pairs in the specified dictionary, set the current
register values in this context.
"""
for name,value in regdict.items():
self.setRegisterByName(name, value)
def getRegisterIndex(self, name):
"""
Get a register index by name.
(faster to use the index multiple times)
"""
return self._rctx_names.get(name)
def getRegisterWidth(self, index):
"""
Return the width of the register which lives at the specified
index (width is always in bits).
"""
ridx = index & 0xffff
if ridx == index:
return self._rctx_widths[index]
width = (index >> 16) & 0xff
return width
def getRegister(self, index):
"""
Return the current value of the specified register index.
"""
ridx = index & 0xffff
value = self._rctx_vals[ridx]
if ridx != index:
value = self._xlateToMetaReg(index, value)
return value
def getMetaRegInfo(self, index):
'''
Return the appropriate realreg, shift, mask info
for the specified metareg idx (or None if it's not
meta).
Example:
real_reg, lshift, mask = r.getMetaRegInfo(x)
'''
ridx = index & 0xffff
if ridx == index:
return None
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
return ridx, offset, mask
def _xlateToMetaReg(self, index, value):
'''
Translate a register value to the meta register value
(used when getting a meta register)
'''
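        # Shift and mask the real register's value down to the meta register's slice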
ridx = index & 0xffff
offset = (index >> 24) & 0xff
width = (index >> 16) & 0xff
mask = (2**width)-1
if offset != 0:
value >>= offset
return value & mask
def _xlateToNativeReg(self, index, value):
'''
Translate a register value to the native register value
(used when setting a meta register)
'''
ridx = index & 0xffff
width = (index >> 16) & 0xff
offset = (index >> 24) & 0xff
# FIXME is it faster to generate or look these up?
mask = (2 ** width) - 1
mask = mask << offset
# NOTE: basewidth is in *bits*
basewidth = self._rctx_widths[ridx]
basemask = (2 ** basewidth) - 1
        # cut a hole in basemask at the size/offset of mask
finalmask = basemask ^ mask
curval = self._rctx_vals[ridx]
if offset:
value <<= offset
return value | (curval & finalmask)
def setRegister(self, index, value):
"""
Set a register value by index.
"""
self._rctx_dirty = True
ridx = index & 0xffff
# If it's a meta register index, lets mask it into
# the real thing...
if ridx != index:
value = self._xlateToNativeReg(index, value)
self._rctx_vals[ridx] = (value & self._rctx_masks[ridx])
def getRealRegisterNameByIdx(self, regidx):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
(by Index)
"""
return self.getRegisterName(regidx& RMETA_NMASK)
def getRealRegisterName(self, regname):
"""
Returns the Name of the Containing register (in the case
of meta-registers) or the name of the register.
"""
ridx = self.getRegisterIndex(regname)
if ridx != None:
return self.getRegisterName(ridx & RMETA_NMASK)
return regname
def addLocalEnums(l, regdef):
"""
Update a dictionary (or module locals) with REG_FOO index
values for all the base registers defined in regdef.
"""
for i,(rname,width) in enumerate(regdef):
l["REG_%s" % rname.upper()] = i
def addLocalStatusMetas(l, metas, statmetas, regname):
'''
Dynamically create data based on the status register meta register
definition.
Adds new meta registers and bitmask constants.
'''
for metaname, idx, offset, width, desc in statmetas:
# create meta registers
metas.append( (metaname, idx, offset, width) )
# create local bitmask constants (EFLAGS_%)
l['%s_%s' % (regname, metaname)] = 1 << offset # TODO: fix for arbitrary width
def addLocalMetas(l, metas):
"""
Update a dictionary (or module locals) with REG_FOO index
values for all meta registers defined in metas.
"""
for name, idx, offset, width in metas:
l["REG_%s" % name.upper()] = (offset << 24) | (width << 16) | idx
| 31.643519 | 88 | 0.59744 | 12,466 | 0.911924 | 0 | 0 | 0 | 0 | 0 | 0 | 5,689 | 0.416167 |
c4a6ac024777e5d5757393235c2f8a34ef55a681 | 531 | py | Python | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
]
| null | null | null | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
]
| null | null | null | services/nris-api/backend/app/extensions.py | parc-jason/mds | 8f181a429442208a061ed72065b71e6c2bd0f76f | [
"Apache-2.0"
]
| null | null | null |
from flask_caching import Cache
from flask_jwt_oidc import JwtManager
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from elasticapm.contrib.flask import ElasticAPM
from .config import Config
from .helper import Api
apm = ElasticAPM()
db = SQLAlchemy()
migrate = Migrate()
jwt = JwtManager()
cache = Cache()
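# Api helper (see .helper): endpoints are mounted under the configured base path, with the API docs at its root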
api = Api(
prefix=f'{Config.BASE_PATH}',
doc=f'{Config.BASE_PATH}/',
default='nris_api',
default_label='NRIS related operations')
| 23.086957 | 49 | 0.770245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.146893 |
c4a933bda29b080bd1aab7e22c3ee0df61cffb17 | 3,510 | py | Python | tests/test_mqtt_async.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
]
| 7 | 2016-09-27T00:21:46.000Z | 2017-03-18T20:04:29.000Z | tests/test_mqtt_async.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
]
| null | null | null | tests/test_mqtt_async.py | mpi-sws-rse/antevents-python | 5b9226813583141986014fc83f6f74342a5f271e | [
"Apache-2.0"
]
| 2 | 2017-03-16T21:47:43.000Z | 2020-10-20T22:58:03.000Z | # Copyright 2017 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Test async version of mqtt libraries. Depends on hbmqtt
(https://github.com/beerfactory/hbmqtt)
"""
import unittest
import sys
import asyncio
import string
from random import choice, seed
from antevents.base import Scheduler, SensorPub, SensorEvent
import antevents.linq.output
import antevents.linq.combinators
import antevents.linq.select
from antevents.adapters.mqtt_async import QueueWriter, QueueReader
from antevents.linq.transducer import PeriodicMedianTransducer
from utils import ValueListSensor, ValidateAndStopSubscriber
seed()
try:
import hbmqtt
HBMQTT_AVAILABLE = True
except ImportError:
HBMQTT_AVAILABLE = False
URL = "mqtt://localhost:1883"
VALUES = [
1.0,
2.5,
3.7,
4.1,
8.1,
0.5,
6.5,
4.5,
3.9,
6.5
]
EXPECTED = [
2.5,
4.1,
4.5,
6.5
]
def msg_to_event(msg):
return SensorEvent(sensor_id=msg[0], ts=msg[1], val=msg[2])
CHARS=string.ascii_letters+string.digits
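# Random topic suffix keeps repeated test runs from colliding on the same MQTT broker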
def get_topic_name(test_class):
return test_class.__class__.__name__ + ''.join([ choice(CHARS) for i in range(5) ])
@unittest.skipUnless(HBMQTT_AVAILABLE,
"HBMQTT library not installed for python at %s" %
sys.executable)
class TestCase(unittest.TestCase):
def setUp(self):
# Creating a new event loop each test case does not seem to work.
# I think it is due to hbmqtt not cleaning up some state in the asyncio
# layer.
#self.loop = asyncio.new_event_loop()
self.loop = asyncio.get_event_loop()
self.sched = Scheduler(self.loop)
def tearDown(self):
pass
#self.loop.stop()
#self.loop.close()
def test_client_only(self):
SENSOR_ID='sensor-1'
TOPIC=get_topic_name(self)
sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
td = sensor.transduce(PeriodicMedianTransducer(period=3))
qw = QueueWriter(td, URL, TOPIC, self.sched)
qw.output()
self.sched.schedule_periodic(sensor, 0.5)
self.sched.run_forever()
self.assertFalse(qw.has_pending_requests(),
"QueueWriter has pending requests: %s" % qw.dump_state())
print("test_client_only completed")
def send_and_recv_body(self, sleep_timeout):
SENSOR_ID='sensor-1'
TOPIC=get_topic_name(self)
sensor = SensorPub(ValueListSensor(SENSOR_ID, VALUES))
td = sensor.transduce(PeriodicMedianTransducer(period=3))
qw = QueueWriter(td, URL, TOPIC, self.sched)
qw.output()
qr = QueueReader(URL, TOPIC, self.sched, timeout=sleep_timeout)
self.sched.schedule_periodic(sensor, 0.5)
stop_qr = self.sched.schedule_on_main_event_loop(qr)
vs = ValidateAndStopSubscriber(EXPECTED, self, stop_qr)
qr.select(msg_to_event).subscribe(vs)
self.sched.run_forever()
self.assertFalse(qw.has_pending_requests(),
"QueueWriter has pending requests: %s" % qw.dump_state())
self.assertEqual(qr.state, QueueReader.FINAL_STATE)
self.assertEqual(vs.next_idx, len(EXPECTED))
print("send_and_recv_bod(%s) completed" % sleep_timeout)
def test_short_timeout(self):
self.send_and_recv_body(0.1)
def test_long_timeout(self):
self.send_and_recv_body(3.0)
if __name__ == '__main__':
unittest.main()
| 30 | 88 | 0.665242 | 2,116 | 0.602849 | 0 | 0 | 2,263 | 0.644729 | 0 | 0 | 648 | 0.184615 |
c4ac27bb61de371ddbaa59cbf8dcb19f1eb8972f | 7,407 | py | Python | edit/main.py | team-alpha-kr/Partner-pyWeb | da88f10b4c511616219a701a794fa0a5ef33f01b | [
"Apache-2.0"
]
| null | null | null | edit/main.py | team-alpha-kr/Partner-pyWeb | da88f10b4c511616219a701a794fa0a5ef33f01b | [
"Apache-2.0"
]
| null | null | null | edit/main.py | team-alpha-kr/Partner-pyWeb | da88f10b4c511616219a701a794fa0a5ef33f01b | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf8 -*-
import os
from flask import Flask, request, render_template, request, redirect, url_for, jsonify
from flask_discord import DiscordOAuth2Session, requires_authorization
from discord import Webhook, RequestsWebhookAdapter
webhook = Webhook.partial(814742019489660939, "rvSBVHtGPflSASjeGEEKdZxC5Z_w1UM_ovc_xD0ZPcFy1UeUybFM4ClGANu6CEWTQame", adapter=RequestsWebhookAdapter())
run_webhook = Webhook.partial(804602090537091072, "6ZMww14Nh7OVeeHUt5bWeixreoWQmSzPVfFmIpU3BEr8OYLGqickY1VyoqH2IeMs1Kd8", adapter=RequestsWebhookAdapter())
app = Flask(__name__)
app.secret_key = b"%\xe0'\x01\xdeH\x8e\x85m|\xb3\xffCN\xc9g"
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "false"
app.config["DISCORD_CLIENT_ID"] = "801279922722045962"
app.config["DISCORD_CLIENT_SECRET"] = "zosKMQ95etnO1dZv7D5vet7TyVhyXwt5" # Discord client secret.
# app.config["DISCORD_REDIRECT_URI"] = "http://localhost:3333/callback" # URL to your callback endpoint.
app.config["DISCORD_REDIRECT_URI"] = "https://partner-e.alphakr.xyz/callback" # URL to your callback endpoint.
app.config["DISCORD_BOT_TOKEN"] = "ODAxMjc5OTIyNzIyMDQ1OTYy.YAeYFA.G9TddtDdPZ3Xlb7AAHD6ddVWVbY"
discord = DiscordOAuth2Session(app)
def on_json_loading_failed_return_dict(e):
    return 'none'
@app.route('/', methods=['GET','POST'])
def index():
return render_template('form/1.html')
@app.route("/login", methods=["GET"])
def login():
if not discord.authorized:
return discord.create_session(scope=['guilds', 'email', 'identify'])
else:
return render_template("login.html")
@app.route("/callback", methods=["GET", "POST"])
def callback():
data = discord.callback()
redirect_to = data.get("redirect", "/form/1")
return redirect(redirect_to)
@app.route("/logout", methods=['GET', 'POST'])
def logout():
if discord.authorized:
discord.revoke()
return redirect(url_for("index"))
else:
return redirect(url_for("index"))
@app.route('/form/1', methods=['GET','POST'])
def form1():
if request.method == 'GET':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            return render_template('form/1.html', user=user)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            run_webhook.send(f"[ 403 ERROR ] {user} attempted an invalid access to the partner application step 1 page.")
            return "<script>alert('This is not a valid access.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            run_webhook.send(f"[ 403 ERROR ] A non-logged-in user ({ip}) attempted an invalid access to the partner application step 1 page.")
            return "<script>alert('This is not a valid access.');location.replace('/');</script>"
@app.route('/form/2', methods=['GET','POST'])
def form2():
if request.method == 'POST':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            return render_template('form/2.html', code=code, nickname=nickname)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            run_webhook.send(f"[ 403 ERROR ] {user} attempted an invalid access to the partner application step 2 page.")
            return "<script>alert('This is not a valid access.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            run_webhook.send(f"[ 403 ERROR ] A non-logged-in user ({ip}) attempted an invalid access to the partner application step 2 page.")
            return "<script>alert('This is not a valid access.');location.replace('/');</script>"
@app.route('/form/3', methods=['GET','POST'])
def form3():
if request.method == 'POST':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            server = request.form['server']
            member = request.form['member']
            category = request.form['category']
            etc_text = request.form['etc_text']
            return render_template('form/3.html', code=code, nickname=nickname, server=server, member=member, category=category, etc_text=etc_text)
        else: # not logged in?
            return redirect(url_for("login"))
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            return "<script>alert('This is not a valid access.');location.replace('/');</script>"
        else: # not logged in?
            return "<script>alert('This is not a valid access.');location.replace('/');</script>"
@app.route('/form/action', methods=['GET','POST'])
def action():
if request.method == 'GET':
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            user = discord.fetch_user()
            run_webhook.send(f"[ 403 ERROR ] {user} attempted an invalid access to the partner application submit page.")
            return "<script>alert('This is not a valid access.');location.replace('/');</script>"
        else: # not logged in?
            ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
            run_webhook.send(f"[ 403 ERROR ] A non-logged-in user ({ip}) attempted an invalid access to the partner application submit page.")
            return "<script>alert('This is not a valid access.');location.replace('/');</script>"
    else:
        if discord.authorized: # is the user logged in?
            try:
                discord.fetch_guilds() # fetch the login info
            except:
                return redirect(url_for("logout")) # log out if it cannot be fetched
            code = request.form['code']
            nickname = request.form['nickname']
            server = request.form['server']
            member = request.form['member']
            category = request.form['category']
            etc_text = request.form['etc_text']
            message = request.form['message']
            image = request.form['image']
            video = request.form['video']
            if etc_text == '':
                etc_text = 'Unknown'
            webhook.send(f"<@627292715956043785>\nA partner edit request has arrived.\n\nPartner code: {code}\nApplicant: {nickname}\nServer (invite link): {server}\nMember count: {member}\nCategory: {category} ({etc_text})\nPromo text: {message}\nImage: {image}\nVideo: {video}")
            return render_template('form/action.html', code = code)
        else: # not logged in?
            return redirect(url_for("index"))
@app.route('/guide/<id>', methods=['GET', 'POST'])
def guide(id):
return f"<script>location.replace('https://team-alpha-kr.github.io/Partner-Guide/{id}.html');</script>"
# S: 2021 partner website revamp code
# S: 210210 notice pages
@app.route('/notice/<id>', methods=['GET', 'POST'])
def notice(id):
return render_template(f"2021temp/notice/{id}.html")
# E: 210210 notice pages
# E: 2021 partner website revamp code
@app.errorhandler(404)
def page_not_found(error):
return render_template("error/404.html")
@app.errorhandler(500)
def servererror(error):
    run_webhook.send(f"<@673776952578146315> [ 500 ERROR ] A server error occurred.")
return render_template("error/500.html")
@app.errorhandler(400)
def badrequest(error):
    run_webhook.send(f"<@673776952578146315> [ 400 ERROR ] A server error occurred.")
return render_template("error/400.html")
run_webhook.send("Partner info edit - the website has started!")
app.run(host='0.0.0.0', port=3333, debug=False) | 37.984615 | 212 | 0.699743 | 0 | 0 | 0 | 0 | 7,281 | 0.822247 | 0 | 0 | 4,670 | 0.527386 |
c4ad485be7bdd5e1ac650e5ab444023836dc2e62 | 257 | py | Python | protocols/tpkt.py | dparnishchev/s7scan | 87a7aeeb3c932491745dfded2577d221083f87df | [
"Unlicense"
]
| 98 | 2018-10-12T10:36:55.000Z | 2022-03-31T15:55:46.000Z | protocols/tpkt.py | FOGSEC/s7scan | d7f9c3bbd6a97a7f83991ea865be95b0e9280346 | [
"Unlicense"
]
| null | null | null | protocols/tpkt.py | FOGSEC/s7scan | d7f9c3bbd6a97a7f83991ea865be95b0e9280346 | [
"Unlicense"
]
| 35 | 2018-10-12T17:08:25.000Z | 2022-03-28T20:12:27.000Z | from scapy.fields import ByteField, ShortField
from scapy.packet import Packet
class TPKT(Packet):
name = "TPKT"
fields_desc = [ByteField("version", 3),
ByteField("reserved", 0),
ShortField("length", 0x0000)]
| 25.7 | 48 | 0.614786 | 175 | 0.680934 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.128405 |