max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
tottle/types/objects/contact.py | muffleo/tottle | 12 | 12789951 | from typing import Optional
from pydantic import BaseModel
class Contact(BaseModel):
phone_number: Optional[str] = None
first_name: Optional[str] = None
last_name: Optional[str] = None
user_id: Optional[int] = None
vcard: Optional[str] = None
| 2.6875 | 3 |
functions/args_parameters.py | danielkpodo/python-zero-to-mastery | 0 | 12789952 | def greet_user(username, emoji):
print("Welcome {}, we are happy to have you{}".format(username, emoji))
greet_user("narh", "🙋")
greet_user("kpodo", '🍀')
# When you define a function, the names listed in its signature are called parameters
# When you invoke (call) the function, the values you pass in are called arguments
| 3.390625 | 3 |
api/beers/tests/test_deactivate_inactive.py | haavardnk/Vinmonopolet-x-Untappd | 0 | 12789953 | <reponame>haavardnk/Vinmonopolet-x-Untappd<filename>api/beers/tests/test_deactivate_inactive.py
import pytest
from django.utils import timezone
from datetime import timedelta
from beers.models import Beer
from beers.tasks import deactivate_inactive
@pytest.mark.django_db
def test_deactivate_inactive_beer():
"""
Test that a beer which is no longer on vinmonopolet gets deactivated.
"""
Beer.objects.create(
vmp_id=12611502,
vmp_name="<NAME>",
active=True,
vmp_updated=timezone.now() - timedelta(days=31),
)
deactivate_inactive(30)
beer = Beer.objects.get(vmp_id=12611502)
assert beer.active == False
@pytest.mark.django_db
def test_active_beer_does_not_get_deactivated():
"""
Test that a beer which is active on vinmonopolet does not get deactivated.
"""
Beer.objects.create(
vmp_id=12611502,
vmp_name="<NAME>",
active=True,
vmp_updated=timezone.now() - timedelta(days=29),
)
deactivate_inactive(30)
beer = Beer.objects.get(vmp_id=12611502)
assert beer.active == True
| 2.296875 | 2 |
steelpy/sections/shapes/elliptical.py | svortega/steelpy | 4 | 12789954 | #
# Copyright (c) 2019-2021 steelpy
#
# Python stdlib imports
import math
import sys
#
# package imports
#import steelpy.units.control as units
#from steelpy.sectionproperty.shapes.iomodule import (find_section_dimensions,
# get_dimension)
# ----------------------------------------
# Elliptical Sections Profiles
# ----------------------------------------
#
class HollowSemiellipse:
"""
Calculate the section properties of a Hollow Semiellipse
with constant wall thickness Tw.
The midthickness perimeter is an ellipse 0.2 < a/b < 0.50
Parameters
----------
    d : Section Height
b : Base
tw : Wall thickness
Returns
----------
area: Section area
Zc : Elastic neutral centre
Yc : Elastic neutral centre
    Iy : Second moment of area about major axis
    Zey : Elastic modulus about major axis
    Zpy : Plastic modulus about major axis
    SFy : Shape factor major axis
    ry : Radius of gyration about major Axis
Iz : Second moment of area about minor axis
Zez : Elastic modulus about minor axis
Zpz : Plastic modulus about minor axis
SFz : Shape factor minor axis
rz : Radius of gyration about minor Axis
SC : Shear centre
Cw : Warping constant
Notes
----------
Uses formulas from:
    1.- Formulas for stress, strain and structural matrices [W.D. Pilkey]
2.- Roark's formulas for stress and strain [7th Edition]
3.- Wikipedia
Examples
----------
"""
#
def __init__(self):
#
# Build [WELDED / ROLLED]
self.build = 'welded'
# Shear Stress [MAXIMUM / AVERAGE]
self.shear_stress = 'average'
self.compactness = 'N/A'
self.units_in = ["", "", "second", "", "", ""]
def units_input(self, **kwargs):
"""
Input:
======
length : [mandatory]
force :
temperature :
gravity : [default : 9.81ms^2]
------
units [length, mass, time, temperature, force, pressure/stress]
"""
for key, value in kwargs.items():
_unit = units.find_unit_case(key)
self.units_in = units.units_module(_unit, value,
self.units_in)
if self.units_in[0]:
pass
else:
print('error length unit must be provided')
print(' program aborted')
sys.exit()
#
def geometry(self, **kwargs):
for key, value in kwargs.items():
_dim = find_section_dimensions(key)
get_dimension(self, _dim, value)
self.type = 'Hollow Semiellipse'
#
def units_output(self, **kwargs):
"""
Input:\n
length : [mandatory]\n
force : [mandatory]\n
temperature : \n
gravity : [default : 9.81ms^2]\n
------
        units [length, mass, time, temperature, force, pressure/stress]\n
"""
_units_in = ["", "", "second", "", "", ""]
for key, value in kwargs.items():
_unit = units.find_unit_case(key)
self.units_out = units.units_module(_unit, value,
_units_in)
#
#
#
def get_property(self):
#
if self.units_in[0]:
_units_input = self.units_in
else:
print(' ** error input units not provided')
print(' process terminated')
sys.exit()
# units
try:
_units_output = self.units_out
except AttributeError:
_units_output = self.units_in
self.units_out = self.units_in
factors = units.get_length_mass(_units_input,
_units_output)
self.units_in = _units_output
self.d *= factors[0]
#self.tw *= factors[0]
#self.a *= factors[0]
#self.ta *= factors[0]
self.b *= factors[0]
#self.tb *= factors[0]
#
_a = self.d - 0.50 * self.tw
_b = self.b / 2.0 - 0.50 * self.tw
# Note : there is a limit on the maximum
# wall thickness allowed in this case.
# Cusps will form in the perimeter at
# the ends of the mayor axis if this
# maximum is exceeded.
if _a/_b < 1.0 :
_tmax = 2 * _a**2 / _b
else: _tmax = 2 * _b**2 / _a
if self.tw > _tmax :
sys.exit('error : t > tmax')
#-------------------------------------------------
# Cross-Sectional Area
_C = (_a - _b) / (_a + _b)
_K1 = 0.2464 + 0.002222 * ((_a / _b) + (_b / _a))
_K2 = 1 - 0.3314 * _C + 0.0136 * _C**2 + 0.1097 * _C**3
_K3 = 1 + 0.9929 * _C - 0.2287 * _C**2 - 0.2193 * _C**3
self.area = ((self.tw * math.pi / 2.0) *
(_a + _b) * (1.0 + _K1 * ((_a - _b) / (_a + _b))**2))
# Centroid
self.Zc = ((2.0 * _a * _K2 / math.pi)
+ (self.tw**2 * _K3 / (6.0 * math.pi * _a)))
_Zc1 = _a + self.tw / 2.0 - self.Zc
self.Yc = 0
_Yc1 = _b + self.tw / 2.0
#-------------------------------------------------
# Section Properties
#-------------------------------------------------
        # Second Moment of Area about Major Axis
# --------------------------------------
_K4 = 0.1349 + 0.1279 * (_a / _b) - 0.01284 * (_a / _b)**2
_K5 = 0.1349 + 0.1279 * (_a / _b) - 0.01284 * (_b / _a)**2
_Iy = ((((self.tw * _a**2 * math.pi / 8.0) * (_a + 3 * _b))
* (1 + _K4 * ((_a - _b) / (_a + _b))**2))
+ (((self.tw**3 * math.pi / 32.0) * (3 * _a + _b))
* (1 + _K5 * ((_a - _b) / (_a + _b))**2)))
self.Iy = _Iy - self.area * self.Zc**2
_K2 = 0.1349 + 0.1279 * (_b / _a) - 0.01284 * (_b / _a)**2
_K3 = 0.1349 + 0.1279 * (_a / _b) - 0.01284 * (_a / _b)**2
self.Iz = 0.50 * ((((self.tw * _b**2 * math.pi / 4.0) * (_b + 3 * _a))
* (1 + _K2 * ((_b - _a) / (_b + _a))**2))
+ (((self.tw**3 * math.pi / 16.0) * (3 * _b + _a))
* (1 + _K3 * ((_b - _a) / (_b + _a))**2)))
        # Elastic Modulus about Major Axis
# --------------------------------------
self.Zey = self.Iy / _Zc1
#
self.Zez = self.Iz / _Yc1
        # Plastic Modulus about Major Axis
# --------------------------------------
# Let Zp be the vertical distance from the bottom
# to the plastic neutral axis
_DD = self.tw / _tmax
_DD = max(_DD , 0.20)
_DD = min(_DD , 1.0)
if _a / _b > 0.25 and _a / _b < 1.0:
_C1 = 0.5067 - 0.5588 * _DD + 1.3820 * _DD**2
_C2 = 0.3731 + 0.1938 * _DD - 1.4078 * _DD**2
_C3 = -0.140 + 0.0179 * _DD + 0.4885 * _DD**2
_C4 = 0.0170 - 0.0079 * _DD - 0.0565 * _DD**2
#
_C5 = -0.0292 + 0.3749 * math.sqrt(_DD) + 0.0578 * _DD
_C6 = 0.36740 - 0.8531 * math.sqrt(_DD) + 0.3882 * _DD
_C7 = -0.1218 + 0.3563 * math.sqrt(_DD) - 0.1803 * _DD
_C8 = 0.01540 - 0.0448 * math.sqrt(_DD) + 0.0233 * _DD
#
elif _a / _b >= 1.0 and _a / _b < 4.0:
_C1 = 0.4829 + 0.0725 * _DD - 0.1815 * _DD**2
_C2 = 0.1957 - 0.6608 * _DD + 1.4222 * _DD**2
_C3 = 0.0203 + 1.8999 * _DD - 3.4356 * _DD**2
_C4 = 0.0578 - 1.6666 * _DD + 2.6012 * _DD**2
#
_C5 = 0.22410 - 0.3922 * math.sqrt(_DD) + 0.2960 * _DD
_C6 = -0.6637 + 2.7357 * math.sqrt(_DD) - 2.0482 * _DD
_C7 = 1.52110 - 5.3864 * math.sqrt(_DD) + 3.9286 * _DD
_C8 = -0.8498 + 2.8763 * math.sqrt(_DD) - 1.8874 * _DD
#
else :
sys.exit('error a/b > 4 or a/b < 0.25')
# Plastic neutral axis
_Zp = (_a * (_C1 + _C2 / (_a / _b) + _C3 / (_a / _b)**2
+ _C4 / (_a / _b)**3))
_Yp = 0
        # Plastic section moduli major axis
self.Zpy = (4.0 * _a**2 * self.tw * (_C5 + _C6 / (_a / _b)
+ _C7 / (_a / _b)**2
+ _C8 / (_a / _b)**3))
# Plastic section moduli minor axis
_K4 = 0.1835 + 0.895 * (_b / _a) - 0.00978 * (_b / _a)**2
self.Zpz = (0.50 * (((1.3333 * self.tw * _b * (_b + 2 * _a))
* (1 + _K4 * ((_b - _a) / (_a + _b))**2))
+ (self.tw**3 / 3.0)))
#-------------------------------------------------
# Radius of gyration
self.ry = math.sqrt(self.Iy / self.area)
self.rz = math.sqrt(self.Iz / self.area)
#
#return self.area, _Zc, _Yc, _Iy, _Zey, _Zpy, _ry, _Iz, _Zez, _Zpz, _rz, _Zp
#
def print_file(self, file_name):
check_out = print_header()
check_out.append("{:23s} {:1.4E} {:1.4E}"
.format(self.type, self.d, self.tw))
check_out.extend(print_properties(self))
#file_checkout = split_file_name(file_name)
#file_checkout = str(file_checkout[0]) +'_check_me.txt'
file_checkout = str(file_name) + '.txt'
add_out = open(file_checkout,'w')
add_out.write("".join(check_out))
add_out.close()
print('ok')
#
class EllipticalSegment:
"""
Calculate the circular and elliptical segments
cross section properties
Parameters
----------
    a : Major Axis
b : Minor Axis
thetaG : Angle (degrees)
Returns
----------
area: Section area
Zc : Elastic neutral centre
Yc : Elastic neutral centre
    Iy : Second moment of area about major axis
    Zey : Elastic modulus about major axis
    ry : Radius of gyration about major Axis
Iz : Second moment of area about minor axis
Zez : Elastic modulus about minor axis
rz : Radius of gyration about minor Axis
Notes
----------
Uses formulas from:
1.- Geometric properties for the design of unusual member
cross-sections in bending [A.J. Sadowski]
Examples
----------
"""
def __init__(self):
# Build [WELDED / ROLLED]
self.build = 'welded'
# Shear Stress [MAXIMUM / AVERAGE]
self.shear_stress = 'average'
self.compactness = 'N/A'
self.units_in = ["", "", "second", "", "", ""]
def units_input(self, **kwargs):
"""
Input:
======
length : [mandatory]
force :
temperature :
gravity : [default : 9.81ms^2]
------
units [length, mass, time, temperature, force, pressure/stress]
"""
for key, value in kwargs.items():
_unit = units.find_unit_case(key)
self.units_in = units.units_module(_unit, value,
self.units_in)
if self.units_in[0]:
pass
else:
print('error length unit must be provided')
print(' program aborted')
sys.exit()
#
def geometry(self, a, b, thetaG):
#
self.a = float(a)
self.b = float(b)
self.theta = float(thetaG)
self.p = 0
self.q = 0
self.type = 'Elliptical Segment'
def units_output(self, **kwargs):
"""
Input:\n
length : [mandatory]\n
force : [mandatory]\n
temperature : \n
gravity : [default : 9.81ms^2]\n
------
        units [length, mass, time, temperature, force, pressure/stress]\n
"""
_units_in = ["", "", "second", "", "", ""]
for key, value in kwargs.items():
_unit = units.find_unit_case(key)
self.units_out = units.units_module(_unit, value,
_units_in)
#
def get_property(self):
#
if self.units_in[0]:
_units_input = self.units_in
else:
print(' ** error input units not provided')
print(' process terminated')
sys.exit()
# units
try:
_units_output = self.units_out
except AttributeError:
_units_output = self.units_in
self.units_out = self.units_in
factors = units.get_length_mass(_units_input,
_units_output)
self.units_in = _units_output
self.a *= factors[0]
self.b *= factors[0]
self.p *= factors[0]
self.q *= factors[0]
_thetaG = math.radians(self.theta)
_thetaG = min(abs(_thetaG), 0.50 * math.pi)
# Area
self.area = (0.50 * self.a * self.b
* (2 * _thetaG - math.sin( 2 * _thetaG)))
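        # sanity check (illustrative): with a = b = r and thetaG = 90 degrees the
        # expression above reduces to 0.5 * pi * r**2, the area of a semicircle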
# Centroid
self.Zc = ((4.0 * self.b * math.sin(_thetaG)**3)
/ (3.0 * (2 * _thetaG - math.sin(2 * _thetaG))))
self.Yc = 0
# Second Moment of Area about x
self.Iy = ((self.a * self.b**3 / 16.0)
* (4 * _thetaG - math.sin(4 * _thetaG)))
# Second Moment of Area about y
self.Iz = ((self.a**3 * self.b / 24.0)
* (6.0 * _thetaG - math.sin(2 * _thetaG)
* (3.0 + 2.0 * math.sin(_thetaG)**2)))
# Second Moment of Area about the horizontal centroidal C
self.Ic = self.Iy - self.area * self.Zc**2
# The distances from the centroid to the extreme fibres
_y1 = self.a * math.sin(_thetaG)
_z1 = self.b - self.Zc
_z2 = self.Zc - self.b * math.cos(_thetaG)
# elastic section moduli
self.Zey = min(self.Ic / _z1, self.Ic / _z2)
self.Zez = self.Iz / _y1
# plastic section moduli
_Zpy = 0
_Zpz = 0
# radii of gyration
self.ry = math.sqrt(self.Ic / self.area)
self.rz = math.sqrt(self.Iz / self.area)
#
#return _Area, _Zc, _Yc, _Iy, _Zey, self.Ic, _ry, _Iz, _Zez, _Zpz, _rz
#
def print_file(self, file_name):
check_out = print_header_ellipse()
check_out.append("{:23s} {:1.4E} {:1.4E} {:1.4E}"
.format(self.type, self.a, self.b, self.theta))
check_out.extend(print_properties(self))
#file_checkout = split_file_name(file_name)
#file_checkout = str(file_checkout[0]) +'_check_me.txt'
file_checkout = str(file_name) + '.txt'
add_out = open(file_checkout,'w')
add_out.write("".join(check_out))
add_out.close()
print('ok')
#
class EllipticalSector:
"""
Calculate the circular and elliptical sectors
cross section properties
Parameters
----------
    a : Major Axis
b : Minor Axis
thetaG : Angle (degrees)
Returns
----------
area: Section area
Zc : Elastic neutral centre
Yc : Elastic neutral centre
    Iy : Second moment of area about major axis
    Zey : Elastic modulus about major axis
    ry : Radius of gyration about major Axis
Iz : Second moment of area about minor axis
Zez : Elastic modulus about minor axis
rz : Radius of gyration about minor Axis
Notes
----------
Uses formulas from:
1.- Geometric properties for the design of unusual member
cross-sections in bending [<NAME>]
Examples
----------
"""
def __init__(self):
# Build [WELDED / ROLLED]
self.build = 'welded'
# Shear Stress [MAXIMUM / AVERAGE]
self.shear_stress = 'average'
self.compactness = 'N/A'
self.units_in = ["", "", "second", "", "", ""]
def units_input(self, **kwargs):
"""
Input:
======
length : [mandatory]
force :
temperature :
gravity : [default : 9.81ms^2]
------
units [length, mass, time, temperature, force, pressure/stress]
"""
for key, value in kwargs.items():
_unit = units.find_unit_case(key)
self.units_in = units.units_module(_unit, value,
self.units_in)
if self.units_in[0]:
pass
else:
print('error length unit must be provided')
print(' program aborted')
sys.exit()
#
def geometry(self, a, b, thetaG):
#
self.a = float(a)
self.b = float(b)
self.theta = float(thetaG)
self.p = 0
self.q = 0
self.type = 'Elliptical Sector'
def units_output(self, **kwargs):
"""
Input:\n
length : [mandatory]\n
force : [mandatory]\n
temperature : \n
gravity : [default : 9.81ms^2]\n
------
        units [length, mass, time, temperature, force, pressure/stress]\n
"""
_units_in = ["", "", "second", "", "", ""]
for key, value in kwargs.items():
_unit = units.find_unit_case(key)
self.units_out = units.units_module(_unit, value,
_units_in)
#
def get_property(self):
#
if self.units_in[0]:
_units_input = self.units_in
else:
print(' ** error input units not provided')
print(' process terminated')
sys.exit()
# units
try:
_units_output = self.units_out
except AttributeError:
_units_output = self.units_in
self.units_out = self.units_in
factors = units.get_length_mass(_units_input,
_units_output)
self.units_in = _units_output
self.a *= factors[0]
self.b *= factors[0]
self.p *= factors[0]
self.q *= factors[0]
_thetaG = math.radians(self.theta)
_thetaG = min(_thetaG, 0.50 * math.pi)
# Area
self.area = self.a * self.b * _thetaG
# Centroid
self.Zc = (2 * self.b * math.sin(_thetaG)) / (3 * _thetaG)
self.Yc = 0
# Second Moment of Area about x
self.Iy = ((self.a * self.b**3 / 8.0)
* (2 * _thetaG + math.sin(2 * _thetaG)))
# Second Moment of Area about y
self.Iz = ((self.a**3 * self.b / 8.0)
* (2 * _thetaG - math.sin(2 * _thetaG)))
# Second Moment of Area about the horizontal centroidal C
self.Ic = self.Iy - self.area * self.Zc**2
# The distances from the centroid to the extreme fibres
_y1 = self.a * math.sin(_thetaG)
_z1 = self.b - self.Zc
_z2 = self.Zc - self.b * math.cos(_thetaG)
# elastic section moduli
self.Zey = min(self.Ic / _z1, self.Ic / _z2)
self.Zez = self.Iz / _y1
# plastic section moduli
_Zpy = 0
_Zpz = 0
# radii of gyration
self.ry = math.sqrt(self.Ic / self.area)
self.rz = math.sqrt(self.Iz / self.area)
#
#
#return self.area, self.Zc, _Yc, self.Ic, _Zey, _Zpy, _ry, self.Iz, _Zez, _Zpz, _rz
#
def print_file(self, file_name):
check_out = print_header_ellipse()
check_out.append("{:23s} {:1.4E} {:1.4E} {:1.4E}"
.format(self.type, self.a, self.b, self.theta))
check_out.extend(print_properties(self))
#file_checkout = split_file_name(file_name)
#file_checkout = str(file_checkout[0]) +'_check_me.txt'
file_checkout = str(file_name) + '.txt'
add_out = open(file_checkout,'w')
add_out.write("".join(check_out))
add_out.close()
print('ok')
#
class SuperEllipse:
"""
Calculate the superellipse cross section properties
Superellipses as a function of the powers p and q
Parameters
----------
    a : Major Axis
b : Minor Axis
p :
q :
Returns
----------
area: Section area
Zc : Elastic neutral centre
Yc : Elastic neutral centre
    Iy : Second moment of area about major axis
    Zey : Elastic modulus about major axis
    ry : Radius of gyration about major Axis
Iz : Second moment of area about minor axis
Zez : Elastic modulus about minor axis
rz : Radius of gyration about minor Axis
Notes
----------
Uses formulas from:
1.- Geometric properties for the design of unusual member
cross-sections in bending [<NAME>]
Examples
----------
"""
def __init__(self):
# Build [WELDED / ROLLED]
self.build = 'welded'
# Shear Stress [MAXIMUM / AVERAGE]
self.shear_stress = 'average'
self.compactness = 'N/A'
self.units_in = ["", "", "second", "", "", ""]
def units_input(self, **kwargs):
"""
Input:
======
length : [mandatory]
force :
temperature :
gravity : [default : 9.81ms^2]
------
units [length, mass, time, temperature, force, pressure/stress]
"""
for key, value in kwargs.items():
_unit = units.find_unit_case(key)
self.units_in = units.units_module(_unit, value,
self.units_in)
if self.units_in[0]:
pass
else:
print('error length unit must be provided')
print(' program aborted')
sys.exit()
#
def geometry(self, a, b, p=2.0, q=2.0):
#
self.a = float(a)
self.b = float(b)
self.theta = 90
self.p = float(p)
self.q = float(q)
self.type = 'Super Ellipse'
def units_output(self, **kwargs):
"""
Input:\n
length : [mandatory]\n
force : [mandatory]\n
temperature : \n
gravity : [default : 9.81ms^2]\n
------
        units [length, mass, time, temperature, force, pressure/stress]\n
"""
_units_in = ["", "", "second", "", "", ""]
for key, value in kwargs.items():
_unit = units.find_unit_case(key)
self.units_out = units.units_module(_unit, value,
_units_in)
#
def get_property(self):
#
if self.units_in[0]:
_units_input = self.units_in
else:
print(' ** error input units not provided')
print(' process terminated')
sys.exit()
# units
try:
_units_output = self.units_out
except AttributeError:
_units_output = self.units_in
self.units_out = self.units_in
factors = units.get_length_mass(_units_input,
_units_output)
self.units_in = _units_output
self.a *= factors[0]
self.b *= factors[0]
self.p *= factors[0]
self.q *= factors[0]
if self.p <= 0 or self.q <= 0:
sys.exit("error p & q > 0")
# Area
self.area = ((2.0 * self.a * self.b / self.q) *
((math.gamma(1.0 / self.q) * math.gamma((1.0 + self.p) / self.p))
/ (math.gamma((self.p + self.p * self.q + self.q) / (self.p * self.q)))))
# Centroid
self.Zc = ((math.pow(4, 1.0 / self.q) * self.b / (2 * math.sqrt(math.pi)))
* ((math.gamma((2.0 + self.q) / (2 * self.q))
* math.gamma((self.p + self.p * self.q + self.q) / (self.p * self.q)))
/ (math.gamma((2 * self.p + self.p * self.q + self.q) / (self.p * self.q)))))
self.Yc = 0
# Second Moment of Area about x
self.Iy = ((2.0 * self.a * self.b**3 / self.q) *
((math.gamma(3.0 / self.q) * math.gamma((1.0 + self.p) / self.p))
/ (math.gamma((3 * self.p + self.p * self.q + self.q) / (self.p * self.q)))))
# Second Moment of Area about y
self.Iz = ((2.0 * self.a**3 * self.b / self.p) *
((math.gamma(3.0 / self.p) * math.gamma((1.0 + self.q) / self.q))
/ (math.gamma((self.p + self.p * self.q + 3 * self.q) / (self.p * self.q)))))
#print('Jy',_Iz / 10**4)
# Second Moment of Area about the horizontal centroidal C
self.Ic = self.Iy - self.area * self.Zc**2
#print('Jx',self.Ic / 10**4)
# The distances from the centroid to the extreme fibres
_y1 = self.a
_z1 = self.b - self.Zc
_z2 = self.Zc
# elastic section moduli
self.Zey = min(self.Ic / _z1, self.Ic / _z2)
self.Zez = self.Iz / _y1
# plastic section moduli
_Zpy = 0
_Zpz = 0
# radii of gyration
self.ry = math.sqrt(self.Ic / self.area)
self.rz = math.sqrt(self.Iz / self.area)
#
#return _Area, _Zc, _Yc, self.Ic, _Zey, _Zpy, _ry, _Iz, _Zez, _Zpz, _rz
#
#
def print_file(self, file_name):
check_out = print_header_ellipse()
check_out.append("{:23s} {:1.4E} {:1.4E} {:1.4E} {:1.4E} {:1.4E}"
.format(self.type, self.a, self.b, self.theta, self.p, self.q))
check_out.extend(print_properties(self))
#file_checkout = split_file_name(file_name)
#file_checkout = str(file_checkout[0]) +'_check_me.txt'
file_checkout = str(file_name) + '.txt'
add_out = open(file_checkout,'w')
add_out.write("".join(check_out))
add_out.close()
print('ok')
#
def quarterCircle(r):
"""
Calculate a quarter of a circle
Parameters
----------
r : radius
Returns
----------
area: Section area
Zc : Elastic neutral centre
Yc : Elastic neutral centre
    Iy : Second moment of area about major axis
    Zey : Elastic modulus about major axis
    ry : Radius of gyration about major Axis
Iz : Second moment of area about minor axis
Zez : Elastic modulus about minor axis
rz : Radius of gyration about minor Axis
Notes
----------
Uses formulas from:
1.- Structural Engineering Formulas
<NAME>
Examples
----------
"""
# Area
_Area = math.pi * r**2 / 4.0
#
# Centroid
_Zc = 4 * r / (3 * math.pi)
_Yc = _Zc
# Second Moment of Area about x
_Iy = 0.07135 * r**4
_Iy1 = 0.05489 * r**4
_Iy2 = math.pi * r**4 / 16.0
# Second Moment of Area about y
_Iz = 0.03843 * r**4
_Iz1 = _Iy1
_Iz2 = _Iy2
return _Area, _Zc, _Yc, _Iy, _Iy1, _Iy2, _Iz, _Iz1, _Iz2
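#
# Quick numerical check (illustrative): for r = 1.0 the quarter-circle area is
# pi/4 ~= 0.7854 and the centroid offset 4*r/(3*pi) ~= 0.4244, so
# quarterCircle(1.0) should return values starting (0.7854, 0.4244, 0.4244, ...)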
#
def closed_cross_section(a, b1, A1, Yc1, Ic1, Iy1,
b2 = 0, A2 = 0, Yc2 = 0,
Ic2 = 0, Iy2 = 0):
"""
Elliptical Sections Profiles Extension
Open cross-sections which are extended to half of the circumference
(thetaG = 1/2pi) may be combined together to make a hollow
closed cross-section with finite thickness t, e.g. a tube, hollow
rod, pipe or cylindrical shell,
"""
# check if section is symmetrical
if b2 == 0:
b2 = b1
A2 = A1
Yc2 = Yc1
Ic2 = Ic1
Iy2 = Iy1
_d = b1 + b2
# Total cross area
_A = A1 + A2
# Centroidal C-axis of full section
_Yc = (A1 * (Yc1 + b2) + A2 * (b2 - Yc2)) / _A
# Second moment of full area
_Ixx = ((Ic1 + A1 * (Yc1 + b2 - _Yc)**2)
+ (Ic2 + A2 * (_Yc - b2 + Yc2)**2))
_Iyy = Iy1 + Iy2
# Extreme fibre distances
_x1 = a
_y1 = _d - _Yc
_y2 = _Yc
# Elastic section moduli
_Sy = min(_Iyy / _y1, _Iyy / _y2)
_Sx = _Ixx / _x1
# radii of gyration
_ry = math.sqrt(_Iyy / _A)
_rx = math.sqrt(_Ixx / _A)
#
return _A, _Yc, _x1, _Ixx, _Sx, _rx, _Iyy, _Sy, _ry
#
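# Illustrative call pattern (hypothetical values, shown only as an example of
# the docstring above): two identical half-sections, each described by
# semi-axis a, half-depth b1, area A1, centroid offset Yc1 and second moments
# Ic1 / Iy1, are merged into one closed section with
#   A, Yc, x1, Ixx, Sx, rx, Iyy, Sy, ry = closed_cross_section(a, b1, A1, Yc1, Ic1, Iy1)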
#
def hollow_ellipse(a, b, t):
"""
    Section properties of a hollow ellipse with constant wall thickness.

    a : semi-axis of the mid-thickness perimeter in the z-direction
    b : semi-axis of the mid-thickness perimeter in the y-direction
    t : wall thickness
"""
# Area
K1 = 0.2464 + 0.002222 * (a/b + b/a)
Area = math.pi * t * (a + b) * (1 + K1 * ((a-b)/(a+b))**2)
# Centroid
Zc = a + t / 2.0
Yc = b + t / 2.0
    # Second Moment of Area about Major Axis
# --------------------------------------
K2 = 0.1349 + 0.1279 * a/b - 0.01284 * (a/b)**2
K3 = 0.1349 + 0.1279 * b/a - 0.01284 * (b/a)**2
Iy = (math.pi * t * a**2 / 4.0 * (a + 3*b) * (1 + K2 * ((a-b)/(a+b))**2)
+ math.pi * t**3 / 16.0 * (3*a + b) * (1 + K3 * ((a-b)/(a+b))**2))
# Second Moment of Area about Minor Axis
# --------------------------------------
K2 = 0.1349 + 0.1279 * b/a - 0.01284 * (b/a)**2
K3 = 0.1349 + 0.1279 * a/b - 0.01284 * (a/b)**2
Iz = (math.pi * t * b**2 / 4.0 * (b + 3*a) * (1 + K2 * ((b-a)/(b+a))**2)
+ math.pi * t**3 / 16.0 * (3*b + a) * (1 + K3 * ((b-a)/(b+a))**2))
    # Elastic Modulus about Major Axis
# --------------------------------------
K4 = 0.1835 + 0.895 * a/b - 0.00978 * (a/b)**2
Zey = 1.3333 * t * a * (a + 2*b) * (1 + K4 * ((a-b)/(a+b))**2) + t**3 / 3.0
# Elastic Modulus about Minor Axis
# --------------------------------------
K4 = 0.1835 + 0.895 * b/a - 0.00978 * (b/a)**2
Zez = 1.3333 * t * b * (b + 2*a) * (1 + K4 * ((b-a)/(b+a))**2) + t**3 / 3.0
    return Area, Zc, Yc, Iy, Zey, Iz, Zez
| 2.546875 | 3 |
Backend/oeda/rtxlib/executionstrategy/SelfOptimizerStrategy.py | iliasger/OEDA | 2 | 12789955 | <filename>Backend/oeda/rtxlib/executionstrategy/SelfOptimizerStrategy.py
from colorama import Fore
from skopt import gp_minimize
from oeda.log import *
from oeda.rtxlib.execution import experimentFunction
from oeda.rtxlib.executionstrategy import applyInitKnobs
from oeda.rtxlib.executionstrategy import applyDefaultKnobs
def start_self_optimizer_strategy(wf):
    """ executes a self optimizing strategy """
    applyInitKnobs(wf)
info("> ExecStrategy | SelfOptimizer", Fore.CYAN)
acquisition_method = wf.execution_strategy["acquisition_method"]
wf.totalExperiments = wf.execution_strategy["optimizer_iterations"]
optimizer_iterations_in_design = wf.execution_strategy["optimizer_iterations_in_design"]
info("> Optimizer | " + acquisition_method, Fore.CYAN)
# we look at the ranges the user has specified in the knobs
knobs = wf.execution_strategy["knobs"]
# we create a list of variable names and a list of knob (from,to)
variables = []
range_tuples = []
# we fill the arrays and use the index to map from gauss-optimizer-value to variable
for key in knobs:
variables.append(key)
# values in knobs might come unordered, so sort them to avoid dimension errors of scikit
min_value = min(float(knobs[key][0]), float(knobs[key][1]))
max_value = max(float(knobs[key][0]), float(knobs[key][1]))
tpl = tuple([min_value, max_value])
range_tuples.append(tpl)
# we give the minimization function a callback to execute
# it uses the return value (it tries to minimize it) to select new knobs to test
print("variables", variables)
print("range_tuples", range_tuples)
optimizer_result = gp_minimize(lambda opti_values: self_optimizer_execution(wf, opti_values, variables),
range_tuples, n_calls=wf.totalExperiments, n_random_starts=optimizer_iterations_in_design, acq_func=acquisition_method)
# optimizer is done, print results
info(">")
info("> OptimalResult | Knobs: " + str(recreate_knob_from_optimizer_values(variables, optimizer_result.x)))
info("> | Result: " + str(optimizer_result.fun))
# finished
info(">")
applyDefaultKnobs(wf)
return recreate_knob_from_optimizer_values(variables, optimizer_result.x), optimizer_result.fun
def recreate_knob_from_optimizer_values(variables, opti_values):
""" recreates knob values from a variable """
knob_object = {}
# create the knobObject based on the position of the opti_values and variables in their array
for idx, val in enumerate(variables):
knob_object[val] = opti_values[idx]
return knob_object
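# Example (illustrative values only): recreate_knob_from_optimizer_values(
#     ["cpu_cores", "batch_size"], [2.0, 32.0]) returns
#     {"cpu_cores": 2.0, "batch_size": 32.0}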
def self_optimizer_execution(wf, opti_values, variables):
""" this is the function we call and that returns a value for optimization """
knob_object = recreate_knob_from_optimizer_values(variables, opti_values)
print("knob_object in self_opt_execution", knob_object)
# create a new experiment to run in execution
exp = dict()
exp["ignore_first_n_samples"] = wf.primary_data_provider["ignore_first_n_samples"]
exp["sample_size"] = wf.execution_strategy["sample_size"]
exp["knobs"] = knob_object
wf.setup_stage(wf, exp["knobs"])
return experimentFunction(wf, exp)
| 2.578125 | 3 |
tests/test_plotting.py | fredchettouh/neural-processes | 0 | 12789956 | <gh_stars>0
from cnp.plotting import get_contxt_coordinates, get_colour_based_idx, Plotter
import torch
def test_get_contxt_coordinates():
contxt = torch.tensor([0, 1, 2, 3])
rows, cols = get_contxt_coordinates(contxt, 2)
assert (rows[0] == 0 and rows[2] == 1)
assert (cols[0] == 0 and cols[2] == 0)
def test_get_colour_based_idx():
img = torch.tensor([0, 0, 1, 0.2]).reshape(2, 2)
rows = torch.tensor([0, 1])
cols = torch.tensor([0, 1])
white_idx, black_idx = get_colour_based_idx(rows, cols, img)
assert (white_idx[0] == 1 and len(white_idx) == 1)
assert (black_idx[0] == 0 and len(black_idx) == 1)
def test_plot_training_progress():
with open('tests/fixtures/train_loss.txt') as file:
train_loss = [float(line) for line in file.readlines()]
with open('tests/fixtures/vali_loss.txt') as file:
vali_loss = [float(line) for line in file.readlines()]
Plotter.plot_training_progress(
training_losses=train_loss,
vali_losses=vali_loss,
interval=1000)
def test_plot_context_target_1d():
contxt_idx = torch.randperm(400)[:10]
xvalues = torch.normal(0, 1, (1, 400, 1))
target_x = xvalues
funcvalues = xvalues**2
target_y = funcvalues
mu = xvalues
cov_matrix = torch.normal(0, 0.001, (1, 400, 1))
Plotter.plot_context_target_1d(
contxt_idx=contxt_idx,
xvalues=xvalues,
funcvalues=funcvalues,
target_y=target_y,
target_x=target_x,
mu=mu,
cov_matrix=cov_matrix)
def test_paint_prediction_greyscale():
mu = torch.rand((28 * 28))[None, :]
width = 28
height = 28
Plotter.paint_prediction_greyscale(mu=mu, width=width, height=height)
def test_paint_groundtruth_greyscale():
func_x = torch.rand((28 * 28))[None, :]
width = 28
height = 28
Plotter.paint_groundtruth_greyscale(
func_x=func_x, width=width, height=height)
| 2.140625 | 2 |
demo/demo/urls.py | JanMalte/processlib | 1 | 12789957 | from django.conf.urls import url, include
from rest_framework import routers
from crm_inbox.flows import * # noqa
from processlib.views import (ProcessViewSet)
router = routers.DefaultRouter()
router.register('process', ProcessViewSet)
urlpatterns = [
url(r'^process/', include('processlib.urls', namespace='processlib')),
url(r'^api/', include(router.urls)),
]
| 1.734375 | 2 |
home/migrations/0004_auto_20180517_0440.py | marcanuy/keraban | 4 | 12789958 | <reponame>marcanuy/keraban
# Generated by Django 2.0.5 on 2018-05-17 04:40
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('home', '0003_standardpage_subtitle'),
]
operations = [
migrations.AlterField(
model_name='standardpage',
name='body',
field=wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='full title', required=False)), ('paragraph', wagtail.core.blocks.RichTextBlock(required=False)), ('image', wagtail.images.blocks.ImageChooserBlock(required=False)))),
),
]
| 1.796875 | 2 |
examples/4.py | Time2003/lr10 | 0 | 12789959 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == "__main__":
a = {0, 1, 2, 3}
print(len(a))
a.add(4)
print(a)
a = {0, 1, 2, 3}
a.remove(3)
print(a)
a = {0, 1, 2, 3}
a.clear()
print(a) | 3.65625 | 4 |
flask_run.py | rednafi/ashen | 4 | 12789960 | from dynaconf import settings
from app import create_app
application = create_app()
# runs this only when the environment is 'development'
if settings.ENVIRONMENT == "development" and settings.GUNICORN is False:
application.run(host="0.0.0.0", port=settings.FLASK_CONFIG.PORT, debug=True)
| 2.03125 | 2 |
src/pysparkbundle/filesystem/FilesystemInterface.py | daipe-ai/pyspark-bundle | 0 | 12789961 | from abc import ABC, abstractmethod
class FilesystemInterface(ABC):
@abstractmethod
def exists(self, path: str):
pass
@abstractmethod
def put(self, path: str, content: str, overwrite: bool = False):
pass
@abstractmethod
def makedirs(self, path: str):
pass
@abstractmethod
def copy(self, source: str, destination: str, recursive: bool = False):
pass
@abstractmethod
def move(self, source: str, destination: str, recursive: bool = False):
pass
@abstractmethod
def delete(self, path: str, recursive: bool = False):
pass
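# Purely illustrative sketch (not part of this package): a minimal local-disk
# implementation showing how the abstract methods can be filled in using only
# the standard library; the error-handling choices here are assumptions.
import os
import shutil


class LocalFilesystem(FilesystemInterface):
    def exists(self, path: str):
        return os.path.exists(path)

    def put(self, path: str, content: str, overwrite: bool = False):
        if os.path.exists(path) and not overwrite:
            raise FileExistsError(path)
        with open(path, "w") as file:
            file.write(content)

    def makedirs(self, path: str):
        os.makedirs(path, exist_ok=True)

    def copy(self, source: str, destination: str, recursive: bool = False):
        if recursive:
            shutil.copytree(source, destination)
        else:
            shutil.copy(source, destination)

    def move(self, source: str, destination: str, recursive: bool = False):
        shutil.move(source, destination)

    def delete(self, path: str, recursive: bool = False):
        if recursive:
            shutil.rmtree(path)
        else:
            os.remove(path)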
| 3.578125 | 4 |
vmf_embeddings/third_party/dml_cross_entropy/resnet.py | google-research/vmf_embeddings | 8 | 12789962 | # coding=utf-8
# Copyright 2021 The vMF Embeddings Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for instantiating a ResNet in PyTorch.
Code adapted from:
https://github.com/jeromerony/dml_cross_entropy/blob/master/models/base_model.py
https://github.com/jeromerony/dml_cross_entropy/blob/master/models/architectures/resnet.py
"""
import logging
import torch.nn as nn
from torchvision.models import resnet
from torchvision.models.utils import load_state_dict_from_url
from vmf_embeddings.archs import arch
from vmf_embeddings.archs import utils
log = logging.getLogger("main")
class ResNet(arch.Arch):
"""Class for defining a ResNet architecture."""
def __init__(
self,
n_classes,
embedding_dim,
set_bn_eval,
first_conv_3x3,
use_vmf,
learn_temp,
init_temp,
kappa_confidence,
block,
layers,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
):
"""Initializes a ResNet architecture object. See arguments in arch.py."""
super(ResNet, self).__init__(embedding_dim, n_classes, use_vmf, learn_temp,
init_temp, kappa_confidence)
self.backbone_features = 512 * block.expansion
self._norm_layer = nn.BatchNorm2d
# Fixes batch-norm to eval mode during training
self.set_bn_eval = set_bn_eval
# Make first convolution use a 3x3 kernel for CIFAR datasets
self.first_conv_3x3 = first_conv_3x3
# Linear layer that remaps from the backbone output of ResNet
# to the embedding dimensionality
self.remap = nn.Linear(self.backbone_features, self.embedding_dim)
nn.init.zeros_(self.remap.bias)
self.classifier = nn.Linear(self.embedding_dim, self.n_classes, bias=False)
if self.use_vmf:
# This is the empirical approximation for initialization the vMF
# distributions for each class in the final layer.
utils.vmf_class_weight_init(self.classifier.weight, self.kappa_confidence,
self.embedding_dim)
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# Each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
if self.first_conv_3x3:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
else:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = self._norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-init
for m in self.modules():
if isinstance(m, resnet.Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, resnet.BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
resnet.conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
))
return nn.Sequential(*layers)
def create_encoder(self):
self.encoder = nn.Sequential(
self.conv1,
self.bn1,
self.relu,
self.maxpool,
self.layer1,
self.layer2,
self.layer3,
self.layer4,
self.avgpool,
utils.Flatten(),
self.remap,
self.classifier,
)
def train(self, mode=True):
"""Sets the module in training mode.
This has any effect only on certain modules. See documentations of
particular modules for details of their behaviors in training/evaluation
mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`, etc.
Args:
mode: whether to set training mode ("True") or evaluation mode ("False").
Returns:
self
"""
self.training = mode
for module in self.children():
module.train(mode)
if self.set_bn_eval:
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
module.eval()
return self
def _resnet(
arch_name,
block,
layers,
pretrained,
progress,
n_classes,
embedding_dim,
set_bn_eval,
first_conv_3x3,
use_vmf,
learn_temp,
init_temp,
kappa_confidence,
):
"""Instantiates a ResNet model."""
model = ResNet(
n_classes,
embedding_dim,
set_bn_eval,
first_conv_3x3,
use_vmf,
learn_temp,
init_temp,
kappa_confidence,
block,
layers,
)
if pretrained:
log.info("Loading ResNet50 from Pytorch pretrained")
state_dict = load_state_dict_from_url(
resnet.model_urls[arch_name], progress=progress)
model.load_state_dict(state_dict, strict=False)
model.create_encoder()
return model
def resnet50(
n_classes,
embedding_dim,
set_bn_eval,
pretrained,
first_conv_3x3,
use_vmf,
learn_temp,
init_temp,
kappa_confidence,
progress=False,
):
"""ResNet-50 model from "Deep Residual Learning for Image Recognition"."""
return _resnet(
"resnet50",
resnet.Bottleneck,
[3, 4, 6, 3],
pretrained,
progress,
n_classes,
embedding_dim,
set_bn_eval,
first_conv_3x3,
use_vmf,
learn_temp,
init_temp,
kappa_confidence,
)
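#
# Illustrative construction (the argument values are made up for the example,
# not taken from the repository):
#   model = resnet50(n_classes=100, embedding_dim=128, set_bn_eval=True,
#                    pretrained=False, first_conv_3x3=True, use_vmf=True,
#                    learn_temp=True, init_temp=0.05, kappa_confidence=0.7)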
| 2.109375 | 2 |
aioruuvitag/aioruuvitag_bleak.py | hulttis/ruuvigw | 7 | 12789963 | # coding=utf-8
# !/usr/bin/python3
# Name: aioruuvitag_bleak - Bluetooth Low Energy platform Agnostic Klient by <NAME>
# https://github.com/hbldh/bleak.git
# Copyright: (c) 2019 TK
# Licence: MIT
#
# sudo apt install bluez
# requires bluez 5.43
# ------------------------------------------------------------------------------
import logging
logger = logging.getLogger('ruuvitag')
import asyncio
from contextlib import suppress
from datetime import datetime as _dt, timedelta as _td
import platform
if platform.system() == 'Windows':
from .scanner_windows import scanner as _scanner
elif platform.system() == 'Linux':
from .scanner_linux import scanner as _scanner
from .ruuvitag_misc import hex_string, get_sec
from .ble_data import BLEData
# ===============================================================================
class ruuvitag_bleak(object):
SCHEDULER_MAX_INSTANCES = 5
HCICONFIG_CMD = '/bin/hciconfig'
#-------------------------------------------------------------------------------
def __init__(self,*,
loop,
callback,
scheduler=None,
device='hci0',
mfids=None,
device_reset=False,
device_timeout=10.0,
**kwargs
):
logger.info(f'>>> device:{device}')
if not loop:
raise ValueError(f'loop is None')
self._loop = loop
if not callback:
raise ValueError(f'callback is None')
self._callback = callback
self._stopevent = asyncio.Event()
self._scheduler = scheduler
self._mfids = mfids
self._device_reset = device_reset
self._device_timeout = device_timeout
self._device = device
self._data_ts = 0
self._inqueue = asyncio.Queue()
self._scanner_stop = None
self._scanner_task = None
logger.info(f'>>> {self} initialized')
# -------------------------------------------------------------------------------
def __repr__(self):
return f'ruuvitag_bleak device:{self._device} mfids:{self._mfids} reset:{self._device_reset} timeout:{self._device_timeout}'
#-------------------------------------------------------------------------------
def _schedule(self):
"""
Initializes scheduler for hci device nodata checking
"""
logger.debug(f'>>> enter {type(self._scheduler)} device_timeout:{self._device_timeout}')
if not self._scheduler:
return
if self._device_timeout:
l_jobid = f'bleak_timeout'
try:
self._scheduler.add_job(
self._do_bleak_timeout,
'interval',
seconds = 1,
kwargs = {
'jobid': l_jobid,
'reset': self._device_reset
},
id = l_jobid,
replace_existing = True,
max_instances = self.SCHEDULER_MAX_INSTANCES,
coalesce = True,
next_run_time = _dt.now()+_td(seconds=self._device_timeout)
)
logger.info(f'>>> jobid:{l_jobid} scheduled')
except:
logger.exception(f'>>> jobid:{l_jobid}')
#-------------------------------------------------------------------------------
async def _do_bleak_timeout(self, *,
jobid,
reset=False
):
"""
Supervises reception of the bleak data
Restarts socket if no data received within device_timeout period
"""
l_now = get_sec()
if (l_now - self._data_ts) > self._device_timeout:
self._data_ts = l_now
logger.warning(f'>>> jobid:{jobid} device_timeout timer ({self._device_timeout}sec) expired')
try:
logger.info(f'>>> jobid:{jobid} restarting device:{self._device}')
try:
                    await self._reset()
self._scanner_task = self._loop.create_task(_scanner(device=self._device, loop=self._loop, outqueue=self._inqueue, stopevent=self._scanner_stop))
except:
logger.exception(f'>>> exception')
pass
except:
logger.exception(f'>>> jobid:{jobid}')
# ------------------------------------------------------------------------------
async def _reset(self):
logger.debug(f'>>> device:{self._device}')
self._scanner_stop.set()
await asyncio.sleep(1.0)
if self._device_reset:
await self._shell_cmd(cmd=f'{self.HCICONFIG_CMD} {self._device} down')
await asyncio.sleep(1.0)
await self._shell_cmd(cmd=f'{self.HCICONFIG_CMD} {self._device} up')
await asyncio.sleep(1.0)
self._scanner_stop.clear()
# ------------------------------------------------------------------------------
async def _shell_cmd(self, *, cmd):
if platform.system() == 'Linux':
logger.info(f'>>> {cmd!r}')
l_proc = await asyncio.create_subprocess_shell(
cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
l_stdout, l_stderr = await l_proc.communicate()
logger.info(f'>>> {cmd!r} exited with {l_proc.returncode}')
if l_stdout:
logger.debug(f'>>> stdout: {l_stdout.decode()}')
if l_stderr:
                logger.error(f'>>> stderr: {l_stderr.decode()}')
# ------------------------------------------------------------------------------
async def _handle_data(self, *, data):
"""
Handles received data from the Bleak scanner
"""
if not data:
return
self._data_ts = get_sec()
try:
l_mdata = data.metadata['manufacturer_data']
for l_mfid in list(l_mdata.keys()):
if not self._mfids or l_mfid in self._mfids:
l_mfdata = l_mdata[l_mfid]
logger.debug(f'''>>> device:{self._device} mac:{data.address} rssi:{data.rssi} mfid:{l_mfid} mflen:{len(l_mfdata)} mfdata:{hex_string(data=l_mfdata, filler='')}''')
try:
await self._callback(bledata=BLEData(
mac = data.address,
rssi = data.rssi,
mfid = l_mfid,
mfdata = l_mfdata,
rawdata = data
))
except:
logger.exception(f'>>> exception')
pass
except:
logger.exception(f'>>> exception')
pass
# -------------------------------------------------------------------------------
async def run(self):
logger.info(f'>>> starting...')
try:
self._scanner_stop = asyncio.Event()
self._scanner_task = self._loop.create_task(_scanner(device=self._device, loop=self._loop, outqueue=self._inqueue, stopevent=self._scanner_stop))
self._schedule()
except:
logger.exception(f'>>> exception')
raise
while not self._stopevent.is_set():
try:
if (self._inqueue):
await self._handle_data(data=await self._inqueue.get())
else:
await asyncio.sleep(100)
except GeneratorExit:
logger.error(f'>>> GeneratorExit')
self._stopevent.set()
break
except asyncio.CancelledError:
self._stopevent.set()
                logger.warning(f'>>> CancelledError')
break
except:
logger.exception(f'>>> exception')
break
# l_task.cancel()
# with suppress(asyncio.CancelledError):
# self._loop.run_until_complete(l_task)
self._scanner_stop.set()
await asyncio.sleep(0.2)
logger.info('>>> bleak completed')
return True
# -------------------------------------------------------------------------------
def stop(self):
logger.info(f'>>> bleak')
self._stopevent.set()
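#
# Illustrative wiring (hypothetical names; the Ruuvi manufacturer id 0x0499 is
# an assumption): the listener takes an asyncio loop and a coroutine callback
# that receives the decoded advertisement as the 'bledata' keyword argument.
#
#   async def on_bledata(*, bledata):
#       print(bledata.mac, bledata.mfid, len(bledata.mfdata))
#
#   tag = ruuvitag_bleak(loop=loop, callback=on_bledata, mfids=[0x0499])
#   loop.run_until_complete(tag.run())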
| 2.03125 | 2 |
setup.py | Spico197/REx | 4 | 12789964 | import os
import setuptools
from rex import __version__
readme_filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
with open(readme_filepath, "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pytorch-rex",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
description="A toolkit for Relation Extraction and more...",
long_description_content_type="text/markdown",
long_description=long_description,
url="https://github.com/Spico197/REx",
packages=setuptools.find_packages(exclude=["tests", "tests.*", "docs", "docs.*"]),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
install_requires=[
"numpy>=1.19.0",
"scikit-learn>=0.21.3",
"omegaconf>=2.0.6",
"loguru==0.5.3",
"tqdm==4.61.1",
],
# package_data={
# 'rex' : [
# 'models/*.pth'
# ],
# },
# include_package_data=True,
)
| 1.460938 | 1 |
kf_model_fhir/mappers/resources/kfdrc_research_study.py | kids-first/kf-model-fhir | 3 | 12789965 | """
This module converts Kids First studies to FHIR kfdrc-research-study
(derived from FHIR ResearchStudy).
"""
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from common.utils import make_identifier, make_select, get
RESOURCE_TYPE = "ResearchStudy"
def yield_kfdrc_research_studies(
eng, table, target_service_id, organizations, practitioner_roles, groups
):
for row in make_select(
eng,
table,
CONCEPT.STUDY.ID,
CONCEPT.INVESTIGATOR.INSTITUTION,
CONCEPT.INVESTIGATOR.NAME,
CONCEPT.STUDY.ATTRIBUTION,
CONCEPT.STUDY.SHORT_NAME,
CONCEPT.STUDY.AUTHORITY,
CONCEPT.STUDY.NAME,
):
study_id = get(row, CONCEPT.STUDY.ID)
institution = get(row, CONCEPT.INVESTIGATOR.INSTITUTION)
investigator_name = get(row, CONCEPT.INVESTIGATOR.NAME)
study_name = get(row, CONCEPT.STUDY.NAME)
attribution = get(row, CONCEPT.STUDY.ATTRIBUTION)
short_name = get(row, CONCEPT.STUDY.SHORT_NAME)
if not all((study_id, institution, investigator_name, study_name)):
continue
retval = {
"resourceType": RESOURCE_TYPE,
"id": make_identifier(RESOURCE_TYPE, study_id),
"meta": {
"profile": [
"http://fhir.kids-first.io/StructureDefinition/kfdrc-research-study"
]
},
"identifier": [
{
"system": "https://kf-api-dataservice.kidsfirstdrc.org/studies",
"value": target_service_id,
},
{
"system": "https://kf-api-dataservice.kidsfirstdrc.org/studies?external_id=",
"value": study_id,
},
],
"extension": [
{
"url": "http://fhir.kids-first.io/StructureDefinition/related-organization",
"extension": [
{
"url": "organization",
"valueReference": {
"reference": f'Organization/{organizations[institution]["id"]}'
},
}
],
}
],
"title": study_name,
"status": "completed",
"principalInvestigator": {
"reference": f'PractitionerRole/{practitioner_roles[(institution, investigator_name)]["id"]}'
},
}
if attribution:
retval["identifier"].append({"value": attribution})
if short_name:
retval["extension"].append(
{
"url": "http://fhir.kids-first.io/StructureDefinition/display-name",
"valueString": short_name,
}
)
if groups:
retval["enrollment"] = [
{"reference": f'Group/{group["id"]}'}
for group in groups.values()
]
yield retval
| 2.265625 | 2 |
elasticapm/transport/base.py | shareablee/apm-agent-python | 0 | 12789966 | <reponame>shareablee/apm-agent-python
# -*- coding: utf-8 -*-
import gzip
import logging
import threading
import timeit
from collections import defaultdict
from elasticapm.contrib.async_worker import AsyncWorker
from elasticapm.utils import json_encoder
from elasticapm.utils.compat import BytesIO
logger = logging.getLogger("elasticapm.transport")
class TransportException(Exception):
def __init__(self, message, data=None, print_trace=True):
super(TransportException, self).__init__(message)
self.data = data
self.print_trace = print_trace
class Transport(object):
"""
All transport implementations need to subclass this class
You must implement a send method..
"""
async_mode = False
def __init__(
self,
metadata=None,
compress_level=5,
json_serializer=json_encoder.dumps,
max_flush_time=None,
max_buffer_size=None,
**kwargs
):
"""
Create a new Transport instance
:param metadata: Metadata object to prepend to every queue
:param compress_level: GZip compress level. If zero, no GZip compression will be used
:param json_serializer: serializer to use for JSON encoding
:param max_flush_time: Maximum time between flushes in seconds
:param max_buffer_size: Maximum size of buffer before flush
:param kwargs:
"""
self.state = TransportState()
self._metadata = metadata if metadata is not None else {}
self._compress_level = min(9, max(0, compress_level if compress_level is not None else 0))
self._json_serializer = json_serializer
self._max_flush_time = max_flush_time
self._max_buffer_size = max_buffer_size
self._queued_data = None
self._queue_lock = threading.Lock()
self._last_flush = timeit.default_timer()
self._flush_timer = None
self._counts = defaultdict(int)
def queue(self, event_type, data, flush=False):
with self._queue_lock:
queued_data = self.queued_data
queued_data.write((self._json_serializer({event_type: data}) + "\n").encode("utf-8"))
self._counts[event_type] += 1
since_last_flush = timeit.default_timer() - self._last_flush
queue_size = 0 if queued_data.fileobj is None else queued_data.fileobj.tell()
if flush:
logger.debug("forced flush")
self.flush()
elif self._max_flush_time and since_last_flush > self._max_flush_time:
logger.debug(
"flushing due to time since last flush %.3fs > max_flush_time %.3fs",
since_last_flush,
self._max_flush_time,
)
self.flush()
elif self._max_buffer_size and queue_size > self._max_buffer_size:
logger.debug(
"flushing since queue size %d bytes > max_queue_size %d bytes", queue_size, self._max_buffer_size
)
self.flush()
elif not self._flush_timer:
with self._queue_lock:
self._start_flush_timer()
@property
def queued_data(self):
if self._queued_data is None:
self._queued_data = gzip.GzipFile(fileobj=BytesIO(), mode="w", compresslevel=self._compress_level)
data = (self._json_serializer({"metadata": self._metadata}) + "\n").encode("utf-8")
self._queued_data.write(data)
return self._queued_data
def flush(self, sync=False, start_flush_timer=True):
"""
Flush the queue
:param sync: if true, flushes the queue synchronously in the current thread
:param start_flush_timer: set to True if the flush timer thread should be restarted at the end of the flush
:return: None
"""
with self._queue_lock:
self._stop_flush_timer()
queued_data, self._queued_data = self._queued_data, None
if queued_data and not self.state.should_try():
logger.error("dropping flushed data due to transport failure back-off")
elif queued_data:
fileobj = queued_data.fileobj # get a reference to the fileobj before closing the gzip file
queued_data.close()
# StringIO on Python 2 does not have getbuffer, so we need to fall back to getvalue
data = fileobj.getbuffer() if hasattr(fileobj, "getbuffer") else fileobj.getvalue()
if hasattr(self, "send_async") and not sync:
self.send_async(data)
else:
try:
self.send(data)
self.handle_transport_success()
except Exception as e:
self.handle_transport_fail(e)
self._last_flush = timeit.default_timer()
if start_flush_timer:
self._start_flush_timer()
def send(self, data):
"""
You need to override this to do something with the actual
data. Usually - this is sending to a server
"""
raise NotImplementedError
def close(self):
"""
Cleans up resources and closes connection
:return:
"""
self.flush(sync=True, start_flush_timer=False)
def handle_transport_success(self, **kwargs):
"""
Success handler called by the transport on successful send
"""
self.state.set_success()
def handle_transport_fail(self, exception=None, **kwargs):
"""
Failure handler called by the transport on send failure
"""
message = str(exception)
logger.error("Failed to submit message: %r", message, exc_info=getattr(exception, "print_trace", True))
self.state.set_fail()
def _start_flush_timer(self, timeout=None):
timeout = timeout or self._max_flush_time
self._flush_timer = threading.Timer(timeout, self.flush)
self._flush_timer.name = "elasticapm flush timer"
self._flush_timer.daemon = True
logger.debug("Starting flush timer")
self._flush_timer.start()
def _stop_flush_timer(self):
if self._flush_timer:
logger.debug("Cancelling flush timer")
self._flush_timer.cancel()
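# Hypothetical subclass (not part of elastic-apm) illustrating the contract
# stated in the Transport docstring: only send() has to be implemented, while
# buffering, gzip framing and flush timing are inherited from the base class.
class LoggingTransport(Transport):
    def send(self, data):
        # 'data' is the gzip-compressed ND-JSON buffer assembled by flush()
        logger.debug("would send %d compressed bytes", len(data))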
class AsyncTransport(Transport):
async_mode = True
sync_transport = Transport
def __init__(self, *args, **kwargs):
super(AsyncTransport, self).__init__(*args, **kwargs)
self._worker = None
@property
def worker(self):
if not self._worker or not self._worker.is_alive():
self._worker = AsyncWorker()
return self._worker
def send_sync(self, data=None):
try:
self.sync_transport.send(self, data)
self.handle_transport_success()
except Exception as e:
self.handle_transport_fail(exception=e)
def send_async(self, data):
self.worker.queue(self.send_sync, {"data": data})
def close(self):
super(AsyncTransport, self).close()
if self._worker:
self._worker.main_thread_terminated()
class TransportState(object):
ONLINE = 1
ERROR = 0
def __init__(self):
self.status = self.ONLINE
self.last_check = None
self.retry_number = -1
def should_try(self):
if self.status == self.ONLINE:
return True
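        # illustrative reading of the back-off below: the wait between retries
        # grows as 0s, 1s, 4s, 9s, ... and is capped at 36s once retry_number >= 6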
interval = min(self.retry_number, 6) ** 2
return timeit.default_timer() - self.last_check > interval
def set_fail(self):
self.status = self.ERROR
self.retry_number += 1
self.last_check = timeit.default_timer()
def set_success(self):
self.status = self.ONLINE
self.last_check = None
self.retry_number = -1
def did_fail(self):
return self.status == self.ERROR
| 1.960938 | 2 |
Hashing/count_dist_elem_window.py | lakshyarawal/pythonPractice | 0 | 12789967 | """ Count Distinct Elements in Each Window: We are given an array and a number k (k<=n).
We need to find distinct elements in each k sized window in this array"""
def distinct_in_window(arr, win_sz) -> list:
result = []
curr_dict = dict()
for i in range(win_sz):
if arr[i] in curr_dict.keys():
curr_dict[arr[i]] += 1
else:
curr_dict[arr[i]] = 1
result.append(len(curr_dict.keys()))
m = 0
for i in range(win_sz, len(arr)):
curr_dict[arr[m]] -= 1
if arr[i] in curr_dict.keys():
curr_dict[arr[i]] += 1
else:
curr_dict[arr[i]] = 1
m += 1
curr_dict = {k: v for k, v in curr_dict.items() if v != 0}
result.append(len(curr_dict.keys()))
return result
def main():
arr_input = [10, 20, 20, 10, 30, 40, 10]
k = 4
print(distinct_in_window(arr_input, k))
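    # windows: [10,20,20,10] -> 2, [20,20,10,30] -> 3, [20,10,30,40] -> 4,
    # [10,30,40,10] -> 3, so the call above should print [2, 3, 4, 3]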
# Using the special variable
# __name__
if __name__ == "__main__":
main()
| 3.90625 | 4 |
example.py | lingyunfeng/PyDDA | 49 | 12789968 | import pyart
import pydda
from matplotlib import pyplot as plt
import numpy as np
berr_grid = pyart.io.read_grid("berr_Darwin_hires.nc")
cpol_grid = pyart.io.read_grid("cpol_Darwin_hires.nc")
sounding = pyart.io.read_arm_sonde(
"/home/rjackson/data/soundings/twpsondewnpnC3.b1.20060119.231600.custom.cdf")
print(berr_grid.projection)
print(cpol_grid.get_projparams())
u_back = sounding[1].u_wind
v_back = sounding[1].v_wind
z_back = sounding[1].height
#u_init, v_init, w_init = pydda.retrieval.make_constant_wind_field(cpol_grid, wind=(0.0,0.0,0.0), vel_field='VT')
u_init, v_init, w_init = pydda.retrieval.make_wind_field_from_profile(cpol_grid, sounding, vel_field='VT')
#u_init, v_init, w_init = pydda.retrieval.make_test_divergence_field(
# cpol_grid, 30, 9.0, 15e3, 20e3, 5, 0, -20e3, 0)
# Test mass continuity by putting convergence at surface and divergence aloft
berr_grid.fields['DT']['data'] = cpol_grid.fields['DT']['data']
# Step 1 - do iterations with just data
Grids = pydda.retrieval.get_dd_wind_field([berr_grid, cpol_grid], u_init,
v_init, w_init,u_back=u_back,
v_back=v_back, z_back=z_back,
Co=100.0, Cm=1500.0, vel_name='VT',
refl_field='DT', frz=5000.0,
filt_iterations=0,
mask_w_outside_opt=False)
plt.figure(figsize=(8,8))
pydda.vis.plot_horiz_xsection_barbs(Grids, 'DT', level=6,
vel_contours=[1, 4, 10])
plt.interactive(False)
# Plotting inputs: the coordinate arrays, retrieved wind fields and barb
# thinning factors below are assumptions (standard Py-ART grid point
# coordinates and the 'u'/'w' fields PyDDA writes into the returned grids).
cpol_x = cpol_grid.point_x['data'] / 1e3   # assumed x-coordinates in km to match the axis label
cpol_h = cpol_grid.point_z['data']         # heights in m
u = Grids[0].fields['u']
w = Grids[0].fields['w']
barb_density = 8                           # assumed horizontal barb thinning factor
barb_density_vert = 2                      # assumed vertical barb thinning factor
cpol_z = cpol_grid.fields['DT']['data']
lat_level = 45
plt.figure(figsize=(10, 10))
plt.pcolormesh(cpol_x[::,lat_level,::], cpol_h[::,lat_level,::],
cpol_z[::,lat_level,::],
cmap=pyart.graph.cm_colorblind.HomeyerRainbow)
plt.colorbar(label='Z [dBZ]')
plt.barbs(cpol_x[::barb_density_vert,lat_level,::barb_density],
cpol_h[::barb_density_vert,lat_level,::barb_density],
u['data'][::barb_density_vert,lat_level,::barb_density],
w['data'][::barb_density_vert,lat_level,::barb_density])
cs = plt.contour(cpol_x[::,lat_level,::], cpol_h[::,lat_level,::],
w['data'][::,lat_level,::], levels=np.arange(1,20,2),
linewidth=16, alpha=0.5)
plt.clabel(cs)
plt.xlabel('X [km]', fontsize=20)
plt.ylabel('Z [m]', fontsize=20)
plt.show() | 2.28125 | 2 |
deprecated/jutge-like/X10534.py | balqui/pytokr | 0 | 12789969 | def make_get_toks(f=None):
"make iterator and next functions out of iterable of split strings"
from sys import stdin
from itertools import chain
def sp(ln):
"to split the strings with a map"
return ln.split()
def the_it():
"so that both results are callable in similar manner"
return it
if f is None:
f = stdin
it = chain.from_iterable(map(sp, f))
return the_it, it.__next__
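# Usage note: get_toks() iterates over every remaining whitespace-separated token,
# while get_tok() returns just the next one; both read from stdin by default.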
get_toks, get_tok = make_get_toks()
from collections import defaultdict as dd
n, k = int(get_tok()), int(get_tok())
d = dd(int)
on = True
for m in get_toks():
m = int(m)
if m % n == 0:
d[m] += 1
if d[m] >= k and on:
print(m)
on = False
if on:
print("none")
| 3.234375 | 3 |
acceptance_tests/utilities/collex_helper.py | ONSdigital/ssdc-rm-acceptance-tests | 0 | 12789970 | <reponame>ONSdigital/ssdc-rm-acceptance-tests
from datetime import datetime, timedelta
import requests
from acceptance_tests.utilities.event_helper import get_emitted_collection_exercise_update
from acceptance_tests.utilities.test_case_helper import test_helper
from config import Config
def add_collex(survey_id, collection_instrument_selection_rules):
collex_name = '<NAME> ' + datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
start_date = datetime.utcnow()
end_date = start_date + timedelta(days=2)
url = f'{Config.SUPPORT_TOOL_API}/collectionExercises'
body = {'name': collex_name,
'surveyId': survey_id,
'reference': "MVP012021",
'startDate': f'{start_date.isoformat()}Z',
'endDate': f'{end_date.isoformat()}Z',
'metadata': {'test': 'passed'},
'collectionInstrumentSelectionRules': collection_instrument_selection_rules
}
response = requests.post(url, json=body)
response.raise_for_status()
collex_id = response.json()
collection_exercise_update_event = get_emitted_collection_exercise_update()
test_helper.assertEqual(collection_exercise_update_event['name'], collex_name,
'Unexpected collection exercise name')
test_helper.assertEqual(collection_exercise_update_event['surveyId'], survey_id,
'Unexpected survey ID')
test_helper.assertEqual(collection_exercise_update_event['reference'], "MVP012021",
'Unexpected reference')
parsed_start_date = datetime.strptime(collection_exercise_update_event['startDate'], "%Y-%m-%dT%H:%M:%S.%fZ")
parsed_end_date = datetime.strptime(collection_exercise_update_event['endDate'], "%Y-%m-%dT%H:%M:%S.%fZ")
test_helper.assertEqual(parsed_start_date, start_date, 'Invalid or missing start date')
test_helper.assertEqual(parsed_end_date, end_date, 'Invalid or missing end date')
test_helper.assertEqual(collection_exercise_update_event['metadata'], {'test': 'passed'},
'Unexpected metadata')
return collex_id
| 2.265625 | 2 |
src/api/resources/light_curve/light_curve.py | alercebroker/ztf-api-apf | 0 | 12789971 | from flask_restx import Namespace, Resource
from .parsers import survey_id_parser
from .models import (
light_curve_model,
detection_model,
non_detection_model,
)
from dependency_injector.wiring import inject, Provide
from dependency_injector.providers import Factory
from api.container import AppContainer
from shared.interface.command import Command
from shared.interface.command import ResultHandler
from core.light_curve.domain.lightcurve_service import LightcurveServicePayload
from ralidator_flask.decorators import (
set_permissions_decorator,
set_filters_decorator,
check_permissions_decorator,
)
api = Namespace("lightcurve", description="LightCurve related operations")
api.models[light_curve_model.name] = light_curve_model
api.models[detection_model.name] = detection_model
api.models[non_detection_model.name] = non_detection_model
@api.route("/<id>/lightcurve")
@api.param("id", "The object's identifier")
@api.response(200, "Success")
@api.response(404, "Not found")
class LightCurve(Resource):
@set_permissions_decorator(["admin", "basic_user"])
@set_filters_decorator(["filter_atlas_lightcurve"])
@check_permissions_decorator
@api.doc("lightcurve")
@api.marshal_with(light_curve_model, skip_none=True)
@api.expect(survey_id_parser)
@inject
def get(
self,
id,
command_factory: Factory[Command] = Provide[
AppContainer.lightcurve_package.get_lightcurve_command.provider
],
result_handler: ResultHandler = Provide[
AppContainer.view_result_handler
],
):
"""
Gets detections and non detections
"""
survey_id = survey_id_parser.parse_args()["survey_id"]
command = command_factory(
payload=LightcurveServicePayload(id, survey_id),
handler=result_handler,
)
command.execute()
return result_handler.result
@api.route("/<id>/detections")
@api.param("id", "The object's identifier")
@api.response(200, "Success")
@api.response(404, "Not found")
class ObjectDetections(Resource):
@set_permissions_decorator(["admin", "basic_user"])
@set_filters_decorator(["filter_atlas_detections"])
@check_permissions_decorator
@api.doc("detections")
@api.marshal_list_with(detection_model, skip_none=True)
@api.expect(survey_id_parser)
@inject
def get(
self,
id,
command_factory: Factory[Command] = Provide[
AppContainer.lightcurve_package.get_detections_command.provider
],
result_handler: ResultHandler = Provide[
AppContainer.view_result_handler
],
):
"""
Just the detections
"""
survey_id = survey_id_parser.parse_args()["survey_id"]
command = command_factory(
payload=LightcurveServicePayload(id, survey_id),
handler=result_handler,
)
command.execute()
return result_handler.result
@api.route("/<id>/non_detections")
@api.param("id", "The object's identifier")
@api.response(200, "Success")
@api.response(404, "Not found")
class NonDetections(Resource):
@set_permissions_decorator(["admin", "basic_user"])
@set_filters_decorator(["filter_atlas_non_detections"])
@check_permissions_decorator
@api.doc("non_detections")
@api.marshal_list_with(non_detection_model, skip_none=True)
@api.expect(survey_id_parser)
@inject
def get(
self,
id,
command_factory: Factory[Command] = Provide[
AppContainer.lightcurve_package.get_non_detections_command.provider
],
result_handler: ResultHandler = Provide[
AppContainer.view_result_handler
],
):
"""
Just non detections
"""
survey_id = survey_id_parser.parse_args()["survey_id"]
command = command_factory(
payload=LightcurveServicePayload(id, survey_id),
handler=result_handler,
)
command.execute()
return result_handler.result
| 2.109375 | 2 |
resources/lib/basictypes/__init__.py | torstehu/Transmission-XBMC | 22 | 12789972 | """Common data-modeling Python types
The idea of the basictypes package is to provide
types which provide enough metadata to allow an
application to use introspection to perform much
of the housekeeping required to create business
applications.
"""
| 2.390625 | 2 |
c_gan.py | webcok/CGANHumanTrajectory | 1 | 12789973 | <gh_stars>1-10
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import random
def plot(samples):
fig = plt.figure()
plt.gca().set_color_cycle(['blue', 'red','green', 'black'])
plt.plot(samples[0],linewidth=2.0)
plt.show()
return fig
## Noise for the GAN
def sample_Z(m, n):
return np.random.uniform(-100., 100., size=[m, n])
## Load the Data
npzfile = np.load("xSet.npz")
Train= npzfile["train"]
Add = npzfile["add"]
## Batch Size
mb_size = 20
## Noise Dimension
Z_dim = 10000
X_dim = Train.shape[1]
## Number of epochs
num_epochs = 100000
y_dim = Add.shape[1]
## Hidden dimensions
h_dim = 1000
h2_dim = 500
h3_dim = 250
random.seed()
## Learning Rate
lr = 0.1
## For putting outputs in a specific directory
if not os.path.exists('out/'):
os.makedirs('out/')
i = 0
## Create random batches
def rand_batch(size):
global Train
global Add
s_size = Train.shape[0]
mybatch = []
count = 0
X_mb = []
y_mb = []
while count < size:
rn = random.randint(0,s_size-1)
if rn not in mybatch:
mybatch.append(rn)
count +=1
for i in mybatch:
X_mb.append(Train[i])
y_mb.append(Add[i])
return (X_mb,y_mb)
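# Note: rand_batch samples `size` distinct row indices without replacement and
# returns the matching training samples and their condition vectors.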
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape=size, stddev=xavier_stddev)
""" Discriminator Net model """
X = tf.placeholder(tf.float32, shape=[None, X_dim])
y = tf.placeholder(tf.float32, shape=[None, y_dim])
D_W1 = tf.Variable(xavier_init([X_dim + y_dim, h_dim]))
D_b1 = tf.Variable(tf.ones(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim, h2_dim]))
D_b2 = tf.Variable(tf.ones(shape=[h2_dim]))
D_W3 = tf.Variable(xavier_init([h2_dim, h3_dim]))
D_b3 = tf.Variable(tf.ones(shape=[h3_dim]))
D_W4 = tf.Variable(xavier_init([h3_dim, 1]))
D_b4 = tf.Variable(tf.ones(shape=[1]))
theta_D = [D_W1, D_W2, D_W3, D_W4, D_b1, D_b2, D_b3, D_b4]
def discriminator(x, y):
inputs = tf.concat(axis=1, values=[x, y])
D_h1 = tf.nn.tanh(tf.matmul(inputs, D_W1) + D_b1)
D_h2 = tf.nn.tanh(tf.matmul(D_h1, D_W2) + D_b2)
D_h3 = tf.nn.tanh(tf.matmul(D_h2, D_W3) + D_b3)
D_logit = tf.matmul(D_h3, D_W4) + D_b4
D_prob = tf.nn.sigmoid(D_logit)
return D_prob,D_logit
""" Generator Net model """
Z = tf.placeholder(tf.float32, shape=[None, Z_dim])
G_W1 = tf.Variable(xavier_init([Z_dim + y_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim, h2_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[h2_dim]))
G_W3 = tf.Variable(xavier_init([h2_dim, h3_dim]))
G_b3 = tf.Variable(tf.zeros(shape=[h3_dim]))
G_W4 = tf.Variable(xavier_init([h3_dim, X_dim]))
G_b4 = tf.Variable(tf.zeros(shape=[X_dim]))
theta_G = [G_W1, G_W2, G_W3, G_W4, G_b1, G_b2, G_b3, G_b4]
def generator(z, y):
inputs = tf.concat(axis=1, values=[z, y])
G_h1 = tf.nn.tanh(tf.matmul(inputs, G_W1) + G_b1)
G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)
G_h3 = tf.nn.tanh(tf.matmul(G_h2, G_W3) + G_b3)
G_log_prob = tf.matmul(G_h3,G_W4)+G_b4
G_prob = tf.nn.sigmoid(G_log_prob)
return G_log_prob
G_sample = generator(Z, y)
D_real, D_logit_real = discriminator(X, y)
D_fake, D_logit_fake = discriminator(G_sample, y)
D_loss_real = tf.reduce_mean(- (tf.log((1 - D_fake)+1e-10)+tf.log(D_real+1e-10) ))
D_loss_fake = tf.reduce_mean(- tf.log(D_fake+1e-10))
D_loss = D_loss_real
G_loss = D_loss_fake
D_solver = tf.train.AdagradOptimizer(learning_rate=lr).minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdagradOptimizer(learning_rate=lr).minimize(G_loss, var_list=theta_G)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch_idx in range(num_epochs):
if epoch_idx % 10000 == 0:
n_sample = 1
Z_sample = sample_Z(n_sample, Z_dim)
y_sample = np.ones(shape=[n_sample, y_dim])
y_sample[0][0] = 0.0
y_sample[0][1] = 50.0
samples = sess.run(G_sample, feed_dict={Z: Z_sample, y:y_sample})
            print(samples)
fig = plot(samples)
X_mb, y_mb = rand_batch(mb_size)
Z_sample = sample_Z(mb_size, Z_dim)
A,B = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: Z_sample, y:y_mb})
C,D = sess.run([G_solver, G_loss], feed_dict={Z: Z_sample, y:y_mb})
if epoch_idx % 100 == 0:
print('Iter: {}'.format(epoch_idx))
print('D loss: {}'.format(B))
print('G loss: {}'.format(D))
print()
print(D_W1.eval())
print(G_W1.eval())
| 2.765625 | 3 |
setup.py | ig0774/eulxml | 19 | 12789974 | #!/usr/bin/env python
"""Setup.py for eulxml package"""
from distutils.command.build_py import build_py
from distutils.command.clean import clean
from distutils.command.sdist import sdist
from distutils.core import Command
import os
import sys
import shutil
from setuptools import setup, find_packages
import eulxml
class GenerateXmlCatalog(Command):
'''Custom setup command to generate fresh catalog and schemas'''
user_options = []
def initialize_options(self):
"""init options"""
pass
def finalize_options(self):
"""finalize options"""
pass
def run(self):
from eulxml.catalog import generate_catalog
generate_catalog()
def generate_catalog_if_needed():
# helper method to check if catalog is present, and generate if not
if not os.path.exists(eulxml.XMLCATALOG_FILE):
from eulxml.catalog import generate_catalog
print("Cenerating XML catalog...")
generate_catalog()
class CleanSchemaData(clean):
"""Custom cleanup command to delete build and schema files"""
description = "Custom clean command; remove schema files and XML catalog"
def run(self):
# remove schema data and then do any other normal cleaning
try:
shutil.rmtree(eulxml.XMLCATALOG_DIR)
except OSError:
pass
clean.run(self)
class BuildPyWithPly(build_py):
"""Use ply to generate parsetab and lextab modules."""
def run(self):
# importing this forces ply to generate parsetab/lextab
import eulxml.xpath.core
generate_catalog_if_needed()
build_py.run(self)
class SdistWithCatalog(sdist):
"""Extend sdist command to ensure schema catalog is included."""
def run(self):
generate_catalog_if_needed()
sdist.run(self)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: XML',
]
LONG_DESCRIPTION = None
try:
# read the description if it's there
with open('README.rst') as desc_f:
LONG_DESCRIPTION = desc_f.read()
except:
pass
dev_requirements = [
'sphinx>=1.3.5',
'coverage',
'Django<1.9',
'rdflib>=3.0',
'mock',
'nose',
'tox',
'requests',
]
# NOTE: dev requirements should be duplicated in pip-dev-req.txt
# for generating documentation on readthedocs.org
# unittest2 should only be included for py2.6
if sys.version_info < (2, 7):
dev_requirements.append('unittest2')
setup(
cmdclass={
'build_py': BuildPyWithPly,
'clean': CleanSchemaData,
'sdist': SdistWithCatalog,
'xmlcatalog': GenerateXmlCatalog
},
name='eulxml',
version=eulxml.__version__,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/emory-libraries/eulxml',
license='Apache License, Version 2.0',
packages=find_packages(),
setup_requires=[
'ply>=3.8',
],
install_requires=[
'ply>=3.8',
'lxml>=3.4',
'six>=1.10',
],
extras_require={
'django': ['Django<1.9'],
'rdf': ['rdflib>=3.0'],
'dev': dev_requirements
},
package_data={'eulxml': [
# include schema catalog and all downloaded schemas in the package
'%s/*' % eulxml.SCHEMA_DATA_DIR
]},
description='XPath-based XML data binding, with Django form support',
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
)
| 2.25 | 2 |
alnitak/tests/cloudflare_test.py | definitelyprobably/alnitak | 0 | 12789975 |
import pytest
import re
from pathlib import Path
from time import sleep
from alnitak import config
from alnitak.api import cloudflare
from alnitak.tests import setup
from alnitak import prog as Prog
from alnitak import exceptions as Except
@pytest.fixture(scope="module")
def cloudflare_api(request):
return Path(request.fspath.dirname) / 'cloudflare.api'
def api_file_exists(cloudflare_api):
if cloudflare_api.exists():
return True
return False
def get_domain(api_path):
with open(str(api_path), 'r') as file:
lines = file.read().splitlines()
domain = None
for l in lines:
m = re.match(r'\s*#.*domain:\s*(?P<domain>\S+)\s*$', l)
if m:
domain = m.group('domain')
return domain
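# Note: get_domain() expects the cloudflare.api file to contain, alongside the
# account credentials, a comment line of the form "# domain: example.com" naming
# the zone under test (example.com is a placeholder).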
def test_cloudflare(cloudflare_api):
if not api_file_exists(cloudflare_api):
pytest.skip("no cloudflare.api file")
# need the domain
domain = get_domain(cloudflare_api)
assert domain
s = setup.Init(keep=True)
s.create_cloudflare_config(cloudflare_api, domain)
prog = setup.create_state_obj(s, config=s.configC1)
# need this to log if create_state_obj set 'log=True', otherwise this will
# do nothing.
with prog.log:
retval = config.read(prog)
assert retval == Prog.RetVal.ok
t_a2 = setup.create_tlsa_obj('211', '53527', 'tcp', domain)
t_a1 = setup.create_tlsa_obj('311', '53527', 'tcp', domain)
assert len(prog.target_list) == 1
target = prog.target_list[0]
assert len(target.tlsa) == 2
assert t_a1 in target.tlsa
assert t_a2 in target.tlsa
tlsa1 = target.tlsa[0]
tlsa2 = target.tlsa[1]
api = target.api
assert api.domain == domain
assert len(api.email) > 0
assert len(api.key) > 0
hash211 = s.hash['a.com']['cert1'][211]
hash311 = s.hash['a.com']['cert1'][311]
cloudflare.api_publish(prog, api, tlsa1, hash211)
cloudflare.api_publish(prog, api, tlsa2, hash311)
# error encountered: Except.DNSProcessingError
# record is already up: Except.DNSSkipProcessing
sleep(3)
records211 = cloudflare.api_read(prog, api, tlsa1)
records311 = cloudflare.api_read(prog, api, tlsa2)
# error encountered: Except.DNSProcessingError
# record is not up: Except.DNSNotLive
assert len(records211) == 1
assert hash211 in records211
assert len(records311) == 1
assert hash311 in records311
id211 = records211[hash211]
id311 = records311[hash311]
sleep(3)
cloudflare.api_delete(prog, api, tlsa1, id211)
cloudflare.api_delete(prog, api, tlsa2, id311)
# error encountered: Except.DNSProcessingError
sleep(3)
with pytest.raises(Except.DNSNotLive) as ex:
cloudflare.api_read(prog, api, tlsa1)
with pytest.raises(Except.DNSNotLive) as ex:
cloudflare.api_read(prog, api, tlsa2)
| 2.03125 | 2 |
main.py | Sajadrahimi/quarto-monte-carlo | 0 | 12789976 | <filename>main.py
from game.game import Game
game = Game()
print(game.status())
| 1.953125 | 2 |
src/Gon/jrj_spyder.py | somewheve/Listed-company-news-crawl-and-text-analysis | 1 | 12789977 | <filename>src/Gon/jrj_spyder.py
"""
JRJ "Financial World" (Jinrongjie): http://www.jrj.com.cn
All news on the stock channel: http://stock.jrj.com.cn/xwk/202012/20201203_1.shtml
"""
import __init__
from spyder import Spyder
from Kite import config
from Kite import utils
import time
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
class JrjSpyder(Spyder):
def __init__(self):
super(JrjSpyder, self).__init__()
self.col = self.db_obj.create_col(self.db, config.COLLECTION_NAME_JRJ)
self.terminated_amount = 0
def get_url_info(self, url, specific_date):
try:
bs = utils.html_parser(url)
except Exception:
return False
date = ""
for span in bs.find_all("span"):
if span.contents[0] == "jrj_final_date_start":
date = span.text.replace("\r", "").replace("\n", "")
break
if date == "":
date = specific_date
article = ""
for p in bs.find_all("p"):
if not p.find_all("jrj_final_daohang_start") and p.attrs == {} and \
not p.find_all("input") and not p.find_all("a", attrs={"class": "red"}) and not p.find_all(
"i") and not p.find_all("span"):
# if p.contents[0] != "jrj_final_daohang_start1" and p.attrs == {} and \
# not p.find_all("input") and not p.find_all("a", attrs={"class": "red"}) and not p.find_all("i"):
article += p.text.replace("\r", "").replace("\n", "").replace("\u3000", "")
return [date, article]
def get_historical_news(self, url, start_date, end_date):
        # # Pull all news already crawled between start_date and latest_date_str from the
        # # database to avoid duplicate crawling. For example, if data was crawled
        # # intermittently up to 2016-10-10 15:00:00, rerunning from 2015-01-01 (user-chosen)
        # # without adjusting the parameters would create a lot of duplicates, so we
        # # deduplicate a little here. Starting from the latest time point is perfectly fine,
        # # but rerunning from 2015-01-01 (user-chosen) retries previously failed URLs.
# extracted_data_list = self.extract_data(["Date"])[0]
# if len(extracted_data_list) != 0:
# latest_date_str = max(extracted_data_list).split(" ")[0]
# else:
# latest_date_str = start_date
# logging.info("latest time in database is {} ... ".format(latest_date_str))
# crawled_urls_list = list()
# for _date in utils.get_date_list_from_range(start_date, latest_date_str):
# query_results = self.query_news("Date", _date)
# for qr in query_results:
# crawled_urls_list.append(qr["Url"])
# # crawled_urls_list = self.extract_data(["Url"])[0] # abandoned
# logging.info("the length of crawled data from {} to {} is {} ... ".format(start_date,
# latest_date_str,
# len(crawled_urls_list)))
crawled_urls_list = list()
dates_list = utils.get_date_list_from_range(start_date, end_date)
dates_separated_into_ranges_list = utils.gen_dates_list(dates_list, config.JRJ_DATE_RANGE)
for dates_range in dates_separated_into_ranges_list:
for date in dates_range:
first_url = "{}/{}/{}_1.shtml".format(url, date.replace("-", "")[0:6], date.replace("-", ""))
max_pages_num = utils.search_max_pages_num(first_url, date)
for num in range(1, max_pages_num + 1):
_url = "{}/{}/{}_{}.shtml".format(url, date.replace("-", "")[0:6], date.replace("-", ""), str(num))
bs = utils.html_parser(_url)
a_list = bs.find_all("a")
for a in a_list:
if "href" in a.attrs and a.string and \
a["href"].find("/{}/{}/".format(date.replace("-", "")[:4],
date.replace("-", "")[4:6])) != -1:
if a["href"] not in crawled_urls_list:
                                # Only write to the database if the title does not contain phrases like "收盘" (market close) or "报于" (closed at), since news with such titles is mostly machine-generated
if a.string.find("收盘") == -1 and a.string.find("报于") == -1 and \
a.string.find("新三板挂牌上市") == -1:
result = self.get_url_info(a["href"], date)
while not result:
self.terminated_amount += 1
if self.terminated_amount > config.JRJ_MAX_REJECTED_AMOUNTS:
                                            # save URLs that could never be crawled
with open(config.RECORD_JRJ_FAILED_URL_TXT_FILE_PATH, "a+") as file:
file.write("{}\n".format(a["href"]))
logging.info("rejected by remote server longer than {} minutes, "
"and the failed url has been written in path {}"
.format(config.JRJ_MAX_REJECTED_AMOUNTS,
config.RECORD_JRJ_FAILED_URL_TXT_FILE_PATH))
break
logging.info("rejected by remote server, request {} again after "
"{} seconds...".format(a["href"], 60 * self.terminated_amount))
time.sleep(60 * self.terminated_amount)
result = self.get_url_info(a["href"], date)
if not result:
                                        # the crawl failed for this URL
logging.info("[FAILED] {} {}".format(a.string, a["href"]))
else:
                                        # a response came back but the article is empty
article_specific_date, article = result
while article == "" and self.is_article_prob >= .1:
self.is_article_prob -= .1
result = self.get_url_info(a["href"], date)
while not result:
self.terminated_amount += 1
if self.terminated_amount > config.JRJ_MAX_REJECTED_AMOUNTS:
                                                    # save URLs that could never be crawled
with open(config.RECORD_JRJ_FAILED_URL_TXT_FILE_PATH, "a+") as file:
file.write("{}\n".format(a["href"]))
logging.info("rejected by remote server longer than {} minutes, "
"and the failed url has been written in path {}"
.format(config.JRJ_MAX_REJECTED_AMOUNTS,
config.RECORD_JRJ_FAILED_URL_TXT_FILE_PATH))
break
logging.info("rejected by remote server, request {} again after "
"{} seconds...".format(a["href"],
60 * self.terminated_amount))
time.sleep(60 * self.terminated_amount)
result = self.get_url_info(a["href"], date)
article_specific_date, article = result
self.is_article_prob = .5
if article != "":
data = {"Date": article_specific_date,
"Url": a["href"],
"Title": a.string,
"Article": article}
self.col.insert_one(data)
logging.info("[SUCCESS] {} {} {}".format(article_specific_date,
a.string,
a["href"]))
                                                self.terminated_amount = 0  # reset this counter after the crawl finishes
else:
logging.info("[QUIT] {}".format(a.string))
def get_realtime_news(self, url):
pass
if __name__ == "__main__":
jrj_spyder = JrjSpyder()
jrj_spyder.get_historical_news(config.WEBSITES_LIST_TO_BE_CRAWLED_JRJ, "2017-05-06", "2018-01-01")
# jrj_spyder.get_historical_news(config.WEBSITES_LIST_TO_BE_CRAWLED_JRJ, "2016-04-15", "2020-12-03")
    # TODO: continue crawling the failed URLs recorded in the RECORD_JRJ_FAILED_URL_TXT_FILE_PATH file
pass
| 2.390625 | 2 |
pikuli/uia/control_wrappers/menu_item.py | NVoronchev/pikuli | 0 | 12789978 | # -*- coding: utf-8 -*-
from .uia_control import UIAControl
class MenuItem(UIAControl):
    ''' A context menu, for example. '''
CONTROL_TYPE = 'MenuItem'
| 1.382813 | 1 |
earthquakes_package/scripts/dbmanager.py | saramakosa/earthquakes | 0 | 12789979 | <gh_stars>0
"""This module helps a user to manage the database. Only one table ("users") is
available and new users can be added or the existing ones can be removed. See
the argparse options for more information."""
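# Example invocations (usernames/passwords below are purely illustrative):
#   python dbmanager.py -a -username alice -password s3cret   # add (or update) a user
#   python dbmanager.py -r -username alice                    # remove a user
#   python dbmanager.py -show -username admin                 # list all users (-username is always required)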
import sqlite3
import argparse
import os
import random
import hashlib
conn = None
cursor = None
db_abs_path = 'earthquakes_package/scripts/database.db'
def open_and_create(db_path):
"""Connect to sqlite database given the path to the .db file
:param db_path: The path to the database file
:type db_path: string
"""
global conn
global cursor
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
try:
cursor.execute("SELECT * FROM users")
# if the table does not exist create one
except sqlite3.OperationalError:
create_users_table()
def create_users_table():
"""Create table for users according to the defiend schema"""
global conn
global cursor
# Create table with username, password and salt
cursor.execute('''CREATE TABLE users
(username CHARACTER(256) NOT NULL,
password CHARACTER(256) NOT NULL,
salt CHARACTER(256) NOT NULL,
PRIMARY KEY (username))''')
def add_user(u, p):
"""Add a new user to the database given username and password
:param u: username
:type u: string
:param p: password
:type p: string
"""
global conn
global cursor
salt = random.randint(1, 1000000)
# add the salt to the password before computing the hash
p = str(salt) + p
digest = hashlib.sha256(p.encode('utf-8')).hexdigest()
# if the user already exists, replace its password and salt
cursor.execute("INSERT OR REPLACE INTO users VALUES (?,?,?)",
(u, digest, salt))
conn.commit()
def remove_user(u):
"""Remove a user from the database given his username
:param u: username
:type u: string
"""
global conn
global cursor
cursor.execute("DELETE FROM users WHERE username = ?", (u,))
conn.commit()
def get_users():
"""Get all the existing users, this is useful for the --show parameter
:return: list of existing users
:rtype: list of existing users
"""
global conn
global cursor
cursor.execute('SELECT * FROM users')
users = cursor.fetchall()
if len(users) > 0:
return users
return False
def is_allowed(u, given_password):
"""Check if a user is allowed tu perform the action
:param u: username
:param given_password: password given by the user
:return: True or False based on the user's permission
:rtype: Boolean
"""
global conn
global cursor
rows = cursor.execute("SELECT * FROM users WHERE username=?", (u,))
conn.commit()
user = rows.fetchall()
# return False if no user is found with that username
if len(user) == 0:
return False
# check if the stored password is correct
# (i.e if the stored password == digest(salt + password given by the user))
stored_salt = str(user[0][2])
    given_password = stored_salt + given_password
stored_password = user[0][1]
digest = hashlib.sha256(given_password.encode('utf-8')).hexdigest()
# return False if the user is found but the password is incorrect
if digest == stored_password.lower():
return True
else:
return False
def parse_arguments():
"""Parse the arguments given by the user.
:return: Arguments parsed from the console
:rtype: list
"""
parser = argparse.ArgumentParser(description="Add users / Remove users")
parser.add_argument("-a", help="Add username '-u' with password '-p'",
action="store_true")
parser.add_argument("-r", help="Remove username '-u' with password '-p'",
action="store_true")
parser.add_argument("-show", help="Show all existing users",
action="store_true")
parser.add_argument('-username', help="add a username name",
required=True, default=None)
parser.add_argument('-password', help="the username password",
required=False, default=None)
parser.add_argument("--version", action="version", version="1.0")
args = parser.parse_args()
return args
if __name__ == "__main__":
# get the correct path based on the folder where the script is invoked in
db_path = os.path.abspath(os.path.join(os.getcwd(), db_abs_path))
open_and_create(db_path)
args = parse_arguments()
# If the user wants to add another user
if args.a:
# If the user tries to add and remove at the same time
if args.r:
print("Incompatible actions, please choose only one!")
exit()
# if the password is not given
if not args.password:
print("Please choose a password as well!")
exit()
add_user(args.username, args.password)
# If the user wants to remove another user
if args.r:
remove_user(args.username)
# Show all the users in the database if needed
if args.show:
print('Retrieving all existing users...')
users = get_users()
if not users:
print("No users found!")
else:
for i in range(len(users)):
print('username: ' + users[i][0], '\tpassword: ' + users[i][1])
| 3.359375 | 3 |
pyspedas/mms/eis/mms_eis_spec_combine_sc.py | ergsc-devel/pyspedas | 75 | 12789980 | import numpy as np
# use nanmean from bottleneck if it's installed, otherwise use the numpy one
# bottleneck nanmean is ~2.5x faster
try:
import bottleneck as bn
nanmean = bn.nanmean
except ImportError:
nanmean = np.nanmean
from pytplot import get_data, store_data, options
from ...utilities.tnames import tnames
def mms_eis_spec_combine_sc(
species='proton', data_units='flux', datatype='extof', data_rate='srvy',
level='l2', suffix='',
):
'''
Combines omni-directional energy spectrogram variable from EIS on multiple
MMS spacecraft.
Parameters
----------
datatype: str
'extof', 'electroenergy', or 'phxtof' (default: 'extof')
data_rate: str
instrument data rate, e.g., 'srvy' or 'brst' (default: 'srvy')
level: str
data level ['l1a','l1b','l2pre','l2' (default)]
data_units: str
desired units for data, e.g., 'flux' or 'cps' (default: 'flux')
suffix: str
suffix of the loaded data; useful for preserving original tplot var
species: str
species for calculation, e.g., proton, oxygen, alpha or electron
(default: 'proton')
Returns:
Name of tplot variables created.
'''
## Thoughts for extensions:
    ## - Ensure arguments passed to modules are lowercase
if data_units == 'flux':
units_label = 'Intensity\n[1/cm^2-sr-s-keV]'
elif data_units == 'cps':
units_label = 'CountRate\n[counts/s]'
elif data_units == 'counts':
units_label = 'Counts\n[counts]'
#assert type(datatype) is str
if not isinstance(species, list): species = [species]
if not isinstance(datatype, list): datatype = [datatype]
out_vars = []
for species_id in species:
for dtype in datatype:
# retrieve: omni variables of species to determine # of probes
_species = species_id
if dtype == 'electronenergy':
_species = 'electron'
eis_sc_check = tnames('mms*eis*' + data_rate + '*' + dtype+'*' + _species + '*' + data_units + '*omni'+ suffix)
# process multiple probes
probes = []
for name in eis_sc_check:
probes.append(name[3:4])
if len(probes) > 4:
probes = probes[:-2]
if len(probes) > 1:
probe_string = probes[0] + '-' + probes[-1]
else:
if probes:
probe_string = probes[0]
else:
print('No probes found from eis_sc_check tnames.')
return
allmms_prefix = 'mmsx_epd_eis_' + data_rate + '_' + level + '_' + dtype + '_'
# DETERMINE SPACECRAFT WITH SMALLEST NUMBER OF TIME STEPS TO USE
# AS A REFERENCE SPACECRAFT
omni_vars = tnames('mms?_epd_eis_'+data_rate+'_'+level+'_'+dtype+'_'+_species+'_'+data_units+'_omni'+suffix)
if not omni_vars:
                print('No EIS ' + dtype + ' data loaded!')
return
time_size = np.zeros(len(probes))
energy_size = np.zeros(len(probes))
# Retrieve probe's pitch angle dist for all 6 (omni) telescopes
for p, probe in enumerate(probes):
# note: return from get_data here is (times, data, v)
# according to https://github.com/MAVENSDC/PyTplot/blob/ec87591521e84bae8d81caccaf64fc2a5785186f/pytplot/get_data.py#L66
# note: there are also available 'spec_bins' values
#print(pytplot.data_quants[omni_vars[p]].coords)
#t, data, v = get_data(omni_vars[p])
omni_times, omni_data, omni_energies = get_data(omni_vars[p])
time_size[p] = len(omni_times)
energy_size[p] = len(omni_energies)
reftime_sc_loc = np.argmin(time_size)
ref_sc_time_size = int(min(time_size))
refenergy_sc_loc = np.argmin(energy_size)
ref_sc_energy_size = int(min(energy_size))
prefix = 'mms'+probes[reftime_sc_loc]+'_epd_eis_'+data_rate+'_'+level+'_'+dtype+'_'
# Retrieve specific probe's data based on minimum time/energy
# Note: I did not split these tuples as the namespace is reused, i.e., "_refprobe"
time_refprobe = get_data(omni_vars[reftime_sc_loc])
energy_refprobe = get_data(omni_vars[refenergy_sc_loc])
# time x energy x spacecraft
omni_spec_data = np.empty([len(time_refprobe[0]), len(energy_refprobe[2]), len(probes)])
omni_spec_data[:] = np.nan
# time x energy
omni_spec = np.empty([len(time_refprobe[0]), len(energy_refprobe[2])])
omni_spec[:] = np.nan
energy_data = np.zeros([len(energy_refprobe[2]), len(probes)])
common_energy = np.zeros(len(energy_refprobe[2]))
# Average omni flux over all spacecraft and define common energy grid
for pp in range(len(omni_vars)):
temp_data = get_data(omni_vars[pp])
energy_data[:,pp] = temp_data[2][0:len(common_energy)]
omni_spec_data[0:ref_sc_time_size,:,pp] = temp_data[1][0:ref_sc_time_size,0:len(common_energy)]
for ee in range(len(common_energy)):
common_energy[ee] = nanmean(energy_data[ee,:], axis=0)
# Average omni flux over all spacecraft
for tt in range(len(time_refprobe[0])):
for ee in range(len(energy_refprobe[2])):
omni_spec[tt,ee] = nanmean(omni_spec_data[tt,ee,:], axis=0)
# store new tplot variable
omni_spec[np.isnan(omni_spec)] = 0.
new_name = allmms_prefix+_species+'_'+data_units+'_omni'
store_data(new_name, data={'x':time_refprobe[0], 'y':omni_spec, 'v':energy_refprobe[2]})
options(new_name, 'ylog', True)
options(new_name, 'zlog', True)
options(new_name, 'spec', True)
options(new_name, 'Colormap', 'jet')
options(new_name, 'ztitle', units_label)
options(new_name, 'ytitle', ' \\ '.join(['mms'+probe_string, _species.upper(), 'Energy [keV]']))
out_vars.append(new_name)
# Spin-average the data
spin_nums = get_data(prefix+'spin'+suffix)
if spin_nums is None:
print('Error: Could not find EIS spin variable -- now ending procedure.')
return
# find where the spin starts
_, spin_starts = np.unique(spin_nums[1], return_index=True)
spin_sum_flux = np.zeros([len(spin_starts), len(omni_spec[0,:])])
current_start = 0
for spin_idx in range(len(spin_starts)):
spin_sum_flux[spin_idx,:] = nanmean(omni_spec[current_start:spin_starts[spin_idx],:], axis=0)
current_start = spin_starts[spin_idx] + 1
sp = '_spin'
new_name = allmms_prefix+_species+'_'+data_units+'_omni'+sp
store_data(new_name, data={'x':spin_nums[0][spin_starts], 'y':spin_sum_flux, 'v':energy_refprobe[2]})
options(new_name, 'spec', True)
options(new_name, 'zlog', True)
options(new_name, 'ylog', True)
options(new_name, 'spec', True)
out_vars.append(new_name)
return out_vars
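# Usage sketch (assumes EIS data for several probes has already been loaded, e.g.
# with pyspedas.mms.eis(probe=['1', '2', '3', '4'], datatype='extof')):
#
#     new_vars = mms_eis_spec_combine_sc(species='proton', datatype='extof',
#                                        data_rate='srvy', data_units='flux')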
| 2.109375 | 2 |
grb/dataset/__init__.py | Stanislas0/grb | 0 | 12789981 | from .dataset import Dataset, CustomDataset, CogDLDataset
| 1.15625 | 1 |
server/laundry.py | pennlabs/labs-api-server | 9 | 12789982 | import calendar
import datetime
from flask import g, jsonify, request
from pytz import timezone
from requests.exceptions import HTTPError
from sqlalchemy import Integer, cast, exists, func
from server import app, sqldb
from server.auth import auth
from server.base import cached_route
from server.models import LaundryPreference, LaundrySnapshot, User
from server.penndata import laundry
@app.route("/laundry/halls", methods=["GET"])
def all_halls():
try:
return jsonify({"halls": laundry.all_status()})
except HTTPError:
return jsonify({"error": "The laundry api is currently unavailable."})
@app.route("/laundry/rooms/<hall_ids>", methods=["GET"])
def get_rooms(hall_ids):
est = timezone("EST")
date = datetime.datetime.now(est)
halls = [int(x) for x in hall_ids.split(",")]
output = {"rooms": []}
for hall in halls:
hall_data = laundry.hall_status(hall)
hall_data["id"] = hall
hall_data["usage_data"] = usage_data(hall, date.year, date.month, date.day)
output["rooms"].append(hall_data)
return jsonify(output)
@app.route("/laundry/hall/<int:hall_id>", methods=["GET"])
def hall(hall_id):
try:
return jsonify(laundry.hall_status(hall_id))
except ValueError:
return jsonify({"error": "Invalid hall id passed to server."})
except HTTPError:
return jsonify({"error": "The laundry api is currently unavailable."})
@app.route("/laundry/hall/<int:hall_id>/<int:hall_id2>", methods=["GET"])
def two_halls(hall_id, hall_id2):
try:
to_ret = {"halls": [laundry.hall_status(hall_id), laundry.hall_status(hall_id2)]}
return jsonify(to_ret)
except ValueError:
return jsonify({"error": "Invalid hall id passed to server."})
except HTTPError:
return jsonify({"error": "The laundry api is currently unavailable."})
@app.route("/laundry/halls/ids", methods=["GET"])
def id_to_name():
try:
return jsonify({"halls": laundry.hall_id_list})
except HTTPError:
return jsonify({"error": "The laundry api is currently unavailable."})
def safe_division(a, b):
return round(a / float(b), 3) if b > 0 else 0
@app.route("/laundry/usage/<int:hall_no>")
def usage_shortcut(hall_no):
est = timezone("EST")
now = datetime.datetime.now(est)
return usage(hall_no, now.year, now.month, now.day)
def usage_data(hall_no, year, month, day):
# turn date info into a date object
# find start range by subtracting 30 days
now = datetime.date(year, month, day)
start = now - datetime.timedelta(days=30)
# get the current day of the week for today and tomorrow
# python dow is monday = 0, while sql dow is sunday = 0
dow = (now.weekday() + 1) % 7
tmw = (dow + 1) % 7
# some commands are different between mysql and sqlite
is_mysql = sqldb.engine.name == "mysql"
# get the laundry information for today based on the day
    # of week (if today is Tuesday, get all the Tuesdays
    # in the past 30 days), group them by time, and include
    # the first 3 hours (180 minutes) of the next day
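    # For example, on a Tuesday the query also pulls Wednesday snapshots taken
    # before 03:00 so the trailing chart bins (hours 24-26) still have data.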
data = (
sqldb.session.query(
LaundrySnapshot.date,
(
func.floor(LaundrySnapshot.time / 60).label("time")
if is_mysql
else cast(LaundrySnapshot.time / 60, Integer).label("time")
),
func.avg(LaundrySnapshot.washers).label("all_washers"),
func.avg(LaundrySnapshot.dryers).label("all_dryers"),
func.avg(LaundrySnapshot.total_washers).label("all_total_washers"),
func.avg(LaundrySnapshot.total_dryers).label("all_total_dryers"),
)
.filter(
(
(LaundrySnapshot.room == hall_no)
& (
(
func.dayofweek(LaundrySnapshot.date) == dow + 1
if is_mysql
else func.strftime("%w", LaundrySnapshot.date) == str(dow)
)
| (
(LaundrySnapshot.time <= 180 - 1)
& (
func.dayofweek(LaundrySnapshot.date) == tmw + 1
if is_mysql
else func.strftime("%w", LaundrySnapshot.date) == str(tmw)
)
)
)
& (LaundrySnapshot.date >= start)
)
)
.group_by(LaundrySnapshot.date, "time")
.order_by(LaundrySnapshot.date, "time")
.all()
)
data = [x._asdict() for x in data]
all_dryers = [int(x["all_total_dryers"]) for x in data]
all_washers = [int(x["all_total_washers"]) for x in data]
washer_points = {k: 0 for k in range(27)}
dryer_points = {k: 0 for k in range(27)}
washer_total = {k: 0 for k in range(27)}
dryer_total = {k: 0 for k in range(27)}
for x in data:
hour = int(x["time"])
# if the value is for tomorrow, add 24 hours
if x["date"].weekday() != now.weekday():
hour += 24
washer_points[hour] += int(x["all_washers"])
dryer_points[hour] += int(x["all_dryers"])
washer_total[hour] += 1
dryer_total[hour] += 1
dates = [x["date"] for x in data]
if not dates:
dates = [now]
return {
"hall_name": laundry.id_to_hall[hall_no],
"location": laundry.id_to_location[hall_no],
"day_of_week": calendar.day_name[now.weekday()],
"start_date": min(dates).strftime("%Y-%m-%d"),
"end_date": max(dates).strftime("%Y-%m-%d"),
"total_number_of_dryers": safe_division(sum(all_dryers), len(all_dryers)),
"total_number_of_washers": safe_division(sum(all_washers), len(all_washers)),
"washer_data": {x: safe_division(washer_points[x], washer_total[x]) for x in washer_points},
"dryer_data": {x: safe_division(dryer_points[x], dryer_total[x]) for x in dryer_points},
}
@app.route("/laundry/usage/<int:hall_no>/<int:year>-<int:month>-<int:day>", methods=["GET"])
def usage(hall_no, year, month, day):
def get_data():
return usage_data(hall_no, year, month, day)
td = datetime.timedelta(minutes=15)
return cached_route("laundry:usage:%s:%s-%s-%s" % (hall_no, year, month, day), td, get_data)
def save_data():
"""Retrieves current laundry info and saves it into the database."""
# get the number of minutes since midnight
est = timezone("EST")
now = datetime.datetime.now(est)
midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
date = now.date()
time = round((now - midnight).seconds / 60)
# check if we already have data for this minute
# if we do, skip
with app.app_context():
if sqldb.session.query(
exists().where((LaundrySnapshot.date == date) & (LaundrySnapshot.time == time))
).scalar():
return
# make a dict for hall name -> id
ids = {x["hall_name"]: x["id"] for x in laundry.hall_id_list}
data = laundry.all_status()
for name, room in data.items():
id = ids[name]
dryers = room["dryers"]["open"]
washers = room["washers"]["open"]
total_dryers = sum(
[room["dryers"][x] for x in ["open", "running", "offline", "out_of_order"]]
)
total_washers = sum(
[room["washers"][x] for x in ["open", "running", "offline", "out_of_order"]]
)
item = LaundrySnapshot(
date=date,
time=time,
room=id,
washers=washers,
dryers=dryers,
total_washers=total_washers,
total_dryers=total_dryers,
)
sqldb.session.add(item)
sqldb.session.commit()
@app.route("/laundry/preferences", methods=["POST"])
@auth(nullable=True)
def save_laundry_preferences():
try:
user = User.get_or_create()
except ValueError as e:
return jsonify({"success": False, "error": str(e)})
room_ids = request.form.get("rooms")
if not room_ids:
return jsonify({"success": False, "error": "No rooms specified."})
# delete old preferences for user
LaundryPreference.query.filter_by(user_id=user.id).delete()
room_ids = [int(x) for x in room_ids.split(",")]
account_id = g.account.id if g.account else None
for room_id in room_ids:
laundry_preference = LaundryPreference(user_id=user.id, account=account_id, room_id=room_id)
sqldb.session.add(laundry_preference)
sqldb.session.commit()
return jsonify({"success": True, "error": None})
@app.route("/laundry/preferences", methods=["GET"])
def get_laundry_preferences():
try:
user = User.get_or_create()
except ValueError:
return jsonify({"rooms": []})
preferences = LaundryPreference.query.filter_by(user_id=user.id)
room_ids = [x.room_id for x in preferences]
return jsonify({"rooms": room_ids})
@app.route("/laundry/status", methods=["GET"])
def get_laundry_status():
def get_data():
if laundry.check_is_working():
return {"is_working": True, "error_msg": None}
else:
error_msg = "Penn's laundry server is currently not updating. We hope this will be fixed shortly."
return {"is_working": False, "error_msg": error_msg}
td = datetime.timedelta(hours=1)
return cached_route("laundry:working", td, get_data)
| 2.640625 | 3 |
configuration.py | praveen-elastic/workplace-search-sharepoint16-connector | 0 | 12789983 | <reponame>praveen-elastic/workplace-search-sharepoint16-connector
import yaml
from yaml.error import YAMLError
from cerberus import Validator
from schema import schema
from sharepoint_utils import print_and_log
class Configuration:
__instance = None
def __new__(cls, *args, **kwargs):
if not Configuration.__instance:
Configuration.__instance = object.__new__(cls)
return Configuration.__instance
def __init__(self, file_name, logger=None):
self.logger = logger
self.file_name = file_name
try:
with open(file_name, "r", encoding="utf-8") as stream:
self.configurations = yaml.safe_load(stream)
except YAMLError as exception:
if hasattr(exception, 'problem_mark'):
mark = exception.problem_mark
print_and_log(
self.logger,
"exception",
"Error while reading the configurations from %s file at line %s."
% (file_name, mark.line),
)
else:
print_and_log(
self.logger,
"exception",
"Something went wrong while parsing yaml file %s. Error: %s"
% (file_name, exception),
)
self.configurations = self.validate()
# Converting datetime object to string
for date_config in ["start_time", "end_time"]:
self.configurations[date_config] = self.configurations[date_config].strftime('%Y-%m-%dT%H:%M:%SZ')
def validate(self):
"""Validates each properties defined in the yaml configuration file
"""
self.logger.info("Validating the configuration parameters")
validator = Validator(schema)
validator.validate(self.configurations, schema)
if validator.errors:
print_and_log(self.logger, "error", "Error while validating the config. Errors: %s" % (
validator.errors))
exit(0)
self.logger.info("Successfully validated the config file")
return validator.document
| 2.390625 | 2 |
utils/__init__.py | uthcode/baysourashtra | 0 | 12789984 | import jinja2
veg_cost = 10.00
non_veg_cost = 10.00
JINJA_ENVIRONMENT = jinja2.Environment(
# templates directory is relative to app root.
loader=jinja2.FileSystemLoader('templates'),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
form_template = JINJA_ENVIRONMENT.get_template('form.html')
pay_template = JINJA_ENVIRONMENT.get_template('paypal.html')
thankyou_template = JINJA_ENVIRONMENT.get_template('thankyou.html')
cancel_template = JINJA_ENVIRONMENT.get_template('cancel.html')
step_by_step_template = JINJA_ENVIRONMENT.get_template('stepbystep.html')
email_template = JINJA_ENVIRONMENT.get_template('email_template.html')
list_template = JINJA_ENVIRONMENT.get_template('list.html')
index_template = JINJA_ENVIRONMENT.get_template('index.html')
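# Example render call (the context variables are assumptions; each template
# defines what it actually needs):
#   html = form_template.render(veg_cost=veg_cost, non_veg_cost=non_veg_cost)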
| 1.710938 | 2 |
lib/modules/SSTIDetector.py | mukeran/dinlas | 2 | 12789985 | # coding:utf-8
import logging
from urllib import parse
from copy import deepcopy
import random
import requests
class SSTIDetector:
def __init__(self, results, reports, **kwargs):
self.results = results
self.reports = reports
self.args = kwargs
self.vulnerable = []
@staticmethod
def meta():
return {
'name': 'Server-Side Template Injector for all',
'version': '1.0'
}
@staticmethod
def set_payload():
randint1 = random.randint(32768, 65536)
randint2 = random.randint(16384, 32768)
_sum = randint1 + randint2
_payload = '{{' + str(randint1) + '+' + str(randint2) + '}}'
check_str = str(_sum)
return {'payload': _payload, 'check_str': check_str}
def exec(self):
headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.'
'2924.87 Safari/537.36'
}
for url in self.results['urls']:
logging.critical('SSTI testing on {}'.format(url))
attack_url = []
if url[0:4] != 'http':
url = 'http://' + url
parse_result = parse.urlparse(url)
query = parse.parse_qs(parse_result.query)
split_dir = parse_result.path.split('/')
_url = parse_result.scheme + '://' + parse_result.netloc
for i in range(1, len(split_dir)):
payload = self.set_payload()
split = deepcopy(split_dir)
split[i] = payload['payload']
check_url = _url + '/'.join(split)
attack_url.append({'url': check_url, 'payload': payload})
_url += parse_result.path + '?'
for key in query.keys():
payload = self.set_payload()
tmp = deepcopy(query)
tmp[key][0] = payload['payload']
_query = []
for _key, _value in tmp.items():
_query += list(map(lambda x: '{}={}'.format(_key, x), _value))
attack_url.append({'url': _url + '&'.join(_query), 'payload': payload})
for test_url in attack_url:
req = requests.get(test_url['url'], headers=headers)
if req.text.find(test_url['payload']['check_str']) != -1:
logging.critical('SSTI detected: vulnerable url: {}'.format(test_url['url']))
self.vulnerable.append({
'url': test_url['url'],
'payload': test_url['payload']['payload']
})
self.reports.append({
'title': 'Server Side Template Injection Points',
'overview': 'Found {} SSTI point(s)'.format(len(self.vulnerable)),
'header': ['Path', 'Payload'],
'entries': list(map(lambda x: [x['url'], x['payload']], self.vulnerable))
})
logging.info("SSTI scan finished!")
| 2.515625 | 3 |
main.py | Fovik/Translater | 0 | 12789986 | <filename>main.py<gh_stars>0
import wx
import wx.adv
import yaml
from lib import clipboard
TRAY_TOOLTIP = 'System Tray Demo'
ID_MENU_ABOUT = 1025
TRANSLATE_EVENTS_START = 2000
__version__ = '0.0.1'
def ShowAbout(event):
dialog = wx.MessageDialog(None, "Создал <NAME>", caption="О программе", style=wx.OK|wx.CENTRE, pos=wx.DefaultPosition)
dialog.ShowModal()
def onExit(event):
wx.Exit()
def Translete(event):
text = clipboard.get_clipboard_data()
text_array = list(text)
dictionary = encoding_translate_config[event.GetId() - TRANSLATE_EVENTS_START]["dictionary"]
print(text_array)
for i in range(len(text_array)):
if text_array[i] in dictionary:
text_array[i] = dictionary[text_array[i]]
text = "".join(text_array)
clipboard.set_clipboard_data(text)
print(text)
class MyTaskBarIcon(wx.adv.TaskBarIcon):
def __init__(self):
super().__init__()
def CreatePopupMenu(self):
menu = wx.Menu()
for i in range(len(encoding_translate_config)):
menu.Append(TRANSLATE_EVENTS_START + i, encoding_translate_config[i]["title"], "")
self.Bind(wx.EVT_MENU, Translete, id=TRANSLATE_EVENTS_START + i)
menu.Append(wx.ID_SEPARATOR, '', "")
menu.Append(ID_MENU_ABOUT, 'О программе', "")
menu.Append(wx.ID_EXIT, 'Выход', "")
self.Bind(wx.EVT_MENU, onExit, id=wx.ID_EXIT)
self.Bind(wx.EVT_MENU, ShowAbout, id=ID_MENU_ABOUT)
return menu
app = wx.App(False)
crutch = wx.Frame(None, -1, "")
#TODO change dir to this script dir
sys_tray = MyTaskBarIcon()
icon = wx.Icon(wx.Bitmap("./assets/icon.png"))
sys_tray.SetIcon(icon, TRAY_TOOLTIP)
#sys_tray.Bind(wx.adv.EVT_TASKBAR_RIGHT_UP, showInfoMenu)
print(clipboard.get_clipboard_data())
try:
with open("./assets/sw_templates.yml", "r", encoding="utf8") as fh:
encoding_translate_config = yaml.safe_load(fh)
#TODO if codepage in encoding_translate_config[i]
except FileNotFoundError:
    pass
except Exception as e:
print(str(e))
dialog = wx.MessageDialog(None, "Неудалось запустить программу. Код ошибки: " + str(e), caption="Error", style=wx.OK|wx.CENTRE|wx.ICON_ERROR, pos=wx.DefaultPosition)
dialog.ShowModal()
app.MainLoop()
| 2.171875 | 2 |
todo-tags.py | jerheff/pre-commit | 0 | 12789987 | <filename>todo-tags.py
#!/usr/bin/env python3 -s
# Searches passed files for TODO comments.
#
# Prints out (and exits non-zero) if any TODO comments are not in the form:
#
# TODO(DEV-1234): some text
#
# Ensuring all TODOs are either complete when opening the PR, or have a JIRA
# ticket to track the future change.
import argparse
import re
import sys
DEFAULT_TAG = "DEV"
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tag',
type=str,
help="A JIRA-like tag (i.e. DEV) to search for",
default=DEFAULT_TAG,
dest='tag')
parser.add_argument('-r', '--regex',
type=str,
help="Specify the regex to match inner in TODO(inner)",
dest='regex')
parser.add_argument('files', metavar='FILES', type=str, nargs='+',
help='Files to search')
args = parser.parse_args()
# Do not allow specifying a tag and --regex
if args.regex and args.tag != DEFAULT_TAG:
sys.exit("cannot provide tag with --regex")
# Figure out what regex to use
tag = args.tag + "-[0-9]+"
if args.regex:
tag = args.regex
# This regex matches all TODO comments (prefixed by // or #) that are not
# immediately followed by "($TAG)"
pattern = re.compile(
r"(\/\/|#)+\s?TODO((?!\("+tag+r"\)).)*$", re.IGNORECASE)
ret = 0
for f in args.files:
for i, line in enumerate(open(f)):
for match in re.finditer(pattern, line):
print('%s:%s %s' % (f, i+1, match.string.strip()))
ret += 1
exit(ret)
| 3.015625 | 3 |
lectures/cs285/hw1/cs285/infrastructure/torch_utils.py | ainklain/re_papers | 0 | 12789988 | import torch
from torch import nn
import os
############################################
############################################
class MLP(nn.Module):
def __init__(self, input_size, output_size, n_layers, size, activation=torch.tanh, output_activation=None):
super(MLP, self).__init__()
self.activation = activation
self.output_activation = output_activation
self.layers = nn.ModuleList()
in_ = input_size
for i in range(n_layers):
self.layers.append(nn.Linear(in_, size))
in_ = size
self.layers.append(nn.Linear(size, output_size))
    def forward(self, x):
        # apply the hidden-layer activation to all but the final layer
        for layer in self.layers[:-1]:
            x = self.activation(layer(x))
        # the output layer stays linear unless an output_activation is given
        x = self.layers[-1](x)
        if not self.output_activation:
            return x
        else:
            return self.output_activation(x)
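# Usage sketch (sizes are illustrative, not from the original file):
#   mlp = MLP(input_size=4, output_size=2, n_layers=2, size=64)
#   out = mlp(torch.randn(8, 4))   # -> tensor of shape (8, 2)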
############################################
############################################
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
| 2.796875 | 3 |
nba_automation/page_objects/BasePage.py | sohailchd/RobotAndLocust | 0 | 12789989 | <reponame>sohailchd/RobotAndLocust
from utilities.BrowserManager import BrowserManager
import conf
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class BasePage():
_driver = BrowserManager.get_browser()
def __init__(self,url=conf.base_url):
print("base_page init called...")
self.page_url = url
BasePage._driver.get(self.page_url)
def open_page(self,url=None):
if not url:
self._driver.get(self.page_url)
else:
self._driver.get(url)
def get_page_url(self):
return self.page_url
def explicit_wait(self,locator,time=20,is_visible=False,driver=None):
'''
custom wait for given element
'''
if not driver:
driver = self._driver
print(f"locator : {locator}")
if not is_visible:
target = WebDriverWait(driver,time).until(
EC.presence_of_element_located(locator)
)
else:
target = WebDriverWait(driver,time).until(
EC.visibility_of_element_located(locator)
)
print(f"exlicit wait, found element {target} ")
## module level driver instance
driver = BasePage._driver | 2.84375 | 3 |
edb/testbase/connection.py | Marnixvdb/edgedb | 0 | 12789990 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A specialized client API for EdgeDB tests.
Historically EdgeDB tests relied on a very specific client API that
is no longer supported by edgedb-python. Here we implement that API
(for example, transactions can be nested and are non-retrying).
"""
from __future__ import annotations
import typing
import abc
import asyncio
import enum
import functools
import random
import socket
import ssl
import time
from edgedb import abstract
from edgedb import errors
from edgedb import con_utils
from edgedb import enums as edgedb_enums
from edgedb import options
from edgedb.protocol import asyncio_proto # type: ignore
from edgedb.protocol import protocol # type: ignore
class TransactionState(enum.Enum):
NEW = 0
STARTED = 1
COMMITTED = 2
ROLLEDBACK = 3
FAILED = 4
class BaseTransaction(abc.ABC):
ID_COUNTER = 0
def __init__(self, owner):
self._connection = owner
self._state = TransactionState.NEW
self._managed = False
self._nested = False
type(self).ID_COUNTER += 1
self._id = f'raw_tx_{self.ID_COUNTER}'
def is_active(self) -> bool:
return self._state is TransactionState.STARTED
def __check_state_base(self, opname):
if self._state is TransactionState.COMMITTED:
raise errors.InterfaceError(
f'cannot {opname}; the transaction is already committed')
if self._state is TransactionState.ROLLEDBACK:
raise errors.InterfaceError(
f'cannot {opname}; the transaction is already rolled back')
if self._state is TransactionState.FAILED:
raise errors.InterfaceError(
f'cannot {opname}; the transaction is in error state')
def __check_state(self, opname):
if self._state is not TransactionState.STARTED:
if self._state is TransactionState.NEW:
raise errors.InterfaceError(
f'cannot {opname}; the transaction is not yet started')
self.__check_state_base(opname)
def _make_start_query(self):
self.__check_state_base('start')
if self._state is TransactionState.STARTED:
raise errors.InterfaceError(
'cannot start; the transaction is already started')
return self._make_start_query_inner()
@abc.abstractmethod
def _make_start_query_inner(self):
...
def _make_commit_query(self):
self.__check_state('commit')
return 'COMMIT;'
def _make_rollback_query(self):
self.__check_state('rollback')
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = f'ROLLBACK TO SAVEPOINT {self._id};'
else:
query = 'ROLLBACK;'
return query
async def start(self) -> None:
query = self._make_start_query()
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.STARTED
async def commit(self) -> None:
if self._managed:
raise errors.InterfaceError(
'cannot manually commit from within an `async with` block')
await self._commit()
async def _commit(self) -> None:
query = self._make_commit_query()
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.COMMITTED
async def rollback(self) -> None:
if self._managed:
raise errors.InterfaceError(
'cannot manually rollback from within an `async with` block')
await self._rollback()
async def _rollback(self) -> None:
query = self._make_rollback_query()
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.ROLLEDBACK
class RawTransaction(BaseTransaction):
def _make_start_query_inner(self):
con = self._connection
if con._top_xact is None:
con._top_xact = self
else:
# Nested transaction block
self._nested = True
if self._nested:
query = f'DECLARE SAVEPOINT {self._id};'
else:
query = 'START TRANSACTION;'
return query
def _make_commit_query(self):
query = super()._make_commit_query()
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = f'RELEASE SAVEPOINT {self._id};'
return query
def _make_rollback_query(self):
query = super()._make_rollback_query()
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = f'ROLLBACK TO SAVEPOINT {self._id};'
return query
async def __aenter__(self):
if self._managed:
raise errors.InterfaceError(
'cannot enter context: already in an `async with` block')
self._managed = True
await self.start()
return self
async def __aexit__(self, extype, ex, tb):
try:
if extype is not None:
await self._rollback()
else:
await self._commit()
finally:
self._managed = False
class Iteration(BaseTransaction, abstract.AsyncIOExecutor):
def __init__(self, retry, connection, iteration):
super().__init__(connection)
self._options = retry._options.transaction_options
self.__retry = retry
self.__iteration = iteration
self.__started = False
async def __aenter__(self):
if self._managed:
raise errors.InterfaceError(
'cannot enter context: already in an `async with` block')
self._managed = True
return self
async def __aexit__(self, extype, ex, tb):
self._managed = False
if not self.__started:
return False
try:
if extype is None:
await self._commit()
else:
await self._rollback()
except errors.EdgeDBError as err:
if ex is None:
                # On commit we don't know whether the commit succeeded before the
                # database received it, or whether it was applied but the network
                # dropped before we were able to receive a response.
raise err
# If we were going to rollback, look at original error
# to find out whether we want to retry, regardless of
# the rollback error.
# In this case we ignore rollback issue as original error is more
# important, e.g. in case `CancelledError` it's important
# to propagate it to cancel the whole task.
# NOTE: rollback error is always swallowed, should we use
# on_log_message for it?
if (
extype is not None and
issubclass(extype, errors.EdgeDBError) and
ex.has_tag(errors.SHOULD_RETRY)
):
return self.__retry._retry(ex)
def _make_start_query_inner(self):
return self._options.start_transaction_query()
def _get_query_cache(self) -> abstract.QueryCache:
return self._connection._query_cache
async def _query(self, query_context: abstract.QueryContext):
await self._ensure_transaction()
result, _ = await self._connection.raw_query(query_context)
return result
async def execute(self, query: str) -> None:
await self._ensure_transaction()
await self._connection.execute(query)
async def _ensure_transaction(self):
if not self._managed:
raise errors.InterfaceError(
"Only managed retriable transactions are supported. "
"Use `async with transaction:`"
)
if not self.__started:
self.__started = True
if self._connection.is_closed():
await self._connection.connect(
single_attempt=self.__iteration != 0
)
await self.start()
class Retry:
def __init__(self, connection):
self._connection = connection
self._iteration = 0
self._done = False
self._next_backoff = 0
self._options = connection._options
def _retry(self, exc):
self._last_exception = exc
rule = self._options.retry_options.get_rule_for_exception(exc)
if self._iteration >= rule.attempts:
return False
self._done = False
self._next_backoff = rule.backoff(self._iteration)
return True
def __aiter__(self):
return self
async def __anext__(self):
# Note: when changing this code consider also
# updating Retry.__next__.
if self._done:
raise StopAsyncIteration
if self._next_backoff:
await asyncio.sleep(self._next_backoff)
self._done = True
iteration = Iteration(self, self._connection, self._iteration)
self._iteration += 1
return iteration
class Connection(options._OptionsMixin, abstract.AsyncIOExecutor):
_top_xact: RawTransaction | None = None
def __init__(self, connect_args, *, test_no_tls=False):
super().__init__()
self._connect_args = connect_args
self._protocol = None
self._query_cache = abstract.QueryCache(
codecs_registry=protocol.CodecsRegistry(),
query_cache=protocol.QueryCodecsCache(),
)
self._test_no_tls = test_no_tls
self._params = None
self._log_listeners = set()
def add_log_listener(self, callback):
self._log_listeners.add(callback)
def remove_log_listener(self, callback):
self._log_listeners.discard(callback)
def _on_log_message(self, msg):
if self._log_listeners:
loop = asyncio.get_running_loop()
for cb in self._log_listeners:
loop.call_soon(cb, self, msg)
def _shallow_clone(self):
con = self.__class__.__new__(self.__class__)
con._connect_args = self._connect_args
con._protocol = self._protocol
con._query_cache = self._query_cache
con._test_no_tls = self._test_no_tls
con._params = self._params
return con
def _get_query_cache(self) -> abstract.QueryCache:
return self._query_cache
async def _query(self, query_context: abstract.QueryContext):
await self.ensure_connected()
result, _ = await self.raw_query(query_context)
return result
async def execute(self, query: str) -> None:
await self.ensure_connected()
await self._protocol.simple_query(
query, edgedb_enums.Capability.ALL # type: ignore
)
async def ensure_connected(self):
if self.is_closed():
await self.connect()
return self
async def raw_query(self, query_context: abstract.QueryContext):
return await self._protocol.execute_anonymous(
query=query_context.query.query,
args=query_context.query.args,
kwargs=query_context.query.kwargs,
reg=query_context.cache.codecs_registry,
qc=query_context.cache.query_cache,
io_format=query_context.query_options.io_format,
expect_one=query_context.query_options.expect_one,
required_one=query_context.query_options.required_one,
allow_capabilities=edgedb_enums.Capability.ALL, # type: ignore
)
async def _fetchall(
self,
query: str,
*args,
__limit__: int = 0,
__typeids__: bool = False,
__typenames__: bool = False,
__allow_capabilities__: typing.Optional[int] = None,
**kwargs,
):
await self.ensure_connected()
result, _ = await self._protocol.execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=self._query_cache.codecs_registry,
qc=self._query_cache.query_cache,
implicit_limit=__limit__,
inline_typeids=__typeids__,
inline_typenames=__typenames__,
io_format=protocol.IoFormat.BINARY,
allow_capabilities=__allow_capabilities__,
)
return result
async def _fetchall_with_headers(
self,
query: str,
*args,
__limit__: int = 0,
__typeids__: bool = False,
__typenames__: bool = False,
__allow_capabilities__: typing.Optional[int] = None,
**kwargs,
):
await self.ensure_connected()
return await self._protocol.execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=self._query_cache.codecs_registry,
qc=self._query_cache.query_cache,
implicit_limit=__limit__,
inline_typeids=__typeids__,
inline_typenames=__typenames__,
io_format=protocol.IoFormat.BINARY,
allow_capabilities=__allow_capabilities__,
)
async def _fetchall_json(
self,
query: str,
*args,
__limit__: int = 0,
**kwargs,
):
await self.ensure_connected()
result, _ = await self._protocol.execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=self._query_cache.codecs_registry,
qc=self._query_cache.query_cache,
implicit_limit=__limit__,
inline_typenames=False,
io_format=protocol.IoFormat.JSON,
)
return result
async def _fetchall_json_elements(self, query: str, *args, **kwargs):
await self.ensure_connected()
result, _ = await self._protocol.execute_anonymous(
query=query,
args=args,
kwargs=kwargs,
reg=self._query_cache.codecs_registry,
qc=self._query_cache.query_cache,
io_format=protocol.IoFormat.JSON_ELEMENTS,
allow_capabilities=edgedb_enums.Capability.EXECUTE, # type: ignore
)
return result
def _clear_codecs_cache(self):
self._query_cache.codecs_registry.clear_cache()
def _get_last_status(self) -> typing.Optional[str]:
if self._protocol is None:
return None
status = self._protocol.last_status
if status is not None:
status = status.decode()
return status
def is_closed(self):
return self._protocol is None or not self._protocol.connected
async def connect(self, single_attempt=False):
self._params, client_config = con_utils.parse_connect_arguments(
**self._connect_args,
command_timeout=None,
server_settings=None,
)
start = time.monotonic()
if single_attempt:
max_time = 0
else:
max_time = start + client_config.wait_until_available
iteration = 1
while True:
addr = self._params.address
try:
await asyncio.wait_for(
self.connect_addr(),
client_config.connect_timeout,
)
except TimeoutError as e:
if iteration > 1 and time.monotonic() >= max_time:
raise errors.ClientConnectionTimeoutError(
f"connecting to {addr} failed in"
f" {client_config.connect_timeout} sec"
) from e
except errors.ClientConnectionError as e:
if (
not e.has_tag(errors.SHOULD_RECONNECT) or
(iteration > 1 and time.monotonic() >= max_time)
):
nice_err = e.__class__(
con_utils.render_client_no_connection_error(
e,
addr,
attempts=iteration,
duration=time.monotonic() - start,
))
raise nice_err from e.__cause__
else:
return
iteration += 1
await asyncio.sleep(0.01 + random.random() * 0.2)
async def connect_addr(self):
tr = None
loop = asyncio.get_running_loop()
addr = self._params.address
protocol_factory = functools.partial(
asyncio_proto.AsyncIOProtocol, self._params, loop
)
try:
if isinstance(addr, str):
# UNIX socket
tr, pr = await loop.create_unix_connection(
protocol_factory, addr
)
elif self._test_no_tls:
tr, pr = await loop.create_connection(protocol_factory, *addr)
else:
try:
tr, pr = await loop.create_connection(
protocol_factory, *addr, ssl=self._params.ssl_ctx
)
except ssl.CertificateError as e:
raise con_utils.wrap_error(e) from e
except ssl.SSLError as e:
if e.reason == 'CERTIFICATE_VERIFY_FAILED':
raise con_utils.wrap_error(e) from e
tr, pr = await loop.create_connection(
functools.partial(protocol_factory, tls_compat=True),
*addr,
)
else:
con_utils.check_alpn_protocol(
tr.get_extra_info('ssl_object')
)
except socket.gaierror as e:
# All name resolution errors are considered temporary
raise errors.ClientConnectionFailedTemporarilyError(str(e)) from e
except OSError as e:
raise con_utils.wrap_error(e) from e
except Exception:
if tr is not None:
tr.close()
raise
pr.set_connection(self)
try:
await pr.connect()
except OSError as e:
if tr is not None:
tr.close()
raise con_utils.wrap_error(e) from e
except BaseException:
if tr is not None:
tr.close()
raise
self._protocol = pr
def retrying_transaction(self) -> Retry:
return Retry(self)
def transaction(self) -> RawTransaction:
return RawTransaction(self)
def is_in_transaction(self):
return self._protocol.is_in_transaction()
def get_settings(self) -> typing.Dict[str, typing.Any]:
return self._protocol.get_settings()
@property
def dbname(self) -> str:
return self._params.database
def connected_addr(self):
return self._params.address
async def aclose(self):
if not self.is_closed():
try:
self._protocol.terminate()
await self._protocol.wait_for_disconnect()
except (Exception, asyncio.CancelledError):
self.terminate()
raise
def terminate(self):
if not self.is_closed():
self._protocol.abort()
async def async_connect_test_client(
dsn: str = None,
host: str = None,
port: int = None,
credentials: str = None,
credentials_file: str = None,
user: str = None,
password: str = None,
database: str = None,
tls_ca: str = None,
tls_ca_file: str = None,
tls_security: str = None,
test_no_tls: bool = False,
wait_until_available: int = 30,
timeout: int = 10,
) -> Connection:
return await Connection(
{
"dsn": dsn,
"host": host,
"port": port,
"credentials": credentials,
"credentials_file": credentials_file,
"user": user,
"password": password,
"database": database,
"timeout": timeout,
"tls_ca": tls_ca,
"tls_ca_file": tls_ca_file,
"tls_security": tls_security,
"wait_until_available": wait_until_available,
},
test_no_tls=test_no_tls,
).ensure_connected()
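# Illustrative usage sketch (added for clarity; the DSN and the query are assumptions,
# not part of the original module):
#
#   con = await async_connect_test_client(dsn="edgedb://localhost/test")
#   async with con.transaction():        # non-retrying, may be nested
#       await con.execute("SELECT 1;")
#   await con.aclose()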
| 1.789063 | 2 |
tests.py | horoshenkih/artm | 0 | 12789991 | from utils import *
from algorithms import nmf
import numpy as np
import sys
w = 200
d = 100
t = 6
beta0 = 0.01 # const
n_iter = 300
results = open(sys.argv[1], "w")
for run in range(1):
seeds = [30+run,40+run]
for alpha0 in [0.01, 0.02, 0.05]:
#for alpha0 in [0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1., 1.2,1.4, 1.6, 1.8, 2.]:
alpha = np.ones((1,t)).ravel() * alpha0
beta = np.ones((1,w)).ravel() * beta0
phi0, theta0, prod0, nd, collection = generate_all(w,d,t,alpha,beta,seed=seeds[0])
seed=seeds[1]
phi1 = generate_phi(w,t,beta,seed=seed)
theta1 = generate_theta(d,t,alpha,seed=seed)
params = {
'alpha': alpha,
'beta': beta,
'use_early_stopping': False,
'verbose': False,
'gamma': .5, # adaptive LDA, 0.5 <=> 1 / n_regularizers
}
print "Alpha0:", alpha0
for algorithm in ['em','lda']:
phi, theta = nmf(collection, t, phi1, theta1, algorithm=algorithm, n_iter=n_iter, params=params)
print "Algorithm:", algorithm
print "D(phi, phi0):", dist(phi0, phi)
print "D(theta, theta0)", dist(theta0, theta)
print "D(prod, prod0)", dist (prod0, phi * theta)
results.write("\t".join(map(str,(run, alpha0, algorithm, 'phi', dist(phi0,phi)))))
results.write("\n")
results.write("\t".join(map(str,(run, alpha0, algorithm, 'theta', dist(theta0,theta)))))
results.write("\n")
results.write("\t".join(map(str,(run, alpha0, algorithm, 'prod', dist(prod0,phi*theta)))))
results.write("\n")
# several versions of adaptive LDA
for gamma in [0.1, 0.5, 1.]:
params['gamma'] = gamma
phi, theta = nmf(collection, t, phi1, theta1, algorithm='adaptive_lda', n_iter=n_iter, params=params)
algorithm = 'adaptive_lda_'+str(gamma)
print "Algorithm: adaptive lda, gamma =", gamma
print "D(phi, phi0):", dist(phi0, phi)
print "D(theta, theta0)", dist(theta0, theta)
print "D(prod, prod0)", dist (prod0, phi * theta)
results.write("\t".join(map(str,(run, alpha0, algorithm, 'phi', dist(phi0,phi)))))
results.write("\n")
results.write("\t".join(map(str,(run, alpha0, algorithm, 'theta', dist(theta0,theta)))))
results.write("\n")
results.write("\t".join(map(str,(run, alpha0, algorithm, 'prod', dist(prod0,phi*theta)))))
results.write("\n")
print
results.close()
| 2.296875 | 2 |
get_bridge_status.py | joshuakaluba/WellandCanalScraper | 0 | 12789992 | import time
import bridge
import json
import requests
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
canal_web_source = 'http://www.greatlakes-seaway.com/R2/jsp/mNiaBrdgStatus.jsp?language=E'
welland_canal_api = 'https://wellandcanalapi.kaluba.tech'
try:
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.implicitly_wait(5)
driver.maximize_window()
driver.get(canal_web_source)
list_elements = driver.find_element_by_css_selector('div.sections')
list_items = list_elements.find_elements_by_tag_name("li")
json_output = "[ "
for item in list_items:
split_item = item.text.replace('Bridge ', '').replace(
'Bridge Status:', '').replace('Status: ', '').replace('Next Arrival: ', '').splitlines()
bridge_id = split_item[0]
bridge_status = split_item[2]
next_arrival = split_item[3]
canal_bridge = bridge.Bridge(bridge_id, bridge_status, next_arrival)
json_output += canal_bridge.toJsonString() + " ,"
driver.quit()
json_output = json_output[:-1]
json_output += " ]"
data = {'payload': json_output}
update_status_url = welland_canal_api+'/update_bridge_status'
request = requests.post(url=update_status_url, data=data)
print(json_output)
except:
print('An error occurred.')
| 2.75 | 3 |
tests/helpers.py | floatingpurr/sync_with_poetry | 9 | 12789993 | <gh_stars>1-10
from typing import Optional
import yaml
# A lock file
LOCK_CONTENT = (
"[[package]]\n"
'name = "mypy"\n'
'version = "0.910"\n'
'description = "Optional static typing for Python"\n'
'category = "dev"\n'
"optional = false\n"
'python-versions = ">=3.5"\n'
"[[package]]\n"
'name = "flake8"\n'
'version = "4.0.1"\n'
'description = "the modular source code checker: pep8 pyflakes and co"\n'
'category = "dev"\n'
"optional = false\n"
'python-versions = ">=3.6"\n'
"[[package]]\n"
'name = "black"\n'
'version = "21.11b1"\n'
'description = "The uncompromising code formatter."\n'
'category = "main"\n'
"optional = false\n"
'python-versions = ">=3.6.2"\n'
"[[package]]\n"
'name = "pytest"\n'
'version = "6.2.5"\n'
'description = "pytest: simple powerful testing with Python"\n'
'category = "dev"\n'
"optional = false\n"
'python-versions = ">=3.6"\n'
)
# A .pre-commit-config.yaml file
CONFIG_CONTENT = (
"repos:\n"
" # local hooks\n"
" - repo: local\n"
" hooks:\n"
" - id: sync\n"
" name: sync with poetry\n"
" entry: swp\n"
" language: system\n"
" files: poetry.lock\n"
" # mypy\n"
" - repo: https://github.com/pre-commit/mirrors-mypy\n"
" rev: v0.812\n"
" hooks:\n"
" - id: mypy\n"
" # comment\n"
" - repo: https://github.com/pycqa/flake8\n"
" rev: 3.9.0\n"
" hooks:\n"
" - id: flake8\n"
" args: [--max-line-length=88]\n"
" - repo: https://github.com/psf/black\n"
" rev: 21.5b2 # this is a rev\n"
" hooks:\n"
" - id: black\n"
" # another repo\n"
" - repo: https://github.com/pycqa/isort\n"
" rev: 5.10.1\n"
" hooks:\n"
" - id: isort\n"
" args: [--filter-files]\n"
)
def get_repo_version(filename: str, repo: str) -> Optional[str]:
"""Return the version (i.e., rev) of a repo
Args:
filename (str): .pre-commit-config.yaml
repo (str): repo URL
Returns:
Optional[str]: the version of the repo
"""
with open(filename, "r") as stream:
pre_commit_data = yaml.safe_load(stream)
pre_config_repo = next(
(item for item in pre_commit_data["repos"] if item["repo"] == repo), None
)
if pre_config_repo:
return pre_config_repo["rev"]
return None
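# Example (added for illustration): with CONFIG_CONTENT above written to ".pre-commit-config.yaml",
#   get_repo_version(".pre-commit-config.yaml", "https://github.com/psf/black")   -> "21.5b2"
#   get_repo_version(".pre-commit-config.yaml", "https://example.com/not-listed") -> None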
| 2.15625 | 2 |
test/binance_api_tests.py | iswanlun/PatternTradingBot | 0 | 12789994 | <reponame>iswanlun/PatternTradingBot<gh_stars>0
import unittest, os, sys
sys.path.append(os.getcwd() + "\\src")
from binance_stream import BinanceStream
import talib
import numpy as np
class BinanceStreamTests(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.stream = BinanceStream()
def test_history(self):
hist = self.stream.return_history()
print(hist)
self.assertEqual(len(hist), 200)
def test_analysis(self):
hist = self.stream.return_history()
upper, middle, lower = talib.BBANDS(np.array(hist), 99, 2, 2, 0)
margin = upper - lower
print(margin)
self.assertEqual(len(margin), len(hist))
if __name__ == '__main__':
    unittest.main() | 2.546875 | 3 |
September 2020/05-Functions-Advanced/Exercises/05-Odd-or-Even.py | eclipse-ib/Software-University-Professional-Advanced-Module | 0 | 12789995 | command = input()
list_of_numbers = [int(i) for i in input().split()]
result = 0
if command == "Odd":
# odd_numbers = list(filter(lambda x: x % 2 != 0, list_of_numbers))
# print(sum(odd_numbers) * len(list_of_numbers))
result = sum(list(filter(lambda x: x % 2 != 0, list_of_numbers))) * len(list_of_numbers)
elif command == "Even":
# even_numbers = list(filter(lambda x: x % 2 == 0, list_of_numbers))
# print(sum(even_numbers) * len(list_of_numbers))
result = sum(list(filter(lambda x: x % 2 == 0, list_of_numbers))) * len(list_of_numbers)
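# Worked example (added): with command "Odd" and numbers "1 2 3 4", result = (1 + 3) * 4 = 16;
# with command "Even", result = (2 + 4) * 4 = 24.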
print(result) | 3.921875 | 4 |
wx/models/report.py | kmarekspartz/wx | 0 | 12789996 | from datetime import datetime
from peewee import ForeignKeyField, DateTimeField
from wx.app import database
from wx.models.station import Station
class Report(database.Model):
station = ForeignKeyField(Station, related_name='reports')
timestamp = DateTimeField(default=datetime.now)
class Meta:
order_by = ('-timestamp',)
| 2.296875 | 2 |
tests/artifacts/drivers/simple_hello/http_rest_helper.py | SergeyKanzhelev/anthos-appconfig | 14 | 12789997 | <filename>tests/artifacts/drivers/simple_hello/http_rest_helper.py
#!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2019 Google LLC. This software is provided as-is,
# without warranty or representation for any use or purpose.#
#
import sys
def main(argv):
pass
if __name__ == '__main__':
main(sys.argv)
import httplib2
import logging
import json
import os
import urllib
import requests
class RestHelper(object):
def __init__(self, url, timeout=30, is_public=True):
self.__log = 1
self.__debug = True
self.__is_public = is_public
self.__service_url = url
self.__timeout = timeout
http = httplib2.Http(timeout=timeout)
def get_text(self, jwt=None, headers={}):
print('get_text')
if jwt:
headers['Authorization'] = 'Bearer ' + jwt
# print('headers', headers)
result = requests.get(self.__service_url, timeout=self.__timeout, headers=headers)
# logging.info('process_file_nlp:results-[{}]-[{}]'.format(result, len(result.text)))
if result.status_code and str(result.status_code) in ["200"]:
# print('get_test', result.text)
return result.text
        raise Exception("Response Failure for HTTP - {} - {}".format(result.status_code, result.text))
@staticmethod
def call_with_sequence(url_base, collection, jwt=None, headers={}, timeout=30):
logging.info('call_with_sequence:start')
get_params={}
for index,c in enumerate(collection):
get_params.update(c)
logging.info('call_with_sequence:base[{}]:params[{}]'.format(url_base, get_params))
if jwt:
headers['Authorization'] = 'Bearer ' + jwt
# print('headers', headers)
result = requests.get(url_base, params=get_params, timeout=timeout, headers = headers)
# logging.info('process_file_nlp:results-[{}]-[{}]'.format(result, len(result.text)))
if result.status_code and str(result.status_code) in ["200"]:
# print('get_test', result.text)
return result.text
logging.error('HTTP Error')
        raise Exception("Response Failure for HTTP - {} - {}".format(result.status_code, result.text))
return None
| 2.53125 | 3 |
firststreet/models/historic.py | owenhw/fsf_api_access_python | 15 | 12789998 | # Author: <NAME> <<EMAIL>>
# Copyright: This module is owned by First Street Foundation
# Internal Imports
from firststreet.models.api import Api
from firststreet.models.geometry import Geometry
class HistoricEvent(Api):
"""Creates a Historic Event object given a response
Args:
response (JSON): A JSON response received from the API
"""
def __init__(self, response):
super().__init__(response)
self.eventId = str(response.get('eventId'))
self.name = response.get('name')
self.month = response.get('month')
self.year = response.get('year')
self.returnPeriod = response.get('returnPeriod')
self.type = response.get('type')
self.properties = response.get('properties')
self.geometry = Geometry(response.get('geometry'))
class HistoricSummary(Api):
"""Creates a Historic Summary object given a response
Args:
response (JSON): A JSON response received from the API
"""
def __init__(self, response):
super().__init__(response)
self.fsid = str(response.get('fsid'))
self.historic = response.get('historic')
| 2.46875 | 2 |
codes/utilities/gan_tournament_selection.py | roddtalebi/ezCGP | 0 | 12789999 | '''
tournament to rank refiners + discriminators for simgan
'''
import numpy as np
import pandas as pd
import torch
def get_graph_ratings(refiners,
discriminators,
validation_data,
device,
starting_rating=1500,
starting_rd=350,
norm_val=173.7178,
n_rounds=3,
matches_per_pairing=5,
samples_per_match=10,
discriminator_win_thresh=0.6):
'''
TODO...can we get a Source?
https://arxiv.org/abs/1808.04888 ?????
Find the best refiner and discriminator from the list of refiners and discriminators using the Tournament Skill Rating Evaluation.
Parameters:
refiners (list(torch.nn)): list of refiners
discriminators (list(torch.nn)): list of discriminators
validation_data (simganData): SimGAN dataset
        device (torch.device): device on which torch tensors are allocated
        starting_rating (float): The rating that players were initialized to
        starting_rd (float): The RD that players were initialized to
norm_val (float): The normalization value used to convert between phi and RD
n_rounds(int): Number of rounds for the tournament
matches_per_pairing(int): The number of matches per refiner/discriminator pairing to determine the overall winner
samples_per_match(int): The number of samples per match to determine the winner of the match
discriminator_win_thresh: The accuracy of the discriminator needed for the discriminator to be declared the winner
Returns:
        A tuple of two Pandas DataFrames (refiner_ratings, discriminator_ratings), each with one row
        per refiner (respectively discriminator) holding its rating metadata ('r', 'RD', 'mu', 'phi').
'''
n_refiners = len(refiners)
ids = np.arange(n_refiners + len(discriminators))
refiner_ids = ids[:n_refiners]
discriminator_ids = ids[n_refiners:]
ratings = {}
for id in ids:
ratings[id] = {'r': starting_rating, 'RD': starting_rd, 'mu': 0, 'phi': starting_rd/norm_val}
labels_real = torch.zeros(samples_per_match, dtype=torch.float, device=device)
labels_refined = torch.ones(samples_per_match, dtype=torch.float, device=device)
all_real = validation_data.real_raw
all_simulated = validation_data.simulated_raw
for rnd in range(n_rounds):
# instantiate match results
match_results = {}
for id in ids:
match_results[id] = {'opponent_mus': [], 'opponent_phis': [], 'scores': []}
# Perform matches between each pair (R,D)
for id_R, R in zip(refiner_ids, refiners):
for id_D, D in zip(discriminator_ids, discriminators):
# RODD - ?...why do we need multiple matches? why not just change samples to samples_per_match*matches_per_pairing
# ...like it's just running data through refiner and discrim. like why not just do that once but with more data?
for match in range(matches_per_pairing):
real_inds = np.random.choice(np.arange(len(all_real)), samples_per_match, replace=False)
real = torch.tensor(all_real[real_inds], dtype=torch.float, device=device)
sim_inds = np.random.choice(np.arange(len(all_simulated)), samples_per_match, replace=False)
simulated = torch.tensor(all_simulated[sim_inds], dtype=torch.float, device=device)
refined = R(simulated)
# Get discriminator accuracy on real and refined data
d_pred_real = D(real)
acc_real = calc_acc(d_pred_real, labels_real)
d_pred_refined = D(refined)
acc_refined = calc_acc(d_pred_refined, labels_refined)
# Find the average accuracy of the discriminator
avg_acc = (acc_real + acc_refined) / 2.0
# Add this match's results to match_results
match_results[id_D]['opponent_mus'].append(ratings[id_R]['mu'])
match_results[id_R]['opponent_mus'].append(ratings[id_D]['mu'])
match_results[id_D]['opponent_phis'].append(ratings[id_R]['phi'])
match_results[id_R]['opponent_phis'].append(ratings[id_D]['phi'])
if avg_acc >= discriminator_win_thresh: # An accuracy greater than or equal to this threshold is considered a win for the discriminator
# A score of 1 is a win
match_results[id_D]['scores'].append(1)
match_results[id_R]['scores'].append(0)
else:
match_results[id_D]['scores'].append(0)
match_results[id_R]['scores'].append(1)
# Update scores for the refiners and discriminators
new_ratings = ratings.copy()
for id in ids:
results = match_results[id]
glicko_calculations = calculate_new_glicko_scores(ratings[id]['mu'],
ratings[id]['phi'],
np.array(results['opponent_mus']),
np.array(results['opponent_phis']),
np.array(results['scores']),
starting_rating,
norm_val)
new_ratings[id]['mu'], new_ratings[id]['phi'], new_ratings[id]['r'], new_ratings[id]['RD'] = glicko_calculations
ratings = new_ratings
# Get refiner and discriminator with best ratings
ratings_pd = pd.DataFrame(ratings).T
refiner_ratings = ratings_pd.loc[refiner_ids]
discriminator_ratings = ratings_pd.loc[discriminator_ids]
return refiner_ratings, discriminator_ratings
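# Illustrative call (added; the refiner/discriminator lists and the dataset object are assumptions):
#   ref_ratings, disc_ratings = get_graph_ratings(refiners, discriminators, validation_data,
#                                                 device=torch.device("cpu"))
#   best_refiner = refiners[ref_ratings["r"].astype(float).idxmax()]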
def calc_acc(tensor_output, tensor_labels):
'''
Calculate the percent accuracy of the output, using the labels.
Note that the sigmoid is already calculated as part of the Discriminator Network.
Parameters:
        tensor_output (torch.Tensor): discriminator output of shape (M,), the probability of each sample being class '1'
        tensor_labels (torch.Tensor): tensor of shape (M,) holding the true label for each sample
Returns:
acc (float): the probability accuracy of the output vs. the true labels
'''
    y_pred = torch.round(tensor_output)
acc = torch.sum(y_pred == tensor_labels.detach()) / len(tensor_labels.detach())
return acc
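# Quick illustration (added): calc_acc(torch.tensor([0.9, 0.2]), torch.tensor([1.0, 0.0]))
# rounds the outputs to [1, 0] and returns an accuracy of 1.0 (as a 0-dim tensor).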
def calculate_new_glicko_scores(old_mu, old_phi, opponent_mus, opponent_phis, scores, starting_rating, norm_val):
'''
TODO ...Source ????
http://www.glicko.net/glicko/glicko2.pdf ????
https://en.wikipedia.org/wiki/Glicko_rating_system ????
Calculate and return the new glicko values for the player using Glicko2 calculation
Parameters:
old_mu (float): The former mu rating
old_phi (float): The former phi rating
opponent_mus (list(float)): The mu ratings of the opponents played
opponent_phis (list(float)): The phi ratings of the opponents played
scores (list(inte)): The scores of the games played, 1 indicating a win, 0 indicating a loss
starting_rating (float): The rating that players were initialized to
norm_val (float): The normalization value used to convert between phi and RD
Returns:
(new_mu, new_phi, new_rating, new_rd) (float, float, float, float): The updated Glicko values for the player
'''
g = 1.0 / (1 + 3 * opponent_phis**2 / np.pi**2) ** 0.5 # TODO: explain/figure out what g is
E = 1.0 / (1 + np.exp(-1 * g * (old_mu - opponent_mus))) # Probability of player winning each match
v = np.sum(g**2 * E * (1 - E)) ** -1 # Estimated variance of the player's rating based on game outcomes
delta = v * np.sum(g * (scores - E)) # Estimated improvement in rating
new_phi = 1 / (1/old_phi**2 + 1/v) ** 0.5
new_mu = old_mu + new_phi**2 * np.sum(g * (scores - E))
new_rating = norm_val * new_mu + starting_rating
new_rd = norm_val * new_phi
return new_mu, new_phi, new_rating, new_rd
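# Illustrative sketch (added; the numbers are made up purely to show the call signature): a single
# update for a player at mu=0, phi=350/173.7178 who beat one identically rated opponent.
#
#   mu, phi, r, rd = calculate_new_glicko_scores(
#       0.0, 350 / 173.7178,
#       np.array([0.0]), np.array([350 / 173.7178]), np.array([1]),
#       starting_rating=1500, norm_val=173.7178)
#   # r ends up above 1500 and rd below 350, reflecting the win and the added information.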
| 2.84375 | 3 |
VRPTWObjectiveFunction.py | tweinyan/hsavrptw | 2 | 12790000 | <reponame>tweinyan/hsavrptw
#!/usr/bin/python
"""hsa
Usage:
  hsa.py <problem_instance> --hms=<hms> --hmcr=<hmcr> --parmax=<parmax> --parmin=<parmin> --ni=<ni>
Options:
--hms=<hms> Harmony memory size e.g. 10, 20, 30...
--hmcr=<hmcr> Harmony memory consideration rate e.g. 0.6, 0.7, 0.8
--ni=<ni> Number of improvisations e.g. 500, 1000, 2000
    --parmax=<parmax>  Maximal pitch adjustment rate e.g. 0.9
    --parmin=<parmin>  Minimal pitch adjustment rate e.g. 0.3
"""
from problemParser import parse_problem
from pyharmonysearch import ObjectiveFunctionInterface, harmony_search
import random
from bisect import bisect_left
from multiprocessing import cpu_count
class VRPTWObjectiveFunction(ObjectiveFunctionInterface):
def __init__(self, arguments, problem_instance):
self.problem_instance = problem_instance
self.customer_number = problem_instance['customer_number']
self.vehicle_number = problem_instance['vehicle_number']
# x[i][j][k] = 1 iff vehicle k traveled from i to j
# 0 otherwise
number_of_variables = (self.customer_number + 1)**2 \
* self.vehicle_number
self._discrete_values = []
self._variable = []
for i in range(number_of_variables):
self._discrete_values.append([0, 1])
self._variable.append(True)
#define all input parameters
self._maximize = False #minimize
self._max_imp = int(arguments['--ni']) #maximum number of improvisations
self._hms = int(arguments['--hms']) #harmony memory size
self._hmcr = float(arguments['--hmcr']) #harmony memory considering rate
self._parmin = float(arguments['--parmin'])
self._parmax = float(arguments['--parmax'])
self._mpai = 1
#TODO check, if par is used directly or via function
self._par = 0.5 #pitch adjusting rate
def ijk_to_index(self, i, j, k):
index = i * self.vehicle_number * (self.customer_number + 1) + j * self.vehicle_number + k
return index
    def index_to_ijk(self, index):
        # inverse of ijk_to_index: recover (i, j, k) from a flat index
        i, rest = divmod(index, self.vehicle_number * (self.customer_number + 1))
        return (i,) + divmod(rest, self.vehicle_number)
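    # Round-trip illustration (added): with customer_number=2 and vehicle_number=2,
    # ijk_to_index(1, 2, 1) == 1*2*3 + 2*2 + 1 == 11 and index_to_ijk(11) == (1, 2, 1).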
def make_x_from_vector(self, vector):
x = [[[0 for k in xrange(self.vehicle_number)] for j in xrange(self.customer_number + 1)] for i in xrange(self.customer_number + 1)]
for i in range(self.customer_number + 1):
for j in range(self.customer_number + 1):
for k in range(self.vehicle_number):
x[i][j][k] = vector[self.ijk_to_index(i, j, k)]
return x
def get_fitness(self, vector):
x = [[[0 for k in xrange(self.vehicle_number)] for j in xrange(self.customer_number + 1)] for i in xrange(self.customer_number + 1)]
for i in range(self.customer_number + 1):
for j in range(self.customer_number + 1):
for k in range(self.vehicle_number):
x[i][j][k] = vector[self.ijk_to_index(i, j, k)]
        # check that no city is visited more than once (by the same or by different vehicles)
for j in range(self.customer_number + 1):
visited = False
for i in range(self.customer_number + 1):
for k in range(self.vehicle_number):
if x[i][j][k] == 1 and not visited:
visited = True
elif x[i][j][k] == 1 and visited:
# two cars visited city or one car visited city twice
return float("inf")
        # check that all vehicles started from the depot
for k in range(self.vehicle_number):
car_starts_from_depot = False
for j in range(self.customer_number + 1):
if x[0][j][k] == 1:
car_starts_from_depot = True
break
if not car_starts_from_depot:
return float("inf")
max_time = 0
for k in range(self.vehicle_number):
time = 0
for i in range(self.customer_number + 1):
for j in range(self.customer_number + 1):
if x[i][j][k] == 1:
time += self.problem_instance['t'][i][j]
if time > max_time:
max_time = time
return max_time
#TODO write vectorize solution
#TODO unvectorize
#TODO implement fitness
return 5.0
def get_value(self, i, j=None):
return random.randrange(2)
def get_num_discrete_values(self, i):
# there will be always 0 or 1
return 2
def get_index(self, i, v):
# index of 0 is 0 and index of 1 is 1 in [0, 1]
return v
def is_variable(self, i):
return self._variable[i]
def is_discrete(self, i):
# All variables are discrete
return True
def get_num_parameters(self):
# compute number of parameters
return len(self._discrete_values)
def use_random_seed(self):
# What ever that means :D
return hasattr(self, '_random_seed') and self._random_seed
def get_max_imp(self):
return self._max_imp
def get_hmcr(self):
return self._hmcr
def get_par(self):
        #TODO implement pitch adjustment rate according to http://scialert.net/qredirect.php?doi=jas.2013.633.638&linkid=pdf
return self._par
def get_hms(self):
return self._hms
def get_mpai(self):
return self._mpai
def get_mpap(self):
#TODO remove, when it runs
return 0.5
def maximize(self):
return self._maximize
from problemParser import parse_problem
from docopt import docopt
if __name__ == '__main__':
arguments = docopt(__doc__)
problem_instance = parse_problem(arguments['<problem_instance>'])
obj_fun = VRPTWObjectiveFunction(arguments, problem_instance)
num_processes = cpu_count() - 1 #use number of logical CPUs - 1 so that I have one available for use
num_processes = 1
num_iterations = 100
(result, value) = (harmony_search(obj_fun, num_processes, num_iterations))
print obj_fun.make_x_from_vector(result)
print value
| 2.578125 | 3 |
bin/ud/__init__.py | cedar101/spaCy | 12 | 12790001 | <filename>bin/ud/__init__.py
from .conll17_ud_eval import main as ud_evaluate # noqa: F401
from .ud_train import main as ud_train # noqa: F401
| 1.132813 | 1 |
cdk/tests/unit/test_config.py | IGVF-DACC/igvfd | 1 | 12790002 | import pytest
def test_config_exists():
from infrastructure.config import config
assert 'demo' in config['environment']
def test_config_common_dataclass():
from infrastructure.config import Common
common = Common()
assert common.organization_name == 'igvf-dacc'
assert common.project_name == 'igvfd'
def test_config_config_dataclass():
from infrastructure.config import Config
config = Config(
name='demo',
branch='xyz-branch',
pipeline='xyz-pipeline',
)
assert config.common.organization_name == 'igvf-dacc'
assert config.common.project_name == 'igvfd'
assert config.snapshot_source_db_identifier is None
assert config.branch == 'xyz-branch'
assert config.pipeline == 'xyz-pipeline'
def test_config_build_config_from_name():
from infrastructure.config import build_config_from_name
from infrastructure.constants import DEV_DATABASE_IDENTIFIER
config = build_config_from_name(
'demo',
branch='my-branch',
pipeline='my-pipeline',
)
assert config.common.organization_name == 'igvf-dacc'
assert config.common.project_name == 'igvfd'
assert config.snapshot_source_db_identifier == DEV_DATABASE_IDENTIFIER
assert config.branch == 'my-branch'
assert config.pipeline == 'my-pipeline'
assert config.name == 'demo'
config = build_config_from_name(
'demo',
branch='my-branch',
# Overrides.
pipeline='my-pipeline',
)
config = build_config_from_name(
'dev',
branch='my-branch',
)
assert config.common.organization_name == 'igvf-dacc'
assert config.common.project_name == 'igvfd'
assert config.snapshot_source_db_identifier is None
assert config.branch == 'my-branch'
assert config.pipeline == 'ContinuousDeploymentPipelineStack'
assert config.name == 'dev'
def test_config_build_config_from_branch():
from infrastructure.config import get_config_name_from_branch
config_name = get_config_name_from_branch('IGVF-123-add-new-feature')
assert config_name == 'demo'
config_name = get_config_name_from_branch('dev')
assert config_name == 'dev'
| 2.125 | 2 |
manim_rubikscube/__init__.py | WampyCakes/manim-rubikscube | 20 | 12790003 | from .cube import *
from .cube_animations import *
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
import importlib_metadata
__version__ = importlib_metadata.version(__name__)
| 1.132813 | 1 |
homedisplay/info_timers/migrations/0006_auto_20160224_1522.py | ojarva/home-info-display | 1 | 12790004 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-24 13:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('info_timers', '0005_auto_20160213_1439'),
]
operations = [
migrations.RemoveField(
model_name='timer',
name='no_refresh',
),
migrations.AddField(
model_name='timer',
name='alarm_until_dismissed',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='timer',
name='no_bell',
field=models.BooleanField(default=False),
),
]
| 1.507813 | 2 |
demos/synchronization/synchronization.py | ae6nr/digicomm | 1 | 12790005 | <reponame>ae6nr/digicomm<gh_stars>1-10
#
# Symbol Synchronization
#
# This script tests the efficacy of frequency offset estimators for QPSK and 16-APSK constellations.
# This script does *not* handle phase ambiguity resolution.
#
# QPSK Example
# In this example, we create a series of symbols using a QPSK constellation and add a frequency offset, a phase offset, and noise.
# Then we attempt to estimate the frequency offset and derotate accordingly.
# If this works correctly, the derotated points should look like the original constellation, but perhaps with a constant phase offset.
#
# Author: redd
#
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
from digicomm import * # import my helper functions
plt.ion() # turn on interactive mode
c = getConstellation(type='qpsk')
M = len(c)
nsyms = 2**13
nbits = nsyms * int(np.log2(M))
bits = np.random.randint(0,2,size=(nbits,))
syms = bitsToSymbols(bits,M)
tx = c[syms]
rx = addNoise(addPhaseOffset(addFrequencyOffset(tx,nuT=0.01)), SNR=10, Eb=1/2)
plt.figure()
plt.plot(rx.real, rx.imag, '.', markersize=1)
plt.axis('equal')
plt.title('Symbols with Frequency Offset')
plt.grid(True)
plt.show()
nuT_hat = freqOffsetEstimationQpsk(rx, mode='interp_1') # estimate frequency offset
rx_2 = addFrequencyOffset(rx,nuT=-nuT_hat) # derotation
plt.figure()
plt.plot(rx_2.real, rx_2.imag, '.', markersize=1)
plt.axis('equal')
plt.title('Derotated')
plt.grid(True)
plt.show()
luw = 64
rx_3 = phaseAmbiguityResolution(rx_2, rx_2[0:luw], tx[0:luw]) # use the first few symbols as a poor man's unique word. This is cheating because we don't have explicit knowledge of tx.
plt.figure()
plt.plot(rx_3.real, rx_3.imag, '.', markersize=1)
plt.axis('square')
plt.title('Phase Ambiguity Resolution')
plt.grid(True)
plt.show()
plt.ioff() # turn off interactive mode
plt.show() # keep plots visible | 2.421875 | 2 |
checkmate/lib/stats/helpers.py | marcinguy/checkmate-ce | 80 | 12790006 | <filename>checkmate/lib/stats/helpers.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
def directory_splitter(path,include_filename = False):
if include_filename:
path_hierarchy = path.split("/")
else:
path_hierarchy = path.split("/")[:-1]
if path.startswith('/'):
path_hierarchy = path_hierarchy[1:]
paths = []
current_path = ''
for partial_path in path_hierarchy:
paths.append(current_path)
if current_path != '':
current_path+='/'
current_path+=partial_path
paths.append(current_path)
return paths
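# Examples (added for illustration, derived from the code above):
#   directory_splitter("a/b/c.py")                         -> ['', 'a', 'a/b']
#   directory_splitter("a/b/c.py", include_filename=True)  -> ['', 'a', 'a/b', 'a/b/c.py']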
| 2.640625 | 3 |
TunServer.py | mrlinqu/intsa_term_client | 0 | 12790007 | <reponame>mrlinqu/intsa_term_client
# Copyright 2020 by <NAME> <<EMAIL>>.
# All rights reserved.
# This file is part of the Intsa Term Client - X2Go terminal client for Windows,
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
import select
import threading
import socketserver
import logging
class Handler(socketserver.BaseRequestHandler):
def handle(self):
try:
chan = self.ssh_transport.open_channel("direct-tcpip", (self.chain_host, self.chain_port), self.request.getpeername(),)
except Exception as e:
logging.debug("Incoming request to %s:%d failed: %s" % (self.chain_host, self.chain_port, repr(e)))
return
if chan is None:
logging.debug("Incoming request to %s:%d was rejected by the SSH server." % (self.chain_host, self.chain_port))
return
logging.debug("Connected! Tunnel open %r -> %r -> %r" % (self.request.getpeername(), chan.getpeername(), (self.chain_host, self.chain_port),))
self.onConnect(self.request.getpeername())
try:
while True:
r, w, x = select.select([self.request, chan], [], [])
if self.request in r:
data = self.request.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
self.request.send(data)
except Exception:
pass
peername = self.request.getpeername()
chan.close()
self.request.close()
self.onDisconnect(peername)
#def ssh_forward_tunnel(local_port, remote_host, remote_port, transport):
# # this is a little convoluted, but lets me configure things for the Handler
# # object. (socketserver doesn't give Handlers any way to access the outer
# # server normally.)
# class SubHander(Handler):
# chain_host = remote_host
# chain_port = remote_port
# ssh_transport = transport
#
# #ForwardServer(("", local_port), SubHander).serve_forever()
# server = ForwardServer(("", local_port), SubHander)
# server_thread = threading.Thread(target=server.serve_forever)
# server_thread.daemon = True
# server_thread.start()
class TunServer:
def __init__(self, local_port, remote_host, remote_port, transport):
class SubHander(Handler):
chain_host = remote_host
chain_port = remote_port
ssh_transport = transport
onConnect = self._onConnect
onDisconnect = self._onDisconnect
self.onConnect = None
self.onDisconnect = None
self.server = socketserver.ThreadingTCPServer(("", local_port), SubHander)
self.server.allow_reuse_address = True
self.server.daemon_threads = True
        ## leftovers from the web server
#tcp_socket = socket.socket(self.httpd.address_family, self.httpd.socket_type)
#self.httpd.socket = ssl.wrap_socket(tcp_socket, self.config.privkeyfile, self.config.pubkeyfile, True)
#self.httpd.server_bind()
#self.httpd.server_activate()
self.server_thread = threading.Thread(target = self.server.serve_forever)
#self.server_thread.daemon = True
self.server_thread.start()
#print('tun start')
def _onConnect(self, peer):
logging.debug("Tun: open %r" % (peer,))
if self.onConnect:
self.onConnect(peer)
pass
def _onDisconnect(self, peer):
logging.debug("Tun: closed from %r" % (peer,))
if self.onDisconnect:
self.onDisconnect(peer)
pass
def stop(self):
self.server.shutdown()
self.server_thread.join(5)
self.server.server_close()
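# Illustrative usage sketch (added; the paramiko transport, host and ports are assumptions):
#   transport = ssh_client.get_transport()
#   tun = TunServer(local_port=7000, remote_host="127.0.0.1", remote_port=6000, transport=transport)
#   ...  # connections to localhost:7000 are now forwarded over SSH to 127.0.0.1:6000
#   tun.stop()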
| 2.109375 | 2 |
auth_app/migrations/0003_auto_20220120_2339.py | Afeez1131/Referral_contest | 0 | 12790008 | # Generated by Django 3.2 on 2022-01-20 21:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
("auth_app", "0002_businessowner_is_superuser"),
]
operations = [
migrations.AddField(
model_name="businessowner",
name="groups",
field=models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
migrations.AddField(
model_name="businessowner",
name="user_permissions",
field=models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
]
| 1.953125 | 2 |
tests/test_dixday_predictions.py | innovation-hub-bergisches-rheinland/dixday-predictions | 0 | 12790009 | import csv
import yaml
from dixday_predictions import __version__
from dixday_predictions.eventhandler.EventHandler import EventHandler
def _read_config(config_path) -> dict:
with open(config_path, "r") as ymlfile:
config = yaml.safe_load(ymlfile)
return config
def test_version():
assert __version__ == '0.1.5'
| 2.125 | 2 |
helpers/db_helpers.py | zoltancsontos/pystack-framework | 0 | 12790010 | <filename>helpers/db_helpers.py
import os
import re
env = os.environ
class DbHelpers(object):
"""
Database helpers
:author: <EMAIL>
"""
@staticmethod
    def __get_connection_parts__(connection_string):
        # use the supplied connection string; fall back to the ClearDB env var if none is given
        conn_str = connection_string or env['CLEARDB_DATABASE_URL']
db_type, user, password, host, database = re.match('(.*?)://(.*?):(.*?)@(.*?)/(.*)', conn_str).groups()
return db_type, user, password, host, database
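    # Example (added; the URL below is made up):
    #   DbHelpers.__get_connection_parts__("mysql://user:pass@db.example.com/mydb")
    #   -> ('mysql', 'user', 'pass', 'db.example.com', 'mydb')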
| 2.65625 | 3 |
genpairs.py | TestCreator/GenPairs | 0 | 12790011 | # Generate an all-pairs covering test suite
#
# (c) 2007 University of Oregon and <NAME>
# All rights reserved.
#
License = """
(C) 2007,2017 University of Oregon and <NAME>. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
* Neither the name of the University of Oregon nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided by the copyright holders and contributors
"as is" and any express or implied warranties, including, but not
limited to, the implied warranties of merchantability and fitness for
a particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
"""
usage = """Usage:
# To read a specification (foo.cp) and print the test vector in human-readable
# format:
python genpairs.py < foo.cp
# To read a partial suite of test cases (tests.txt) in CSV format,
# plus a test specification, and report which pairs of values have not
# been covered:
python genpairs.py --csv --initial-suite tests.txt -o -v -p < foo.cp
# To read the same as above, and then produce a test suite that
# covers the missing pairs:
python genpairs.py --csv --initial-suite tests.txt < foo.cp
"""
#
# An item is a pair (slot number, value)
# An itempair is a pair (item, item), that is, ((slot, value), (slot, value))
# An obligation is a pair (the two items must occur together in some case)
# An exclusion is a pair (the two items must not occur together in any case)
# A case is a list (array) with n columns
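#
# A concrete illustration of the terms above (added for clarity; the values are made up):
#   item       : (0, "small")                  -- column 0 takes the value "small"
#   obligation : ((0, "small"), (2, "fast"))   -- the two items must occur together in some case
#   case       : ["small", "red", "fast"]      -- one value per column (category)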
#
# Representations:
# A test case is represented as a list, indexed by column (category)
# A test suite is a list of test cases
# An item is a tuple, and an itempair is a tuple
#
# Like AETG and several other covering array generators, the outer
# loop will generate test cases, and the inner loops try to fulfill
# as many test obligations as possible with each test case.
#
# Data structures:
# We will record obligations in three different data structures,
# for different forms of quick access:
# ObsList is a list of obligations, some of which may
# already have been fulfilled (deletion is lazy). We may scramble
# this list so we don't have an unfortunate ordering.
# Outstanding is a set of all the obligations still outstanding.
# ObsByCol is a dictionary obligations by column, also updated lazily.
#
# Exclude is a dictionary mapping items to lists of item.
#
import sys ## for file handling
import random ## for shuffling lists
import csv ## for reading and writing test suites
## Constants (other than tokens for parsing)
DontCare = "_"
## Configuration parameters
DBG = False ## Debugging mode, on (true) or off (false)
DBGp = False ## Performance debugging, December 2006
maxCandidates = 50 ## Bigger = better solutions, smaller = faster
## Platform compatibility
# ----------------------------------------
import six # Python 2 and 3 compatibility
from six import print_
## Logging
#
import logging
logging.basicConfig(format='%(levelname)s:%(message)s',
level=logging.WARNING)
Log = logging.getLogger(__name__)
# Debug messages
def dbg(*msg):
parts = [ str(x) for x in msg ]
msg_string = " ".join(parts)
Log.debug(msg_string)
# Performance debug messages
def dbg_p(*msg):
if DBGp:
dbg(*msg)
# ------------------------------------
## User arguments
from optparse import OptionParser
optparser = OptionParser(usage=usage)
optparser.set_defaults(output_format="plain")
optparser.add_option("-d", "--debug",
help="Print a lot of debugging messages",
action="store_true", default=False, dest="debug")
optparser.add_option("-l", "--license",
help="Print license terms (and then quit)",
action="store_true",default=False, dest="license")
optparser.add_option("--csv", "-c", "--comma-separated-values",
action="store_const", dest="output_format",
const = "csv",
help = """Output format is comma-separated-values
(suitable as input to Excel and other spreadsheets,
genpairs with the -i option, and some other
programs).""")
optparser.add_option("-v", "--varying", "--varying-columns-only",
action="store_true", default=False, dest="varying",
help="""Include only categories with more than one
non-error and non-single value""")
optparser.add_option("-s", "--singles", "--singles-only",
action="store_false", default=True, dest="combinations",
help="""Print only test cases covering 'error'
and 'single' values.""")
optparser.add_option("-o", "--omit-singles",
action="store_false", default=True, dest="singles",
help = """Do not produce test cases covering 'single'
or 'error' values.""")
optparser.add_option("-i", "--initial", "--initial-suite",
action="append", default = [], dest="initial_suite",
help="""Read initial test suite (in csv format). Often
used together with -p""")
optparser.add_option("-p", "--pairs", "--print-pairs",
action="store_true", default=False, dest="pairs",
help="""Report pairs not covered by initial test suites.
(Useful only with --initial)""")
(UserOptions, UserArgs) = optparser.parse_args()
Log.info("User options: %s", UserOptions)
if UserOptions.debug :
print_("Enabling debugging")
DBG=True
Log.setLevel(logging.DEBUG)
## Primary data structures
CategoriesList = [ ] ## List of category names (in order given)
## The CategoriesList can also be considered the test case schema
CategoriesValues = [ ] ## List of value sets
Singles = [] ## List of (slot,value,kind) where kind is "single" or "error"
Excludes = set() ## Set of ((slot,value),(slot,value)) (not symmetric)
ObsList = [ ] # All obligations, but only one direction
Outstanding = set() # All obligations, but only one direction
ObsByCol = {} # Per column, both directions
SingleColumns = [ ] # Columns with just one (non-error, non-single) choice
MultipleColumns = [ ] # Complement of SingleColumns -- pairs are from these
NCol = 0 # ==len(CategoriesList), set after parsing
## Temporary, for building excludes
PropsSlots = { } # For each property name, set of slots with it
CategoriesProps = { } # For each category, all props on any values
ValueProps = { } # Map (slot,value) pair to list of condition names
ValueIfs = [ ] # List of (value, slot, condition) triples
ValueExcepts = [ ] # List of (value, slot, condition) triples
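# For instance (added; a made-up two-category spec):
#     size: big prop heavy
#     load: full if heavy
# would give PropsSlots["heavy"] = {0}, ValueProps[(0, "big")] = ["heavy"],
# and ValueIfs would contain the triple ("full", 1, "heavy").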
## What we build
Suite = [ ] ## List of test cases
## Instrumentation
INSTR_N_Comparisons = 0
# ---------- Read spec file using a simple LL parser ----
# Consts for token classification
EOF = "<EOF>"
CategoryToken = "<CAT>"
ValueToken = "<VAL>"
IfToken = "<IF>"
PropToken = "<PROP>"
ExceptToken = "<EXCEPT>"
ErrorToken = "<ERROR>"
SingleToken = "<SINGLE>"
EOFToken = EOF
def tokenClass( tok ) :
if tok == EOF : return EOFToken
if tok.endswith(":") : return CategoryToken
if tok == "if" : return IfToken
if tok == "prop" : return PropToken
if tok == "except" : return ExceptToken
if tok == "single" : return SingleToken
if tok == "error" : return ErrorToken
return ValueToken
# Generator to produce tokens, one by one
#
def getToken() :
while 1:
s = sys.stdin.readline()
if not s:
dbg("#DBG <<EOF reached>>")
yield EOF
return
commentPos = s.find("//");
if commentPos >= 0 :
s = s[0:commentPos]
for word in s.split() :
dbg("#DBG <<%s: %s>>" % ( word, tokenClass(word) ) )
yield word
Token = "<PASSWORD>"
tokenStream = getToken()
def parse():
global Token
global NCol
Token = six.next(tokenStream)
parseSpec()
NCol = len(CategoriesList)
def parseSpec():
global Token
dbg("#DBG (parseSpec)")
if Token == EOF : return [ ]
if tokenClass( Token ) != CategoryToken :
print_("Syntax error on ", Token, " looking for 'category:'")
print_("Skipping to next category")
## Error recovery to next category
while tokenClass( Token ) != CategoryToken :
if tokenClass( Token ) == EOF :
print_("Discarding rest of file")
return [ ]
            Token = six.next(tokenStream)
print_("Resuming from" , Token)
category = Token[0:-1]
Token = six.next(tokenStream)
values = parseValues()
dbg("#DBG Parsed: ", category, " ::= ", values)
slotNum = len(CategoriesList)
CategoriesList.append( category )
vlist = [ ]
CategoriesValues.append(vlist)
CategoriesProps[ category ] = [ ]
for valDesc in values :
val = valDesc[0] ## The name of the value itself
## Postpone marking val as a possible value of the property
## until we know whether it is a singleton
singleton = False
ValueProps[ (slotNum, val) ] = [] ## List of its properties
for cond in valDesc[1:] :
kind = nameOf(cond)
condVal = valOf(cond)
if kind == "prop" :
CategoriesProps[ category ].append(condVal)
ValueProps[ (slotNum, val ) ].append(condVal)
if condVal not in PropsSlots :
PropsSlots[condVal] = set()
PropsSlots[condVal].add(slotNum)
elif kind == "if" :
ValueIfs.append( (val, slotNum, condVal ) )
elif kind == "except" :
ValueExcepts.append( (val, slotNum, condVal) )
elif kind == "error" or kind == "single" :
Singles.append( (val, slotNum, kind) )
singleton = True
else :
print_("*ERR* Unrecognized condition attribute:", cond)
if not singleton: vlist.append( val )
parseSpec()
def parseValues():
global Token
dbg("#DBG (parseValues)")
values = [ ]
while tokenClass( Token ) == ValueToken :
val = parseValue()
dbg("#DBG (parsed value: ", val, ")")
values.append( val )
return values
def parseValue():
global Token
dbg("#DBG (parseValue, looking at ", Token, ")")
if tokenClass( Token ) != ValueToken :
print_("Syntax error, expecting value, saw ", Token )
return [ "--bogus--"]
value = [ Token ]
Token = six.next(tokenStream)
conditions = parseConditions()
dbg("#DBG parseValue returns", value + conditions)
return value + conditions
def parseConditions():
global Token
dbg("#DBG (parseConditions)")
if tokenClass( Token ) == ErrorToken :
Token = six.next(tokenStream)
return [("error", None )] + parseConditions()
if tokenClass( Token ) == SingleToken :
Token = six.next(tokenStream)
return [("single", None)] + parseConditions()
if tokenClass( Token ) == IfToken :
Token = six.next(tokenStream)
ifcond = Token
Token = six.next(tokenStream)
return [("if" , ifcond)] + parseConditions()
if tokenClass( Token ) == PropToken :
Token = six.next(tokenStream)
condname = Token
Token = six.next(tokenStream)
return [("prop" , condname)] + parseConditions()
if tokenClass( Token ) == ExceptToken :
Token = six.next(tokenStream)
condname = Token
Token = six.next(tokenStream)
return [("except" , condname)] + parseConditions()
dbg("#DBG No more conditions")
return [ ]
# -------------- The form of a pair (obligation or exclusion) -----
def makePair( s1, v1, s2, v2 ):
return ((s1, v1), (s2, v2))
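# e.g. makePair(0, "firefox", 2, "linux") == ((0, "firefox"), (2, "linux"))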
def reversePair( pair ):
return ( pair[1], pair[0] )
# Each item in the pair is a <slot,value> or <name,value> pair
def slotOf( tuple ):
return tuple[0]
def nameOf( tuple ):
return tuple[0]
def valOf( tuple ):
return tuple[1]
# --------------- Build initial data structures ----
# Single columns are those in which all but one value is
# listed as a "single" or "error" choice, i.e., for pairs
# generation the value will be fixed. We can save some time by
# always fixing these at the beginning of pairs generation, and
# we can save space in output by suppressing them.
# (Note they may still participate in excludes.)
#
# We'll identify the multiples (non-single columns) as well,
# because they are useful in several places
#
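# For example (hypothetical spec): if a category lists the values "normal", "corrupt error"
# and "empty single", only "normal" remains in CategoriesValues for that slot, so the column
# is identified as a single column below.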
def identifySingles() :
for slot in range(len(CategoriesList)) :
if len(CategoriesValues[slot]) == 0 :
print_("Warning: No non-singular value choices for ",
CategoriesList[slot],
"; Pairs generation will fail.")
elif len(CategoriesValues[slot]) == 1 :
SingleColumns.append(slot)
else:
MultipleColumns.append(slot)
# Obligations depend on excludes, so call makeExcludes before
# calling makeObligations
#
def makeExcludes() :
# Excludes that come from "except" clauses
for ExceptCond in ValueExcepts :
val, slot, cond = ExceptCond
for conflict_slot in PropsSlots[ cond ] :
for cs_value in CategoriesValues[ conflict_slot ] :
if cond in ValueProps[ (conflict_slot, cs_value) ] :
Excludes.add( makePair( slot, val, conflict_slot, cs_value))
# Excludes that come from "if" clauses --- reverse sense
for IfCond in ValueIfs :
val, slot, cond = IfCond
for conflict_slot in PropsSlots[ cond ] :
for cs_value in CategoriesValues[ conflict_slot ] :
if cond not in ValueProps[ (conflict_slot, cs_value) ] :
Excludes.add( makePair( slot, val, conflict_slot, cs_value))
def makeObligations() :
if DBG:
print_("--- Creating obligations list ---")
keys = CategoriesList
nslots = len(keys)
for i in range(nslots):
ObsByCol[i] = []
for i in MultipleColumns :
for v1 in CategoriesValues[i] :
i_item = (i, v1)
for j in range(i+1,nslots) :
## if j in SingleColumns: continue ##
## --- short cut doesn't work if only one varying column --
for v2 in CategoriesValues[j] :
j_item = (j, v2)
obforward = (i_item, j_item)
obbackward = (j_item, i_item)
if obforward not in Excludes and obbackward not in Excludes:
ObsList.append(obforward)
Outstanding.add(obforward)
ObsByCol[ i ].append(obforward)
ObsByCol[ j ].append(obbackward)
random.shuffle(ObsList)
dbg("--- ObsList complete, ", len(ObsList), " obligations ---")
# When we complete a test case, we remove obligations from
# the outstanding obligations list. The other lists are
# cleared lazily, when we bring up an obligation.
#
def clearObligations(testcase) :
testCaseValue = 0
for i in range( len(testcase) ):
for j in range ( i+1, len(testcase) ):
ob = makePair(i, testcase[i], j, testcase[j])
if ob in Outstanding:
Outstanding.remove(ob)
testCaseValue = testCaseValue + 1
dbg("*** Value ", testCaseValue, testcase )
# ---------------------------------------------------------
#
# Is a given (slot,value) pair compatible with the test case so far?
#
def compatible( item, testcase ) :
slot, val = item
if ( testcase[ slot ] != DontCare and testcase[slot] != val) :
return False
for tslot in range(len(testcase)) :
if ((slot, val), (tslot, testcase[tslot])) in Excludes:
return False
if ((tslot, testcase[tslot]),(slot,val)) in Excludes:
return False
return True
# ---------------------------------------------------------
def MakeTuple ( length ):
    newList = []
    for i in range(0,length):
        newList.append(DontCare)
    return newList
def CreateCase():
seedObligation = ObsList.pop()
while seedObligation not in Outstanding:
if (len(ObsList) == 0): return
seedObligation = ObsList.pop()
s1, v1 = seedObligation[0]
s2, v2 = seedObligation[1]
testcase = MakeTuple( len(CategoriesList) )
testcase[s1] = v1
testcase[s2] = v2
for slot in SingleColumns :
testcase[slot] = CategoriesValues[slot][0]
dbg("#DBG === Attempting tuple seeded with", testcase)
columnOrder = list(range( len(CategoriesList) ) )
random.shuffle(columnOrder)
if ( completeCase( columnOrder, testcase ) ) :
Suite.append( testcase )
clearObligations( testcase )
else:
CaseMessage( "Warning - No pair possible: ", testcase )
def CreateSingles():
for single in Singles:
CreateSingle(single)
def CreateSingle( single ):
testcase = MakeTuple( len(CategoriesList) )
columnOrder = list(range( len(CategoriesList) ) )
random.shuffle(columnOrder)
value, slot, kind = single
dbg("#DBG single obligation: ", slot, value, kind)
testcase[slot] = value
if completeCase( columnOrder, testcase ) :
Suite.append( testcase )
else:
CaseMessage( "Warning - No pair possible: ", testcase )
def completeCase( columnOrder, testcase ) :
if len (columnOrder) == 0 :
dbg_p("#DBG: *** Success: ", testcase)
return True
dbg_p("#DBG * Attempting to complete", testcase )
col = columnOrder[0]
if testcase[col] != DontCare:
dbg_p("#DBG * Skipping column ", col, " (already filled in)")
return completeCase( columnOrder[1:], testcase )
dbg("#DBG ***Trying columns ", columnOrder, " in ", testcase)
# How shall we fill this DontCare with something useful?
# Let's try for an outstanding obligation.
# Dec 2006 --- Let's look at all the outstanding obligations
# and choose the one with highest score. This is fairly expensive
# (10^20 takes about 9 minutes wall time on G4 laptop), so now we
# set a limit (maxCandidates) on number of candidates considered
colObs = ObsByCol[col]
candidates = [ ]
obindex = 0
while obindex < len(colObs) and len(candidates) < maxCandidates :
ob = colObs[obindex]
if not (ob in Outstanding or reversePair(ob) in Outstanding):
# Here is our lazy deletion of obligations; we
# clip from the end of the list
dbg_p("#DBG * Lazy deletion")
colObs[obindex] = colObs[ len(colObs) - 1 ]
colObs.pop()
else:
if compatible(ob[0], testcase) and compatible(ob[1], testcase):
dbg_p("#DBG *** Compatible", ob, testcase )
                # Score the candidate obligation.
# Note one (but not both) of these may coincide with
# an existing element. We'll only consider *added* value,
# so we score the *new* parts only.
value = 1 ## For at least meeting one obligation
((s1, v1), (s2, v2)) = ob
if testcase[s1] != v1 :
for ccol in range( len(testcase) ):
if ((s1,v1),(ccol,testcase[ccol])) in Outstanding :
value = value + 1
if ((ccol,testcase[ccol]),(s1,v1)) in Outstanding :
value = value + 1
if testcase[s2] != v2 :
for ccol in range( len(testcase) ):
if ((s2,v2),(ccol,testcase[ccol])) in Outstanding :
value = value + 1
if ((ccol,testcase[ccol]),(s2,v2)) in Outstanding :
value = value + 1
candidates.append( (value, ob) )
obindex = obindex + 1
candidates.sort()
candidates.reverse()
dbg_p("### Candidates: ", candidates)
for cand in candidates:
(score, ((s1, v1),(s2,v2))) = cand
old_v1 = testcase[ s1 ]
testcase[ s1 ] = v1
old_v2 = testcase[ s2 ]
testcase[ s2 ] = v2
if completeCase( columnOrder[1:] , testcase ):
return True
else:
dbg_p("#DBG *** Rolling back ", s1, s2)
# Restore previous values
testcase[ s1 ] = old_v1
testcase[ s2 ] = old_v2
## If we couldn't score any more obligations, can we at least
## fill in some compatible value and move on?
dbg_p("#DBG *** Trying any value, regardless of obligation")
for val in CategoriesValues[ col ] :
if compatible((col,val), testcase) :
testcase[ col ] = val
if completeCase( columnOrder[1:], testcase ):
return True
else:
testcase[ col ] = DontCare
dbg_p("#DBG ** Failing to fill column ", col , " with ", testcase)
return False
# ------------------------------------------------------------
# Print Warnings (to stderr unless otherwise specified)
# ------------------------------------------------------------
def CaseMessage( msg, vector, dest=sys.stderr ) :
"""Print a warning or error message concerning a
particular partially-defined test vector"""
print_( "{} [".format(msg), end="", file=dest)
sep=""
for col in range(len(vector)) :
if vector[col] == DontCare :
print_(sep+"_",end="", file=dest)
else:
print_("{}{}={}".format(sep,CategoriesList[col],vector[col]),
end="", file=dest)
sep=", "
print_("]",file=dest)
def ObToVector( ob ) :
"""Convert obligation to vector for debugging messages"""
t = MakeTuple( NCol )
s1,v1 = ob[0]
s2,v2 = ob[1]
t[s1]=v1
t[s2]=v2
return t
# ------------------------------------------------------------
# Print results
# ------------------------------------------------------------
def PrintTable( columns, descriptive_title ) :
if UserOptions.output_format == "csv" :
PrintAsCSV( columns )
else:
PrintAsText( columns, descriptive_title )
def PrintAsText( columns, descriptive_title ):
print_(descriptive_title + ":", len(Suite), " test vectors")
print_("")
for slot in columns :
parm = CategoriesList[ slot ]
print_("%15s" % parm , end="")
print_("")
print_("_"*60)
for t in Suite :
for slot in columns :
value = t[slot]
print_("%15s" % value , end="")
print_( "" )
print_( "" )
def PrintAsCSV(columns):
""" Print vectors as comma-separated values, for import
into a spreadsheet or other CSV-consuming application. """
dbg("Print as CSV")
csv_writer = csv.writer( sys.stdout, dialect=csv.excel )
schema_row = [ ]
for slot in columns :
schema_row.append( CategoriesList[slot] )
csv_writer.writerow(schema_row)
for t in Suite :
dbg("write row " , t )
csv_writer.writerow( t )
# ----------------
## Read an initial test suite (or several), and
## eliminate those obligations, so we are creating
## a test suite to fill in the remainder of the test
## obligations.
##
## NOTE: Currently considering only pair obligations,
## not singletons. We should look at single and error
## cases first, and
## * Not consider any test case with more than one
## single or error value (we don't know which will be handled
## by the application, and we assume special case processing
## may miss other features, including other special cases)
## * Not consider any pairs as being satisfied by a single
## or error case.
## For now, we just assume that the initial test suite is not
## a suite of special and error cases.
##
class csv_dialect(csv.excel):
skipinitialspace=True ## Seems to have no effect
def initial_suite_clear( initial_suite ) :
matches = False
reader = csv.reader( open(initial_suite, "r"),
csv_dialect) ## Working yet? (No.)
## First line should be schema
    in_schema = six.next(reader)
in_schema_map = [ ]
for i in range(len(in_schema)):
col = in_schema[i]
if col in CategoriesList:
to_col = CategoriesList.index(col)
in_schema_map.append(to_col)
else:
print_("Warning: schema mismatch in", initial_suite)
print_(" Column ", i, "'" + col + "'", "not in specification")
in_schema_map.append(-1)
for vec in reader:
if len(vec) == len(in_schema) :
trvec = MakeTuple(len(CategoriesList))
for i in range(len(vec)) :
if in_schema_map[i] != -1 :
trvec[in_schema_map[i]] = vec[i]
clearObligations( trvec )
else:
print_("*** Warning, format mismatch with initial suite ",
initial_suite)
print_("*** Expecting columns ",
in_schema , " but saw ", vec)
# ----------------
## Print the set of outstanding obligations. Typical use is when
## we are trying to see what is missing in an initial test suite.
##
def print_required_pairs( ) :
for ob in Outstanding :
s1, v1 = ob[0]
name1=CategoriesList[s1]
s2, v2 = ob[1]
name2=CategoriesList[s2]
print_("%s=%s, %s=%s" % (name1, v1, name2, v2))
## ------------------------------------------------------------
## MAIN PROGRAM (after initialization above)
## ------------------------------------------------------------
# -- Respond to special diagnostic options --
if UserOptions.license:
print_(License)
exit(0)
if UserOptions.debug:
print_("---------------------------")
print_("Options in effect: ")
print_("debug: ", UserOptions.debug)
print_("output_format:", UserOptions.output_format)
print_("varying:", UserOptions.varying)
print_("combinations:", UserOptions.combinations)
print_("singles:", UserOptions.singles)
print_("initial_suite:", UserOptions.initial_suite)
print_("pairs:", UserOptions.pairs)
print_("---------------------------")
# -- Main processing: Parse the script, execute, print --
parse()
identifySingles()
makeExcludes()
makeObligations()
for suite in UserOptions.initial_suite :
initial_suite_clear( suite )
if UserOptions.pairs :
print_("=== Pairs required for completion ===" )
print_required_pairs()
print_("=====================================")
if UserOptions.combinations :
while len(ObsList) > 0 :
CreateCase()
if UserOptions.varying :
PrintTable( MultipleColumns, "Pairwise coverage, varying columns only" )
else:
PrintTable( range(len(CategoriesList)), "Pairwise coverage" )
if UserOptions.singles :
Suite = [ ]
CreateSingles()
PrintTable( range(len(CategoriesList)), "Single and error vectors" )
| 1.328125 | 1 |
src/main/python/mydiscordbot.py | mgaertne/minqlx-plugin-tests | 4 | 12790012 | <filename>src/main/python/mydiscordbot.py
"""
This is a plugin created by ShiN0
Copyright (c) 2017 ShiN0
<https://www.github.com/mgaertne/minqlx-plugin-tests>
You are free to modify this plugin to your own one, except for the version command related code.
The basic ideas for this plugin came from Gelenkbusfahrer and roast
<https://github.com/roasticle/minqlx-plugins/blob/master/discordbot.py> and have been mainly discussed on the
fragstealers_inc discord tech channel of the Bus Station server(s).
You need to install discord.py in your python installation, i.e. python3 -m pip install -U discord.py
"""
import re
import asyncio
import threading
import logging
import os
from logging.handlers import RotatingFileHandler
import minqlx
from minqlx import Plugin
import discord
from discord import ChannelType, AllowedMentions
from discord.ext.commands import Bot, Command, DefaultHelpCommand
import discord.ext.tasks
plugin_version = "v1.51"
MAP_SUBSCRIBER_KEY = "minqlx:maps:{}:subscribers"
class mydiscordbot(minqlx.Plugin):
"""
The plugin's main purpose is to create a relay chat between the Quake Live chat and configured discord channels.
There are two basic types of relay in this basic version of a discord plugin:
* full relay between Quake Live chat and discord, where every text message that is happening is forwarded to the
    other system, and some basic Quake Live status updates are sent to discord
* triggered relay of specific messages between discord and Quake Live chat where a prefix needs to be used for the
messages to be forwarded.
These two modes can be combined, i.e. full relay to a broadcast channel, and specific messages from another channel.
    For a description on how to set up a bot for your discord network take a look `here
<https://github.com/reactiflux/discord-irc/wiki/Creating-a-discord-bot-&-getting-a-token>`.
As of version 1.5 of the mydiscordbot, you also need to enable the Server Members Intent for the bot in order to be
    able to replace discord user mentions. If you don't need that, i.e. you have configured both of the
    qlx_discordReplaceMentions cvars as '0', you can leave it unchecked. By default, this will be enabled and therefore
mandatory. Check <https://discordpy.readthedocs.io/en/latest/intents.html#privileged-intents> for a description.
Uses:
* qlx_discordBotToken (default: "") The token of the discord bot to use to connect to discord.
* qlx_discordRelayChannelIds (default: "") Comma separated list of channel ids for full relay.
* qlx_discordRelayTeamchatChannelIds (default: "") Comma separated list of channel ids for relaying team chat
messages.
* qlx_discordTriggeredChannelIds (default: "") Comma separated list of channel ids for triggered relay.
* qlx_discordTriggeredChatMessagePrefix (default: "") Prefix any triggered message from QL with this text portion.
    Useful when running multiple servers on the same host that are connected to the same discord.
* qlx_discordUpdateTopicOnTriggeredChannels (default: "1") Boolean flag to indicate whether to update the topic with
the current game state on triggered relay channels. Your bot needs edit_channel permission for these channels.
* qlx_discordKeepTopicSuffixChannelIds (default: "") Comma separated list of channel ids where the topic suffix
will be kept upon updating.
* qlx_discordUpdateTopicInterval (default: 305) Amount of seconds between automatic topic updates
* qlx_discordKeptTopicSuffixes (default: {}) A dictionary of channel_ids for kept topic suffixes and the related
suffixes. Make sure to use single quotes for the suffixes.
* qlx_discordCommandPrefix (default: "!") Command prefix for all commands from discord
* qlx_discordTriggerTriggeredChannelChat (default: "quakelive") Message prefix for the trigger on triggered relay
channels.
* qlx_discordTriggerStatus (default: "status") Trigger for having the bot send the current status of the game
server.
* qlx_discordMessagePrefix (default: "[DISCORD]") messages from discord to quake live will be prefixed with this
prefix
* qlx_discordEnableHelp (default: "1") indicates whether the bot will respond to !help or responses are completely
switched off
* qlx_discordEnableVersion (default: "1") indicates whether the bot will respond to !version or responses are
completely switched off
* qlx_displayChannelForDiscordRelayChannels (default: "1") display the channel name of the discord channel for
configured relay channels
* qlx_discordQuakeRelayMessageFilters (default: "^\!s$, ^\!p$") comma separated list of regular expressions for
messages that should not be sent from quake live to discord
* qlx_discordReplaceMentionsForRelayedMessages (default: "1") replace mentions (@user and #channel) for messages
sent towards relay channels
* qlx_discordReplaceMentionsForTriggeredMessages (default: "1") replace mentions (@user and #channel) for triggered
messages sent towards the triggered channels
    * qlx_discordAdminPassword (default "<PASSWORD>") password for remote admin of the server via discord private
messages to the discord bot.
* qlx_discordAuthCommand (default: "auth") command for authenticating a discord user to the plugin via private
message
* qlx_discordExecPrefix (default: "qlx") command for authenticated users to execute server commands from discord
* qlx_discordLogToSeparateLogfile (default: "0") enables extended logging for the discord library (logs to
minqlx_discord.log in the homepath)
"""
def __init__(self, discord_client=None):
super().__init__()
# maybe initialize plugin cvars
Plugin.set_cvar_once("qlx_discordBotToken", "")
Plugin.set_cvar_once("qlx_discordRelayChannelIds", "")
Plugin.set_cvar_once("qlx_discordRelayTeamchatChannelIds", "")
Plugin.set_cvar_once("qlx_discordTriggeredChannelIds", "")
Plugin.set_cvar_once("qlx_discordTriggeredChatMessagePrefix", "")
Plugin.set_cvar_once("qlx_discordUpdateTopicOnTriggeredChannels", "1")
Plugin.set_cvar_once("qlx_discordKeepTopicSuffixChannelIds", "")
Plugin.set_cvar_once("qlx_discordUpdateTopicInterval", "305")
Plugin.set_cvar_once("qlx_discordKeptTopicSuffixes", "{}")
Plugin.set_cvar_once("qlx_discordCommandPrefix", "!")
Plugin.set_cvar_once("qlx_discordTriggerTriggeredChannelChat", "quakelive")
Plugin.set_cvar_once("qlx_discordTriggerStatus", "status")
Plugin.set_cvar_once("qlx_discordMessagePrefix", "[DISCORD]")
Plugin.set_cvar_once("qlx_discordEnableHelp", "1")
Plugin.set_cvar_once("qlx_discordEnableVersion", "1")
Plugin.set_cvar_once("qlx_displayChannelForDiscordRelayChannels", "1")
Plugin.set_cvar_once("qlx_discordQuakeRelayMessageFilters", r"^\!s$, ^\!p$")
Plugin.set_cvar_once("qlx_discordReplaceMentionsForRelayedMessages", "1")
Plugin.set_cvar_once("qlx_discordReplaceMentionsForTriggeredMessages", "1")
Plugin.set_cvar_once("qlx_discordAdminPassword", "<PASSWORD>")
Plugin.set_cvar_once("qlx_discordAuthCommand", "auth")
Plugin.set_cvar_once("qlx_discordExecPrefix", "qlx")
Plugin.set_cvar_once("qlx_discordLogToSeparateLogfile", "0")
# get the actual cvar values from the server
self.discord_message_filters = Plugin.get_cvar("qlx_discordQuakeRelayMessageFilters", set)
# adding general plugin hooks
self.add_hook("unload", self.handle_plugin_unload)
self.add_hook("chat", self.handle_ql_chat, priority=minqlx.PRI_LOWEST)
self.add_hook("player_connect", self.handle_player_connect, priority=minqlx.PRI_LOWEST)
self.add_hook("player_disconnect", self.handle_player_disconnect, priority=minqlx.PRI_LOWEST)
self.add_hook("map", self.handle_map)
self.add_hook("vote_started", self.handle_vote_started)
self.add_hook("vote_ended", self.handle_vote_ended)
self.add_hook("game_countdown", self.handle_game_countdown_or_end, priority=minqlx.PRI_LOWEST)
self.add_hook("game_end", self.handle_game_countdown_or_end, priority=minqlx.PRI_LOWEST)
self.add_command("discord", self.cmd_discord, usage="<message>")
self.add_command("discordbot", self.cmd_discordbot, permission=1,
usage="[status]|connect|disconnect|reconnect")
# initialize the discord bot and its interactions on the discord server
if discord_client is None:
self.discord = SimpleAsyncDiscord(self.version_information(), self.logger)
else:
self.discord = discord_client
self.logger.info("Connecting to Discord...")
self.discord.start()
self.logger.info(self.version_information())
Plugin.msg(self.version_information())
def version_information(self):
return "{} Version: {}".format(self.name, plugin_version)
def handle_plugin_unload(self, plugin):
"""
Handler when a plugin is unloaded to make sure, that the connection to discord is properly closed when this
plugin is unloaded.
:param plugin: the plugin that was unloaded.
"""
if plugin == self.__class__.__name__:
self.discord.stop()
@staticmethod
def game_status_information(game: minqlx.Game):
"""
Generate the text for the topic set on discord channels.
:param game: the game to derive the status information from
:return: the topic that represents the current game state.
"""
ginfo = mydiscordbot.get_game_info(game)
num_players = len(Plugin.players())
max_players = game.maxclients
maptitle = game.map_title if game.map_title else game.map
gametype = game.type_short.upper()
# CAUTION: if you change anything on the next line, you may need to change the topic_ending logic in
# :func:`mydiscordbot.update_topic_on_triggered_channels(self, topic)` to keep the right portion
# of the triggered relay channels' topics!
return "{0} on **{1}** ({2}) with **{3}/{4}** players. ".format(ginfo,
Plugin.clean_text(maptitle),
gametype,
num_players,
max_players)
@staticmethod
def get_game_info(game):
"""
Helper to format the current game.state that may be used in status messages and setting of channel topics.
:param game: the game object to derive the information from
:return: the current text representation of the game state
"""
if game.state == "warmup":
return "Warmup"
if game.state == "countdown":
return "Match starting"
if game.roundlimit in [game.blue_score, game.red_score] or game.red_score < 0 or game.blue_score < 0:
return "Match ended: **{}** - **{}**".format(game.red_score, game.blue_score)
if game.state == "in_progress":
return "Match in progress: **{}** - **{}**".format(game.red_score, game.blue_score)
return "Warmup"
@staticmethod
def player_data():
"""
        Formats the players of both teams, sorted by score, in a string. The return value may be used for status
        messages and in topics to reveal more data about the server and its current game.
        :return: string of the current players of each team together with their scores
"""
player_data = ""
teams = Plugin.teams()
if len(teams['red']) > 0:
player_data += "\n**R:** {}".format(mydiscordbot.team_data(teams['red']))
if len(teams['blue']) > 0:
player_data += "\n**B:** {}".format(mydiscordbot.team_data(teams['blue']))
return player_data
@staticmethod
def team_data(player_list, limit=None):
"""
generates a sorted output of the team's player by their score
:param player_list: the list of players to generate the team output for
:param limit: (default: None) just list the top players up to the given limit
:return: a discord ready text representation of the player's of that team by their score
"""
if len(player_list) == 0:
return ""
players_by_score = sorted(player_list, key=lambda k: k.score, reverse=True)
if limit:
players_by_score = players_by_score[:limit]
team_data = ""
for player in players_by_score:
team_data += "**{}**({}) ".format(mydiscordbot.escape_text_for_discord(player.clean_name), player.score)
return team_data
def is_filtered_message(self, msg):
"""
Checks whether the given message should be filtered and not be sent to discord.
:param msg: the message to check whether it should be filtered
:return whether the message should not be relayed to discord
"""
for message_filter in self.discord_message_filters:
matcher = re.compile(message_filter)
if matcher.match(msg):
return True
return False
def handle_ql_chat(self, player: minqlx.Player, msg, channel: minqlx.AbstractChannel):
"""
        Handler function for all chat messages on the server. This function will forward any messages on the Quake Live
server to discord.
:param player: the player that sent the message
:param msg: the message that was sent
        :param channel: the channel the message was sent to
"""
handled_channels = {"chat": "",
"red_team_chat": " *(to red team)*",
"blue_team_chat": " *(to blue team)*",
"spectator_chat": " *(to specs)*"}
if channel.name not in handled_channels:
return
if self.is_filtered_message(msg):
return
if channel.name in ["red_team_chat", "blue_team_chat"]:
self.discord.relay_team_chat_message(player, handled_channels[channel.name], Plugin.clean_text(msg))
return
self.discord.relay_chat_message(player, handled_channels[channel.name], Plugin.clean_text(msg))
@minqlx.delay(3)
def handle_player_connect(self, player: minqlx.Player):
"""
Handler called when a player connects. The method sends a corresponding message to the discord relay channels,
and updates the relay channel topic as well as the trigger channels, when configured.
:param player: the player that connected
"""
content = "_{} connected._".format(mydiscordbot.escape_text_for_discord(player.clean_name))
self.discord.relay_message(content)
@staticmethod
def escape_text_for_discord(text):
"""
Escapes the provided player's name for proper formatting to discord (i.e. replace '*' (asterisks) with a
variant to not interfere with discord's formattings.)
:param text: the text that shall be escaped for discord chat channels
"""
escaped_text = text.replace('_', r'\_')
escaped_text = escaped_text.replace('*', r"\*")
return escaped_text
@minqlx.delay(3)
def handle_player_disconnect(self, player: minqlx.Player, reason):
"""
Handler called when a player disconnects. The method sends a corresponding message to the discord relay
channels, and updates the relay channel topic as well as the trigger channels, when configured.
:param player: the player that connected
:param reason: the reason why the player left
"""
if reason in ["disconnected", "timed out", "was kicked", "was kicked."]:
reason_str = "{}.".format(reason)
else:
reason_str = "was kicked ({}).".format(mydiscordbot.escape_text_for_discord(Plugin.clean_text(reason)))
content = "_{} {}_".format(mydiscordbot.escape_text_for_discord(player.clean_name),
reason_str)
self.discord.relay_message(content)
def handle_map(self, mapname, factory):
"""
Handler called when a map is changed. The method sends a corresponding message to the discord relay channels.
and updates the relay channel topic as well as the trigger channels, when configured.
:param mapname: the new map
:param factory: the map factory used
"""
content = "*Changing map to {}...*".format(mydiscordbot.escape_text_for_discord(mapname))
self.discord.relay_message(content)
def handle_vote_started(self, caller, vote, args):
"""
Handler called when a vote was started. The method sends a corresponding message to the discord relay channels.
:param caller: the player that initiated the vote
:param vote: the vote itself, i.e. map change, kick player, etc.
:param args: any arguments of the vote, i.e. map name, which player to kick, etc.
"""
caller_name = mydiscordbot.escape_text_for_discord(caller.clean_name) if caller else "The server"
content = "_{} called a vote: {} {}_".format(caller_name,
vote,
mydiscordbot.escape_text_for_discord(Plugin.clean_text(args)))
self.discord.relay_message(content)
def handle_vote_ended(self, votes, vote, args, passed):
"""
Handler called when a vote was passed or failed. The method sends a corresponding message to the discord relay
channels.
:param votes: the final votes
:param vote: the initial vote that passed or failed, i.e. map change, kick player, etc.
:param args: any arguments of the vote, i.e. map name, which player to kick, etc.
:param passed: boolean indicating whether the vote passed
"""
if passed:
content = "*Vote passed ({} - {}).*".format(*votes)
else:
content = "*Vote failed.*"
self.discord.relay_message(content)
@minqlx.delay(1)
def handle_game_countdown_or_end(self, *args, **kwargs):
"""
Handler called when the game is in countdown, i.e. about to start. This function mainly updates the topics of
the relay channels and the triggered channels (when configured), and sends a message to all relay channels.
"""
game = self.game
if game is None:
return
topic = mydiscordbot.game_status_information(game)
top5_players = mydiscordbot.player_data()
self.discord.relay_message("{}{}".format(topic, top5_players))
def cmd_discord(self, player: minqlx.Player, msg, channel):
"""
Handler of the !discord command. Forwards any messages after !discord to the discord triggered relay channels.
        :param player: the player that sent the trigger
:param msg: the message the player sent (includes the trigger)
:param channel: the channel the message came through, i.e. team chat, general chat, etc.
"""
# when the message did not include anything to forward, show the usage help text.
if len(msg) < 2:
return minqlx.RET_USAGE
self.discord.triggered_message(player, Plugin.clean_text(" ".join(msg[1:])))
self.msg("Message to Discord chat cast!")
def cmd_discordbot(self, player: minqlx.Player, msg, channel):
"""
Handler for reconnecting the discord bot to discord in case it gets disconnected.
        :param player: the player that sent the trigger
:param msg: the original message the player sent (includes the trigger)
:param channel: the channel the message came through, i.e. team chat, general chat, etc.
"""
if len(msg) > 2 or (len(msg) == 2 and msg[1] not in ["status", "connect", "disconnect", "reconnect"]):
return minqlx.RET_USAGE
if len(msg) == 2 and msg[1] == "connect":
self.logger.info("Connecting to Discord...")
channel.reply("Connecting to Discord...")
self.connect_discord()
return
if len(msg) == 2 and msg[1] == "disconnect":
self.logger.info("Disconnecting from Discord...")
channel.reply("Disconnecting from Discord...")
self.disconnect_discord()
return
if len(msg) == 2 and msg[1] == "reconnect":
self.logger.info("Reconnecting to Discord...")
channel.reply("Reconnecting to Discord...")
self.disconnect_discord()
self.connect_discord()
return
channel.reply(self.discord.status())
return
@minqlx.thread
def connect_discord(self):
if self.discord.is_discord_logged_in():
return
self.discord.run()
@minqlx.thread
def disconnect_discord(self):
if not self.discord.is_discord_logged_in():
return
self.discord.stop()
class MinqlxHelpCommand(DefaultHelpCommand):
"""
A help formatter for the minqlx plugin's bot to provide help information. This is a customized variation of
discord.py's :class:`DefaultHelpCommand`.
"""
def __init__(self):
super().__init__(no_category="minqlx Commands")
def get_ending_note(self):
"""
Provides the ending_note for the help output.
"""
command_name = self.context.invoked_with
return "Type {0}{1} command for more info on a command.".format(self.clean_prefix, command_name)
async def send_error_message(self, error):
pass
class DiscordChannel(minqlx.AbstractChannel):
"""
a minqlx channel class to respond to from within minqlx for interactions with discord
"""
def __init__(self, client, author, discord_channel):
super().__init__("discord")
self.client = client
self.author = author
self.discord_channel = discord_channel
def __repr__(self):
return "{} {}".format(str(self), self.author.display_name)
def reply(self, msg):
"""
overwrites the channel.reply function to relay messages to discord
:param msg: the message to send to this channel
"""
self.client.send_to_discord_channels({self.discord_channel.id}, Plugin.clean_text(msg))
class DiscordDummyPlayer(minqlx.AbstractDummyPlayer):
"""
a minqlx dummy player class to relay messages to discord
"""
def __init__(self, client, author, discord_channel):
self.client = client
self.author = author
self.discord_channel = discord_channel
super().__init__(name="Discord-{}".format(author.display_name))
@property
def steam_id(self):
return minqlx.owner()
@property
def channel(self):
return DiscordChannel(self.client, self.author, self.discord_channel)
def tell(self, msg):
"""
overwrites the player.tell function to relay messages to discord
:param msg: the msg to send to this player
"""
self.client.send_to_discord_channels({self.discord_channel.id}, Plugin.clean_text(msg))
class SimpleAsyncDiscord(threading.Thread):
"""
SimpleAsyncDiscord client which is used to communicate to discord, and provides certain commands in the relay and
triggered channels as well as private authentication to the bot to admin the server.
"""
def __init__(self, version_information, logger):
"""
Constructor for the SimpleAsyncDiscord client the discord bot runs in.
:param version_information: the plugin's version_information string
:param logger: the logger used for logging, usually passed through from the minqlx plugin.
"""
super().__init__()
self.version_information = version_information
self.logger = logger
self.discord = None
self.authed_discord_ids = set()
self.auth_attempts = {}
self.discord_bot_token = Plugin.get_cvar("qlx_discordBotToken")
self.discord_relay_channel_ids = SimpleAsyncDiscord.int_set(Plugin.get_cvar("qlx_discordRelayChannelIds", set))
self.discord_relay_team_chat_channel_ids = SimpleAsyncDiscord.int_set(
Plugin.get_cvar("qlx_discordRelayTeamchatChannelIds", set))
self.discord_triggered_channel_ids = SimpleAsyncDiscord.int_set(
Plugin.get_cvar("qlx_discordTriggeredChannelIds", set))
self.discord_triggered_channel_message_prefix = Plugin.get_cvar("qlx_discordTriggeredChatMessagePrefix")
self.discord_update_triggered_channels_topic = \
Plugin.get_cvar("qlx_discordUpdateTopicOnTriggeredChannels", bool)
self.discord_topic_update_interval = Plugin.get_cvar("qlx_discordUpdateTopicInterval", int)
self.discord_keep_topic_suffix_channel_ids = SimpleAsyncDiscord.int_set(
Plugin.get_cvar("qlx_discordKeepTopicSuffixChannelIds", set))
self.discord_kept_topic_suffixes = eval(Plugin.get_cvar("qlx_discordKeptTopicSuffixes", str))
self.discord_trigger_triggered_channel_chat = Plugin.get_cvar("qlx_discordTriggerTriggeredChannelChat")
self.discord_command_prefix = Plugin.get_cvar("qlx_discordCommandPrefix")
self.discord_help_enabled = Plugin.get_cvar("qlx_discordEnableHelp", bool)
self.discord_version_enabled = Plugin.get_cvar("qlx_discordEnableVersion", bool)
self.discord_trigger_status = Plugin.get_cvar("qlx_discordTriggerStatus")
self.discord_message_prefix = Plugin.get_cvar("qlx_discordMessagePrefix")
self.discord_show_relay_channel_names = Plugin.get_cvar("qlx_displayChannelForDiscordRelayChannels", bool)
self.discord_replace_relayed_mentions = Plugin.get_cvar("qlx_discordReplaceMentionsForRelayedMessages", bool)
self.discord_replace_triggered_mentions = \
Plugin.get_cvar("qlx_discordReplaceMentionsForTriggeredMessages", bool)
        self.discord_admin_password = Plugin.get_cvar("qlx_discordAdminPassword")
self.discord_auth_command = Plugin.get_cvar("qlx_discordAuthCommand")
self.discord_exec_prefix = Plugin.get_cvar("qlx_discordExecPrefix")
extended_logging_enabled = Plugin.get_cvar("qlx_discordLogToSeparateLogfile", bool)
if extended_logging_enabled:
self.setup_extended_logger()
def setup_extended_logger(self):
discordLogger = logging.getLogger("discord")
discordLogger.setLevel(logging.DEBUG)
# File
file_path = os.path.join(minqlx.get_cvar("fs_homepath"), "minqlx_discord.log")
maxlogs = minqlx.Plugin.get_cvar("qlx_logs", int)
maxlogsize = minqlx.Plugin.get_cvar("qlx_logsSize", int)
file_fmt = logging.Formatter("(%(asctime)s) [%(levelname)s @ %(name)s.%(funcName)s] %(message)s", "%H:%M:%S")
file_handler = RotatingFileHandler(file_path, encoding="utf-8", maxBytes=maxlogsize, backupCount=maxlogs)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(file_fmt)
discordLogger.addHandler(file_handler)
# Console
console_fmt = logging.Formatter("[%(name)s.%(funcName)s] %(levelname)s: %(message)s", "%H:%M:%S")
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(console_fmt)
discordLogger.addHandler(console_handler)
@staticmethod
def int_set(string_set):
int_set = set()
for item in string_set:
if item == '':
continue
value = int(item)
int_set.add(value)
return int_set
def status(self):
if self.discord is None:
return "No discord connection set up."
if self.is_discord_logged_in():
return "Discord connection up and running."
return "Discord client not connected."
def run(self):
"""
Called when the SimpleAsyncDiscord thread is started. We will set up the bot here with the right commands, and
run the discord.py bot in a new event_loop until completed.
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
members_intent = self.discord_replace_relayed_mentions or self.discord_replace_triggered_mentions
intents = discord.Intents(members=members_intent, guilds=True, bans=False, emojis=False, integrations=False,
webhooks=False, invites=False, voice_states=False, presences=False, messages=True,
guild_messages=True, dm_messages=True, reactions=False, guild_reactions=False,
dm_reactions=False, typing=False, guild_typing=False, dm_typing=False)
# init the bot, and init the main discord interactions
if self.discord_help_enabled:
self.discord = Bot(command_prefix=self.discord_command_prefix,
description="{}".format(self.version_information),
help_command=MinqlxHelpCommand(), loop=loop, intents=intents)
else:
self.discord = Bot(command_prefix=self.discord_command_prefix,
description="{}".format(self.version_information),
help_command=None, loop=loop, intents=intents)
self.initialize_bot(self.discord)
# connect the now configured bot to discord in the event_loop
self.discord.loop.run_until_complete(self.discord.start(self.discord_bot_token))
def initialize_bot(self, discord_bot):
"""
initializes a discord bot with commands and listeners on this pseudo cog class
:param discord_bot: the discord_bot to initialize
"""
discord_bot.add_command(Command(self.auth, name=self.discord_auth_command,
checks=[self.is_private_message, lambda ctx: not self.is_authed(ctx),
lambda ctx: not self.is_barred_from_auth(ctx)],
hidden=True,
pass_context=True,
help="auth with the bot"))
discord_bot.add_command(Command(self.qlx, name=self.discord_exec_prefix,
checks=[self.is_private_message, self.is_authed],
hidden=True,
pass_context=True,
help="execute minqlx commands on the server"))
discord_bot.add_command(Command(self.trigger_status, name=self.discord_trigger_status,
checks=[self.is_message_in_relay_or_triggered_channel],
pass_context=True,
ignore_extra=False,
help="display current game status information"))
discord_bot.add_command(Command(self.triggered_chat, name=self.discord_trigger_triggered_channel_chat,
checks=[self.is_message_in_triggered_channel],
pass_context=True,
help="send [message...] to the Quake Live server"))
discord_bot.add_listener(self.on_ready)
discord_bot.add_listener(self.on_message)
if self.discord_version_enabled:
discord_bot.add_command(Command(self.version, name="version",
pass_context=True,
ignore_extra=False,
help="display the plugin's version information"))
def reply_to_context(self, ctx, message):
return ctx.send(message)
async def version(self, ctx):
"""
Triggers the plugin's version information sent to discord
:param ctx: the context the trigger happened in
"""
await self.reply_to_context(ctx, "```{}```".format(self.version_information))
def is_private_message(self, ctx):
"""
Checks whether a message was sent on a private chat to the bot
:param ctx: the context the trigger happened in
"""
return isinstance(ctx.message.channel, discord.DMChannel)
def is_authed(self, ctx):
"""
Checks whether a user is authed to the bot
:param ctx: the context the trigger happened in
"""
return ctx.message.author.id in self.authed_discord_ids
def is_barred_from_auth(self, ctx):
"""
Checks whether an author is currently barred from authentication to the bot
:param ctx: the context the trigger happened in
"""
return ctx.message.author.id in self.auth_attempts and self.auth_attempts[ctx.message.author.id] <= 0
async def auth(self, ctx, password: str):
"""
Handles the authentication to the bot via private message
:param ctx: the context of the original message sent for authentication
:param password: the password to authenticate
"""
if password == self.discord_admin_password:
self.authed_discord_ids.add(ctx.message.author.id)
await self.reply_to_context(ctx, "You have been successfully authenticated. "
"You can now use {}{} to execute commands."
.format(self.discord_command_prefix, self.discord_exec_prefix))
return
# Allow up to 3 attempts for the user's discord id to authenticate.
if ctx.message.author.id not in self.auth_attempts:
self.auth_attempts[ctx.message.author.id] = 3
self.auth_attempts[ctx.message.author.id] -= 1
if self.auth_attempts[ctx.message.author.id] > 0:
await self.reply_to_context(ctx, "Wrong password. You have {} attempts left."
.format(self.auth_attempts[ctx.message.author.id]))
return
# User has reached maximum auth attempts, we will bar her/him from authentication for 5 minutes (300 seconds)
bar_delay = 300
await self.reply_to_context(ctx,
"Maximum authentication attempts reached. "
"You will be barred from authentication for {} seconds."
.format(bar_delay))
def f():
del self.auth_attempts[ctx.message.author.id]
threading.Timer(bar_delay, f).start()
async def qlx(self, ctx, *qlx_command: str):
"""
Handles exec messages from discord via private message to the bot
:param ctx: the context the trigger happened in
:param qlx_command: the command that was sent by the user
"""
@minqlx.next_frame
def f():
try:
minqlx.COMMANDS.handle_input(
DiscordDummyPlayer(self, ctx.message.author, ctx.message.channel),
" ".join(qlx_command),
DiscordChannel(self, ctx.message.author, ctx.message.channel))
except Exception as e:
send_message = ctx.send("{}: {}".format(e.__class__.__name__, e))
asyncio.run_coroutine_threadsafe(send_message, loop=ctx.bot.loop)
minqlx.log_exception()
f()
def is_message_in_relay_or_triggered_channel(self, ctx):
"""
Checks whether a message was either sent in a configured relay or triggered channel
:param ctx: the context the trigger happened in
"""
return ctx.message.channel.id in self.discord_relay_channel_ids | self.discord_triggered_channel_ids
async def trigger_status(self, ctx):
"""
Triggers game status information sent towards the originating channel
:param ctx: the context the trigger happened in
"""
try:
game = minqlx.Game()
ginfo = mydiscordbot.get_game_info(game)
num_players = len(Plugin.players())
max_players = game.maxclients
maptitle = game.map_title if game.map_title else game.map
gametype = game.type_short.upper()
reply = "{0} on **{1}** ({2}) with **{3}/{4}** players. {5}".format(
ginfo,
Plugin.clean_text(maptitle),
gametype,
num_players,
max_players,
mydiscordbot.player_data())
except minqlx.NonexistentGameError:
reply = "Currently no game running."
if self.is_message_in_triggered_channel(ctx):
reply = "{0} {1}".format(self.discord_triggered_channel_message_prefix, reply)
await self.reply_to_context(ctx, reply)
def is_message_in_triggered_channel(self, ctx):
"""
Checks whether the message originate in a configured triggered channel
:param ctx: the context the trigger happened in
"""
return ctx.message.channel.id in self.discord_triggered_channel_ids
async def triggered_chat(self, ctx, *message: str):
"""
Relays a message from the triggered channels to minqlx
:param ctx: the context the trigger happened in
:param message: the message to send to minqlx
"""
prefix_length = len("{}{} ".format(ctx.prefix, ctx.invoked_with))
minqlx.CHAT_CHANNEL.reply(
self._format_message_to_quake(ctx.message.channel,
ctx.message.author,
ctx.message.clean_content[prefix_length:]))
def _format_message_to_quake(self, channel, author, content):
"""
Format the channel, author, and content of a message so that it will be displayed nicely in the Quake Live
console.
:param channel: the channel, the message came from.
:param author: the author of the original message.
:param content: the message itself, ideally taken from message.clean_content to avoid ids of mentioned users
and channels on the discord server.
:return: the formatted message that may be sent back to Quake Live.
"""
sender = author.name
if author.nick is not None:
sender = author.nick
if not self.discord_show_relay_channel_names and channel.id in self.discord_relay_channel_ids:
return "{0} ^6{1}^7:^2 {2}".format(self.discord_message_prefix, sender, content)
return "{0} ^5#{1.name} ^6{2}^7:^2 {3}".format(self.discord_message_prefix, channel, sender, content)
async def on_ready(self):
"""
Function called once the bot connected. Mainly displays status update from the bot in the game console
and server logfile, and sets the bot to playing Quake Live on discord.
"""
self.logger.info("Logged in to discord as: {} ({})".format(self.discord.user.name, self.discord.user.id))
Plugin.msg("Connected to discord")
await self.discord.change_presence(activity=discord.Game(name="Quake Live"))
self._topic_updater()
async def on_message(self, message):
"""
        Function called once a message is sent through discord. Here the main interaction points either back to
Quake Live or discord happen.
:param message: the message that was sent.
"""
        # guard clause so that None messages are not processed.
if not message:
return
# if the bot sent the message himself, do nothing.
if message.author == self.discord.user:
return
# relay all messages from the relay channels back to Quake Live.
if message.channel.id in self.discord_relay_channel_ids:
content = message.clean_content
if len(content) > 0:
minqlx.CHAT_CHANNEL.reply(
self._format_message_to_quake(message.channel, message.author, content))
async def on_command_error(self, exception, ctx):
"""
overrides the default command error handler so that no exception is produced for command errors
Might be changed in the future to log those problems to the minqlx.logger
"""
pass
def _topic_updater(self):
try:
game = minqlx.Game()
except minqlx.NonexistentGameError:
return
topic = mydiscordbot.game_status_information(game)
self.update_topics_on_relay_and_triggered_channels(topic)
threading.Timer(self.discord_topic_update_interval, self._topic_updater).start()
def update_topics_on_relay_and_triggered_channels(self, topic):
"""
Helper function to update the topics on all the relay and all the triggered channels
:param topic: the topic to set on all the channels
"""
if not self.is_discord_logged_in():
return
if self.discord_update_triggered_channels_topic:
topic_channel_ids = self.discord_relay_channel_ids | self.discord_triggered_channel_ids
else:
topic_channel_ids = self.discord_relay_channel_ids
# directly set the topic on channels with no topic suffix
self.set_topic_on_discord_channels(topic_channel_ids - self.discord_keep_topic_suffix_channel_ids, topic)
# keep the topic suffix on the channels that are configured accordingly
self.update_topic_on_channels_and_keep_channel_suffix(
topic_channel_ids & self.discord_keep_topic_suffix_channel_ids, topic)
def set_topic_on_discord_channels(self, channel_ids, topic):
"""
Set the topic on a set of channel_ids on discord provided.
:param channel_ids: the ids of the channels the topic should be set upon.
:param topic: the new topic that should be set.
"""
# if we were not provided any channel_ids, do nothing.
if not channel_ids or len(channel_ids) == 0:
return
# set the topic in its own thread to avoid blocking of the server
for channel_id in channel_ids:
channel = self.discord.get_channel(channel_id)
if channel is None:
continue
asyncio.run_coroutine_threadsafe(channel.edit(topic=topic), loop=self.discord.loop)
def is_discord_logged_in(self):
if self.discord is None:
return False
return not self.discord.is_closed() and self.discord.is_ready()
def update_topic_on_channels_and_keep_channel_suffix(self, channel_ids, topic):
"""
Updates the topic on the given channels and keeps the topic suffix intact on the configured channels
:param channel_ids: the set of channels to update the topic on
:param topic: the topic to set on the given channels
"""
        # if there are no channels configured, do nothing.
if not channel_ids or len(channel_ids) == 0:
return
# take the final 10 characters from the topic, and search for it in the current topic
topic_ending = topic[-10:]
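        # e.g. (illustrative values) if the generated topic ends with "players. " and the channel
        # currently reads "... with **4/16** players.  | Join our discord", the trailing
        # " | Join our discord" part is detected below and re-appended as the kept suffix.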
for channel_id in channel_ids:
previous_topic = self.get_channel_topic(channel_id)
if previous_topic is None:
previous_topic = topic
# preserve the original channel's topic.
position = previous_topic.find(topic_ending)
topic_suffix = previous_topic[position + len(topic_ending):] if position != -1 else previous_topic
if channel_id in self.discord_kept_topic_suffixes:
topic_suffix = self.discord_kept_topic_suffixes[channel_id]
# update the topic on the triggered channels
self.set_topic_on_discord_channels({channel_id}, "{}{}".format(topic, topic_suffix))
def get_channel_topic(self, channel_id):
"""
get the topic of the provided channel id
:param channel_id: the id of the channel to get the topic from
:return: the topic of the channel
"""
channel = self.discord.get_channel(channel_id)
if channel is None:
return None
return channel.topic
def stop(self):
"""
stops the discord client
"""
if self.discord is None:
return
asyncio.run_coroutine_threadsafe(self.discord.change_presence(status="offline"), loop=self.discord.loop)
asyncio.run_coroutine_threadsafe(self.discord.logout(), loop=self.discord.loop)
def relay_message(self, msg):
"""
relay a message to the configured relay_channels
:param msg: the message to send to the relay channel
"""
self.send_to_discord_channels(self.discord_relay_channel_ids, msg)
def send_to_discord_channels(self, channel_ids, content):
"""
Send a message to a set of channel_ids on discord provided.
:param channel_ids: the ids of the channels the message should be sent to.
:param content: the content of the message to send to the discord channels
"""
if not self.is_discord_logged_in():
return
# if we were not provided any channel_ids, do nothing.
if not channel_ids or len(channel_ids) == 0:
return
# send the message in its own thread to avoid blocking of the server
for channel_id in channel_ids:
channel = self.discord.get_channel(channel_id)
if channel is None:
continue
asyncio.run_coroutine_threadsafe(
channel.send(content,
allowed_mentions=AllowedMentions(everyone=False, users=True, roles=True)),
loop=self.discord.loop)
def relay_chat_message(self, player, channel, message):
"""
relay a message to the given channel
:param player: the player that originally sent the message
:param channel: the channel the original message came through
:param message: the content of the message
"""
if self.discord_replace_relayed_mentions:
message = self.replace_user_mentions(message, player)
message = self.replace_channel_mentions(message, player)
content = "**{}**{}: {}".format(mydiscordbot.escape_text_for_discord(player.clean_name), channel, message)
self.relay_message(content)
def relay_team_chat_message(self, player, channel, message):
"""
relay a team_chat message, that might be hidden to the given channel
:param player: the player that originally sent the message
:param channel: the channel the original message came through
:param message: the content of the message
"""
if self.discord_replace_relayed_mentions:
message = self.replace_user_mentions(message, player)
message = self.replace_channel_mentions(message, player)
content = "**{}**{}: {}".format(mydiscordbot.escape_text_for_discord(player.clean_name), channel, message)
self.send_to_discord_channels(self.discord_relay_team_chat_channel_ids, content)
def replace_user_mentions(self, message, player=None):
"""
        replaces a mentioned discord user (indicated by a @user hint) with a real mention
:param message: the message to replace the user mentions in
:param player: (default: None) when several alternatives are found for the mentions used, this player is told
what the alternatives are. No replacements for the ambiguous substitutions will happen.
:return: the original message replaced by properly formatted user mentions
"""
if not self.is_discord_logged_in():
return message
returned_message = message
# this regular expression will make sure that the "@user" has at least three characters, and is either
# prefixed by a space or at the beginning of the string
matcher = re.compile("(?:^| )@([^ ]{3,})")
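        # e.g. (hypothetical name) "gg @ShiN0" captures "ShiN0"; if exactly one guild member matches
        # that name or nick, the plain text is replaced with a proper discord mention below.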
member_list = [user for user in self.discord.get_all_members()]
matches = matcher.findall(returned_message)
for match in sorted(matches, key=lambda user_match: len(user_match), reverse=True):
if match in ["all", "everyone", "here"]:
continue
member = SimpleAsyncDiscord.find_user_that_matches(match, member_list, player)
if member is not None:
returned_message = returned_message.replace("@{}".format(match), member.mention)
return returned_message
@staticmethod
def find_user_that_matches(match, member_list, player=None):
"""
find a user that matches the given match
:param match: the match to look for in the user name and nick
:param member_list: the list of members connected to the discord server
:param player: (default: None) when several alternatives are found for the mentions used, this player is told
what the alternatives are. None is returned in that case.
:return: the matching member, or None if none or more than one are found
"""
# try a direct match for the whole name first
member = [user for user in member_list if user.name.lower() == match.lower()]
if len(member) == 1:
return member[0]
# then try a direct match at the user's nickname
member = [user for user in member_list if user.nick is not None and user.nick.lower() == match.lower()]
if len(member) == 1:
return member[0]
# if direct searches for the match fail, we try to match portions of the name or portions of the nick, if set
member = [user for user in member_list
if user.name.lower().find(match.lower()) != -1 or
(user.nick is not None and user.nick.lower().find(match.lower()) != -1)]
if len(member) == 1:
return list(member)[0]
# we found more than one matching member, let's tell the player about this.
if len(member) > 1 and player is not None:
player.tell("Found ^6{}^7 matching discord users for @{}:".format(len(member), match))
alternatives = ""
for alternative_member in member:
alternatives += "@{} ".format(alternative_member.name)
player.tell(alternatives)
return None
def replace_channel_mentions(self, message, player=None):
"""
        replaces a mentioned discord channel (indicated by a #channel hint) with a real mention
:param message: the message to replace the channel mentions in
:param player: (default: None) when several alternatives are found for the mentions used, this player is told
what the alternatives are. No replacements for the ambiguous substitutions will happen.
:return: the original message replaced by properly formatted channel mentions
"""
if not self.is_discord_logged_in():
return message
returned_message = message
# this regular expression will make sure that the "#channel" has at least three characters, and is either
# prefixed by a space or at the beginning of the string
matcher = re.compile("(?:^| )#([^ ]{3,})")
channel_list = [ch for ch in self.discord.get_all_channels()
if ch.type in [ChannelType.text, ChannelType.voice, ChannelType.group]]
matches = matcher.findall(returned_message)
for match in sorted(matches, key=lambda channel_match: len(channel_match), reverse=True):
channel = SimpleAsyncDiscord.find_channel_that_matches(match, channel_list, player)
if channel is not None:
returned_message = returned_message.replace("#{}".format(match), channel.mention)
return returned_message
@staticmethod
def find_channel_that_matches(match, channel_list, player=None):
"""
find a channel that matches the given match
:param match: the match to look for in the channel name
:param channel_list: the list of channels connected to the discord server
:param player: (default: None) when several alternatives are found for the mentions used, this player is told
what the alternatives are. None is returned in that case.
:return: the matching channel, or None if none or more than one are found
"""
# try a direct channel name match case-sensitive first
channel = [ch for ch in channel_list if ch.name == match]
if len(channel) == 1:
return channel[0]
# then try a case-insensitive direct match with the channel name
channel = [ch for ch in channel_list if ch.name.lower() == match.lower()]
if len(channel) == 1:
return channel[0]
# then we try a match with portions of the channel name
channel = [ch for ch in channel_list if ch.name.lower().find(match.lower()) != -1]
if len(channel) == 1:
return channel[0]
# we found more than one matching channel, let's tell the player about this.
if len(channel) > 1 and player is not None:
player.tell("Found ^6{}^7 matching discord channels for #{}:".format(len(channel), match))
alternatives = ""
for alternative_channel in channel:
alternatives += "#{} ".format(alternative_channel.name)
player.tell(alternatives)
return None
def triggered_message(self, player, message):
"""
send a triggered message to the configured triggered_channel
:param player: the player that originally sent the message
:param message: the content of the message
"""
if not self.discord_triggered_channel_ids:
return
if self.discord_replace_triggered_mentions:
message = self.replace_user_mentions(message, player)
message = self.replace_channel_mentions(message, player)
if self.discord_triggered_channel_message_prefix is not None and \
self.discord_triggered_channel_message_prefix != "":
content = "{} **{}**: {}".format(self.discord_triggered_channel_message_prefix,
mydiscordbot.escape_text_for_discord(player.clean_name), message)
else:
content = "**{}**: {}".format(mydiscordbot.escape_text_for_discord(player.clean_name), message)
self.send_to_discord_channels(self.discord_triggered_channel_ids, content)
| 2.546875 | 3 |
coh_wn_read.py | Renata1995/Topic-Distance-and-Coherence | 5 | 12790013 | <reponame>Renata1995/Topic-Distance-and-Coherence<gh_stars>1-10
from coherence.wn import WordNetEvaluator
import sys
import utils.name_convention as name
from topic.topicio import TopicIO
from nltk.corpus import wordnet as wn
from nltk.corpus import reuters
import os
#
# syntax: python coh_wn_read.py <corpus type> <# of topics> <src> <wordnet method> <word count> <max_words> <startw>
# <corpus type> default to bag of words. b for binary, t for tf-idf, anything else or missing for bag of words
# <# of topics> number of topics. default to 3
# <src> src folder which contains documents for LDA
# <wordnet method> default to path
# <word count> the number of top words used in the calculation of topic coherence
# <max_words> specify the number of words the preprocessed file used
# <startw> the start point of collecting words
if len(sys.argv) <= 1:
corpus_type = "bow"
else:
if sys.argv[1] == "t":
corpus_type = "tfidf"
elif sys.argv[1] == "b":
corpus_type = "binary"
else:
corpus_type = "bow"
if len(sys.argv) <= 2:
topics_count = 3
else:
topics_count = int(sys.argv[2])
if len(sys.argv) <= 3:
src = "pp_reuters"
else:
src = sys.argv[3]
if len(sys.argv) <= 4:
tc = "path"
else:
tc = sys.argv[4]
if len(sys.argv) <= 5:
words_count = 10
else:
words_count = int(sys.argv[5])
if len(sys.argv) <= 6:
max_words = 250
else:
max_words = int(sys.argv[6])
if len(sys.argv) <= 7:
startw = 0
else:
startw = int(sys.argv[7])
dname = name.get_output_dir(corpus_type, topics_count, src)
# read topics
tio = TopicIO()
tlist = tio.read_topics(dname + name.topics_dir())
ifname = dname + name.te_preprocess(tc, max_words, startw=startw)
# calculate topic evaluation values
tclist = []
te = WordNetEvaluator()
for index, topic in enumerate(tlist):
tclist.append([index, te.get_values(topic, words_count, ifname, startw=startw)])
# sort the list by a descending order
tclist = list(reversed(sorted(tclist, key=lambda x: x[1][2])))
# output results
if not os.path.exists(dname+"/"+tc):
os.makedirs(dname+"/"+tc)
ofname = dname + "/" + tc + "/w0" + str(words_count) + "_start"+str(startw) + ".txt"
ofile = open(ofname, "w")
for value in tclist:
ofile.write("Topic " + str(value[0]) + "\n")
ofile.write("Mean " + str(value[1][1]) + "\n")
ofile.write("Median " + str(value[1][2]) + "\n")
ofile.write("Sum " + str(value[1][0]) + "\n")
for tcnum in value[1][3]:
ofile.write(str(tcnum) + "\n")
ofile.write("\n")
| 2.625 | 3 |
Advent2018/15.py | SSteve/AdventOfCode | 0 | 12790014 | from dataclasses import dataclass
from typing import List, NamedTuple
import numpy as np
from generic_search import bfsCave, nodeToPath
wall = "#"
emptySpace = "."
class GridLocation(NamedTuple):
column: int
row: int
def __lt__(self, other):
return self.row < other.row or \
self.row == other.row and self.column < other.column
def openLocations(cave, location: GridLocation) -> List[GridLocation]:
"""
Return a list of the open locations around the given location. The locations are
in reading order.
"""
available = []
row = cave[location.row]
if location.row > 0 and cave[location.row - 1, location.column] == ".":
available.append(GridLocation(location.column, location.row - 1))
if location.column > 0 and row[location.column - 1] == ".":
available.append(GridLocation(location.column - 1, location.row))
if location.column + 1 < len(row) and row[location.column + 1] == ".":
available.append(GridLocation(location.column + 1, location.row))
if location.row + 1 < len(cave) and cave[location.row + 1, location.column] == ".":
available.append(GridLocation(location.column, location.row + 1))
return sorted(available)
def reachedLocation(currentLocation, goalLocation):
return abs(currentLocation.row - goalLocation.row) + abs(currentLocation.column - goalLocation.column) == 1
@dataclass
class Unit:
x: int
y: int
race: str
hitPoints: int = 200
attackDamage: int = 3
def __str__(self):
return f"{self.race}({self.hitPoints})"
def __lt__(self, other):
if self.y != other.y:
return self.y < other.y
return self.x < other.x
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def location(self):
return GridLocation(self.x, self.y)
def sameLocation(self, other):
"""
Return True if this unit is at the same location as other
"""
return self.x == other.x and self.y == other.y
def atLocation(self, x, y):
"""
Return True if this unit is at this x,y location
"""
return self.x == x and self.y == y
def distanceTo(self, other):
"""
Return the Manhattan distance between this unit and other
Keyword arguments:
other -- The other unit.
"""
return abs(self.x - other.x) + abs(self.y - other.y)
def canAttack(self, units):
"""
Return True if there is an enemy available to attack.
Keyword arguments:
units -- A list of all units. Does not need to be sorted.
"""
for unit in units:
if unit.hitPoints > 0 and unit.race != self.race and self.distanceTo(unit) == 1:
return True
return False
def enemyExists(self, units):
"""
Return True if an enemy exists. The enemy does not need to be available for attack.
Keyword arguments:
units -- A list of all units. Does not need to be sorted.
"""
for unit in units:
if unit.hitPoints > 0 and unit.race != self.race:
return True
return False
def availableEnemies(self, cave, units):
"""
Return a list of available enemies in the list
Keyword arguments:
units -- A list of all units. Does not need to be sorted.
cave -- The array representing the cave
"""
availableList = []
for unit in units:
if unit.hitPoints > 0 and unit.race != self.race and openLocations(cave, unit.location()):
availableList.append(unit)
return availableList
def move(self, cave, units) -> None:
targetLocation: GridLocation = None
shortestPath = None
enemies = self.availableEnemies(cave, units)
for enemy in enemies:
solution = bfsCave(self.location(), enemy.location(), reachedLocation, cave, openLocations)
if solution:
path = nodeToPath(solution)
# We found a path. Now see if it's a better candidate than one already found
pathEnd = path[-1]
if shortestPath is None or len(path) < len(shortestPath) or \
len(path) == len(shortestPath) and (pathEnd < targetLocation):
targetLocation = pathEnd
shortestPath = path
if shortestPath:
cave[self.y, self.x] = '.'
# The first step in the path is the current location so go to the second step
nextLocation: GridLocation = shortestPath[1]
self.x = nextLocation.column
self.y = nextLocation.row
cave[self.y, self.x] = self.race
def attack(self, cave, units):
"""
Attack an available enemy.
units -- A list of all units. Does not need to be sorted.
"""
target = None
for unit in units:
if unit.hitPoints > 0 and unit.race != self.race and self.distanceTo(unit) == 1:
if target is None or unit.hitPoints < target.hitPoints or \
unit.hitPoints == target.hitPoints and unit < target:
target = unit
if target is not None:
target.hitPoints -= self.attackDamage
if target.hitPoints <= 0:
cave[target.y, target.x] = "."
def printCave(cave, units, showScores=False):
for rowNumber, row in enumerate(cave):
scores = " "
for columnNumber, cell in enumerate(row):
print(cell, end='')
if showScores and cell in ["E", "G"]:
unit = next(unit for unit in units if unit.hitPoints > 0 and unit.atLocation(columnNumber, rowNumber))
scores += str(unit) + " "
if len(scores.strip()):
print(scores, end='')
print()
def loadPuzzle(puzzleName, elfAttackPower):
# Get the dimensions of the puzzle.
with open(puzzleName, "r") as infile:
puzzleHeight = 0
puzzleWidth = 0
for line in infile:
puzzleHeight += 1
puzzleWidth = max(puzzleWidth, len(line.rstrip()))
# Create the cave with the determined puzzle dimensions.
cave = np.full((puzzleHeight, puzzleWidth), '.', dtype=str)
units = []
# Populate the cave and the list of units.
with open(puzzleName, "r") as infile:
for rowNumber, line in enumerate(infile):
for columnNumber, cell in enumerate(line.rstrip()):
if cell in ['E', 'G']:
units.append(Unit(columnNumber, rowNumber, cell, attackDamage=3 if cell == 'G' else elfAttackPower))
cave[rowNumber, columnNumber] = cell
return cave, units
if __name__ == "15a":
cave, units = loadPuzzle("15.txt", 3)
finished = False
playRound = 0
while not finished:
for unit in units:
if unit.hitPoints <= 0:
continue
if not unit.enemyExists(units):
finished = True
break
if not unit.canAttack(units):
unit.move(cave, units)
unit.attack(cave, units)
if not finished:
playRound += 1
print(playRound)
livingUnits = [unit for unit in units if unit.hitPoints > 0]
units = sorted(livingUnits)
if __name__ == "__main__":
goblinsWin = True
elfAttackPower = 3
originalElfCount = 0
survivingElfCount = 0
while goblinsWin or survivingElfCount < originalElfCount:
elfAttackPower += 1
cave, units = loadPuzzle("15.txt", elfAttackPower)
originalElfCount = len([unit for unit in units if unit.race == "E"])
finished = False
playRound = 0
while not finished:
for unit in units:
if unit.hitPoints <= 0:
continue
if not unit.enemyExists(units):
finished = True
break
if not unit.canAttack(units):
unit.move(cave, units)
unit.attack(cave, units)
survivingElfCount = len([unit for unit in units if unit.race == "E" and unit.hitPoints > 0])
if survivingElfCount < originalElfCount:
finished = True
break
if not finished:
playRound += 1
print(playRound)
livingUnits = [unit for unit in units if unit.hitPoints > 0]
units = sorted(livingUnits)
goblinsWin = units[0].race == "G"
printCave(cave, units, showScores=True)
print(f"Combat ends after {playRound} full rounds")
hitPoints = sum([unit.hitPoints for unit in units])
survivingRace = "Goblins" if units[0].race == "G" else "Elves"
print(f"{survivingRace} win with {hitPoints} total hit points left")
print(f"Outcome: {playRound} * {hitPoints} = {playRound * hitPoints}")
print(f"Elf attack power: {elfAttackPower}")
| 3.53125 | 4 |
tourney/constants.py | seangeggie/tourney | 0 | 12790015 | from datetime import time, timedelta
# Will print all read events to stdout.
DEBUG = False
DATA_PATH = "~/.tourney"
CHANNEL_NAME = "foosball"
RTM_READ_DELAY = 0.5 # seconds
RECONNECT_DELAY = 5.0 # seconds
COMMAND_REGEX = "!(\\w+)\\s*(.*)"
REACTION_REGEX = ":(.+):"
SCORE_ARGS_REGEX = "(T\\d+)\\s+(\\d+)\\s+(T\\d+)\\s+(\\d+)"
WIN_ARGS_REGEX = "(\\d+)\\s+(\\d+)"
MORNING_ANNOUNCE = time(9)
MORNING_ANNOUNCE_DELTA = timedelta(hours=1)
REMINDER_ANNOUNCE = time(11)
REMINDER_ANNOUNCE_DELTA = timedelta(minutes=49)
MIDDAY_ANNOUNCE = time(11, 50)
MIDDAY_ANNOUNCE_DELTA = timedelta(minutes=10)
POSITIVE_REACTIONS = [
"+1",
"the_horns",
"metal",
"raised_hands",
"ok",
"ok_hand",
"fire",
"tada",
"confetti_ball"
]
NEGATIVE_REACTIONS = ["-1", "middle_finger"]
PRIVILEGED_COMMANDS = ["undoteams", "generate", "autoupdate"]
TEAM_NAMES = [
"Air Farce",
"Cereal Killers",
"Dangerous Dynamos",
"Designated Drinkers",
"Fire Breaking Rubber Duckies",
"Game of Throw-ins",
"Injured Reserve",
"One Hit Wonders",
"Our Uniforms Match",
"Pique Blinders",
"Pistons from the Past",
"Purple Cobras",
"Rabid Squirrels",
"Raging Nightmare",
"Recipe for Disaster",
"Shockwave",
"Smarty Pints",
"Straight off the Couch",
"Tenacious Turtles",
"The Abusement Park",
"The Flaming Flamingos",
"The League of Ordinary Gentlemen",
"The Meme Team",
"The Mullet Mafia",
"Thunderpants",
]
| 2.3125 | 2 |
proj01_ifelse/proj01.py | tristank23/vsa2018- | 0 | 12790016 | # Name:
# Date:
# proj01: A Simple Program
# Part I:
# This program asks the user for his/her name and grade.
#Then, it prints out a sentence that says the number of years until they graduate.
#var name
user_name = raw_input("Enter your name: ")
# user_grade = raw_input("Enter your grade: ")
# grad_year = 12 - int(user_grade)
# print user_name + ", you will graduate from high school in", grad_year, "years!"
# # Part II:
# # This program asks the user for his/her name and birth month.
# # Then, it prints a sentence that says the number of days and months until their birthday
# user_name_gram = user_name[0:1].upper() + user_name[1:].lower()
# print user_name_gram
user_month = raw_input("Enter your birth month (number): ")
user_day = raw_input("Enter your birth day (number): ")
user_month = int(user_month)
user_day = int(user_day)
current_month = 7
current_day = 9
if user_month >= current_month:
birth_month_count = user_month - current_month
else:
birth_month_count = 12 - (current_month - user_month)
if user_day >= current_day:
birth_day_count = user_day - current_day
else:
    birth_day_count = 30 - (current_day - user_day)
birth_month_count = birth_month_count - 1
print user_name + ", your birthday is in ", birth_month_count, "months and", birth_day_count, "days!"
user_age = raw_input("Enter your age: ")
user_age = int(user_age)
if user_age <= 7:
print "you can only watch G rated movies"
elif user_age > 7 and user_age < 13:
print "you can watch G and PG rated movies"
elif user_age >= 13 and user_age < 18:
print "you can watch G, PG and PG-13 rated movies"
else:
print "you can watch G, PG, PG-13 and R rated movies"
user_dog_count = raw_input("How many dogs do you have?: ")
user_dog_count = int(user_dog_count)
if user_dog_count == 0:
print "I suggest you get a dog, they are really fun!"
elif user_dog_count > 0 and user_dog_count <= 3:
print "Good for you!"
else:
print "Wow that's a lot of dogs!"
# If you complete extensions, describe your extensions here!
| 4.34375 | 4 |
main.py | vtlanglois/01-Interactive-Fiction | 0 | 12790017 | <filename>main.py
#!/usr/bin/env python3
import sys
import json
import os
assert sys.version_info >= (3,9), "This script requires at least Python 3.9"
# ----------------------------------------------------------------
def select_game():
#Get all json filenames within the json folder
path_to_json_files = "json/"
json_files = [pos_json for pos_json in os.listdir(path_to_json_files) if pos_json.endswith('.json')]
json_files = [json_file.replace(".json", "") for json_file in json_files]
response = ""
#Allow player to select json file
while True:
if response in json_files:
break
print("You have the following Game Paks:")
print(json_files)
print("Select a Game Pak:")
response = input()
#get the file, create a world, return the world
response = "json/"+response+".json"
    file = open(response)
world = json.load(file)
return world
# ----------------------------------------------------------------
def find_current_location(location_label):
if "passages" in world:
for passage in world["passages"]:
if location_label == passage["name"]:
return passage
return {}
# ----------------------------------------------------------------
def render(current_location, score, moves):
if "name" in current_location and "cleanText" in current_location:
# Display passage (render the world)
print("Moves: " + str(moves) + " | Score: " + str(score))
print("\n" + current_location["name"] + " - " + current_location["cleanText"])
# Print all passage links
for link in current_location["links"]:
print(" ->" + link["linkText"] + " - " + link["passageName"])
def get_input():
response = input("Enter option: ")
return response.upper().strip()
def update(current_location, location_label, response):
#if there is no response, return location_label argument
if response == "":
return location_label
# see if there are links in the current_location
if "links" in current_location:
#for each link, see if response matches a link
for link in current_location["links"]:
if(response == link["linkText"]):
return link["passageName"]
else:
print("Option not found.")
return location_label
# ----------------------------------------------------------------
world = select_game()
location_label = world["passages"][0]["name"]
current_location = {}
response = ""
score = 0
moves = 0
while True:
if response == "QUIT":
break
if "score" in current_location:
score+=current_location["score"]
location_label = update(current_location, location_label, response)
current_location = find_current_location(location_label)
render(current_location, score, moves)
response = get_input()
moves+=1
print("Thank you for playing!")
| 3.546875 | 4 |
src/sst/elements/ember/test/generateNidListRange.py | sudhanshu2/sst-elements | 58 | 12790018 | def generate( args ):
args = args.split(',')
start = int(args[0])
length = int(args[1])
#print 'generate', start, length
return str(start) + '-' + str( start + length - 1 )
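# Minimal usage sketch (not part of the original test utility): the argument
# string is "<start>,<length>" and the result is an inclusive nid range.
if __name__ == '__main__':
    print(generate("4,8"))  # prints 4-11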
| 3.640625 | 4 |
l2.py | Ebony-Ayers/awmms | 0 | 12790019 | import math
for i in range(1, 25):
r = math.floor(math.log2(i))
#j = i
#counter = 0
#while j != 1:
# j >>= 1
# counter += 1
j = i
counter = 0
	while j > 1:
j >>= 1
counter += 1
print(f"{i:2} {bin(i)[2:]:>5} {r} {counter}")
| 3.203125 | 3 |
setup.py | mtskelton/django-simple-export | 0 | 12790020 | <filename>setup.py
from distutils.core import setup
setup(name='django-simple-export',
version='0.1',
license='BSD',
packages=['simple_export'],
include_package_data=True,
description='Simple import / export utility for Django with large data support. Compatible with Mongoengine.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mtskelton/django-simple-export/',
install_requires=['django>=1.7', 'metamagic.json'],
requires=[],
data_files=['LICENSE', 'README.md'],
provides=['simple_export'],
py_modules=['simple_export.management.commands.simple_export', 'simple_export.management.commands.simple_import'],
classifiers=['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Framework :: Django',
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| 1.070313 | 1 |
mouseInteractive.py | wwwins/OpenCV-Samples | 0 | 12790021 | # -*- coding: utf-8 -*-
import cv2
import sys
import numpy as np
import argparse
imagePath = "img.png"
sx = sy = None
previewImage = None
if len(sys.argv) < 3:
print("""
Usage:
python mouseInteractive -i img.png
""")
sys.exit(-1)
if sys.argv[1]=="-i":
imagePath = sys.argv[2]
def createBlankImage(width, height, color=(255,255,255)):
img = np.zeros((height, width, 3), np.uint8)
img[:] = color
return img
def mouseCallback(event,x,y,flags,param):
global sx,sy,previewImage
if (event == cv2.EVENT_LBUTTONDOWN):
print(event,x,y,flags,param)
bgrColor = frame[y][x]
previewImage = createBlankImage(200,200,bgrColor)
hsvColor = cv2.cvtColor(bgrColor.reshape(1,1,3),cv2.COLOR_BGR2HSV)
print("bgr->hsv:{}->{}".format(bgrColor,hsvColor.tolist()[0][0]))
cv2.circle(frame,(x,y),6, (0,0,255),-1)
if (sx != None):
cv2.line(frame,(sx,sy),(x,y),(0,0,255),3)
sx = x
sy = y
cv2.imshow('demo', frame)
cv2.imshow('preview', previewImage)
frame = cv2.imread(imagePath)
cv2.namedWindow("demo")
cv2.namedWindow("preview")
cv2.moveWindow("demo", 1500, 300)
cv2.moveWindow("preview", 1500, 80)
cv2.imshow('demo', frame)
cv2.setMouseCallback('demo', mouseCallback)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 3.078125 | 3 |
images.py | malyvsen/marble | 1 | 12790022 | <filename>images.py
#%% imports
#% matplotlib inline
import skimage
import skimage.io
import skimage.color
import matplotlib.pyplot as plt
import numpy as np
from imgaug import augmenters as iaa
from imageio import mimsave
#%% loading
def to_rgb(image):
if len(np.shape(image)) == 2:
return skimage.color.gray2rgb(image)
return image[:, :, :3]
images = []
for image in skimage.io.imread_collection('statues/*'):
images.append(to_rgb(image))
images = np.array(images)
#%% augmentation pipeline
augmented_shape = (64, 64)
augmenter = iaa.Sequential([
iaa.Fliplr(p=0.5),
iaa.Affine(scale=(0.5, 1.0), rotate=(-5, 5), mode='reflect'),
iaa.CropToFixedSize(width=augmented_shape[0], height=augmented_shape[1], position='normal'),
iaa.Resize(size={'height': augmented_shape[0], 'width': 'keep-aspect-ratio'}),
iaa.Resize(size={'height': 'keep-aspect-ratio', 'width': augmented_shape[1]})
])
noiser = iaa.Sequential([
iaa.SomeOf(
(1, 2),
[iaa.CoarseDropout(p=(0.0, 0.2), size_percent=(0.01, 0.05)), iaa.AdditiveGaussianNoise(scale=(8, 16))],
random_order=True
)
])
#%% utils
def batch(size):
originals = images[np.random.choice(len(images), size)]
augmented = np.array([augmenter.augment_image(image) for image in originals])
noised = noiser.augment_images(augmented)
return noised, augmented
def demo_board(images):
return np.clip(np.concatenate(images, 1).astype(np.uint8), 0, 255)
def show(images, save_as=None):
to_show = demo_board(images)
if save_as is not None:
skimage.io.imsave(save_as, to_show)
plt.imshow(to_show)
plt.show()
return to_show
def save_gif(path, demo_boards, fps=25):
mimsave(path, demo_boards, fps=fps)
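# Minimal usage sketch (assumes the 'statues/' folder loaded above exists and
# contains at least one image): draw one noisy/clean batch and show both boards.
if __name__ == '__main__':
    noisy, clean = batch(4)
    show(noisy)
    show(clean, save_as='demo_board.png')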
| 2.40625 | 2 |
neptune/internal/channels/channels.py | wuchangsheng951/neptune-client | 0 | 12790023 | #
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from collections import namedtuple
from enum import Enum
ChannelNameWithTypeAndNamespace = namedtuple(
"ChannelNameWithType",
['channel_id', 'channel_name', 'channel_type', 'channel_namespace']
)
ChannelIdWithValues = namedtuple('ChannelIdWithValues', ['channel_id', 'channel_values'])
class ChannelType(Enum):
TEXT = 'text'
NUMERIC = 'numeric'
IMAGE = 'image'
class ChannelNamespace(Enum):
USER = 'user'
SYSTEM = 'system'
class ChannelValue(object):
def __init__(self, x, y, ts):
self._x = x
self._y = y
if ts is None:
ts = time.time()
self._ts = ts
@property
def ts(self):
return self._ts
@property
def x(self):
return self._x
@property
def y(self):
return self._y
def __str__(self):
return 'ChannelValue(x={},y={},ts={})'.format(self.x, self.y, self.ts)
def __repr__(self):
return str(self)
def __eq__(self, o):
return self.__dict__ == o.__dict__
def __ne__(self, o):
return not self.__eq__(o)
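# Illustrative sketch only (not part of the library): build a single numeric
# point with the current timestamp and pair it with a channel id.
if __name__ == "__main__":
    value = ChannelValue(x=1, y=0.5, ts=None)
    print(ChannelIdWithValues(channel_id="some-channel-id", channel_values=[value]))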
| 2.3125 | 2 |
plusminus/examples/dice_roll_parser.py | ptmcg/arithmetic_parsing | 0 | 12790024 | #
# dice_roll_parser.py
#
# Copyright 2021, <NAME>
#
from plusminus import BaseArithmeticParser
# fmt: off
class DiceRollParser(BaseArithmeticParser):
"""
Parser for evaluating expressions representing rolls of dice, as used in many board and
role-playing games, such as:
d20
3d20
5d6 + d20
min(d6, d6, d6)
maxn(2, d6, d6, d6) (select top 2 of 3 d6 rolls)
show(d6, d6, d6)
"""
def customize(self):
import random
self.add_operator("d", 1, BaseArithmeticParser.RIGHT,
lambda a: random.randint(1, a))
self.add_operator("d", 2, BaseArithmeticParser.LEFT,
lambda a, b: sum(random.randint(1, b) for _ in range(a)))
self.add_function("min", ..., min)
self.add_function("max", ..., max)
self.add_function("show", ...,
lambda *args: {"rolls": list(args), "sum": sum(args)})
def maxn(n, *values):
ret = sorted(values, reverse=True)[:n]
return {"n": n, "rolls": values, "maxn": ret, "sum": sum(ret)}
self.add_function("maxn", ..., maxn)
# fmt: on
if __name__ == '__main__':
parser = DiceRollParser()
parser.runTests(
"""\
d20
3d6
d20+3d4
2d100
max(d6, d6, d6)
show(d6, d6, d6)
""",
postParse=lambda _, result: result[0].evaluate(),
)
| 3.328125 | 3 |
book/ch02/python/ch02.py | verazuo/Code-For-Data-driven-Security | 0 | 12790025 | <reponame>verazuo/Code-For-Data-driven-Security
#
# name ch02.py
# Data frame (similar to Excel)
# create a new data frame
import numpy as np
import pandas as pd
# create a new data frame of hosts & high vuln counts
assets_df = pd.DataFrame({
"name": ["danube", "gander", "ganges", "mekong", "orinoco"],
"os": ["W2K8", "RHEL5", "W2K8", "RHEL5", "RHEL5"],
"highvulns": [1, 0, 2, 0, 0]
})
# take a look at the data frame structure & contents
print(assets_df)
assets_df.head()
# show a "slice" just the operating systmes
assets_df.os.head()
# print(assets_df.os.head())
# add a new column
assets_df['ip'] = ["192.168.1.5", "10.2.7.5", "192.168.1.7",
"10.2.7.6", "10.2.7.7"]
# show only nodes with more than one high vulnerability
assets_df[assets_df.highvulns > 1].head()
# divide nodes into network 'zones' based on IP address
assets_df['zones'] = np.where(
assets_df.ip.str.startswith("192"), "Zone1", "Zone2")
# get one final view
assets_df.head()
print(assets_df)
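# (Added illustration, not from the original script) count assets per zone
# using pandas' value_counts
print(assets_df.zones.value_counts())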
| 3.390625 | 3 |
aiocometd_chat_demo/cometd.py | robertmrk/aiocometd-chat-demo | 1 | 12790026 | """Synchronous CometD client"""
from enum import IntEnum, unique, auto
import asyncio
from functools import partial
from typing import Optional, Iterable, TypeVar, Awaitable, Callable, Any
import concurrent.futures as futures
from contextlib import suppress
import aiocometd
from aiocometd.typing import JsonObject
# pylint: disable=no-name-in-module
from PyQt5.QtCore import pyqtSignal, pyqtProperty, QObject # type: ignore
# pylint: enable=no-name-in-module
from aiocometd_chat_demo.exceptions import InvalidStateError
T_co = TypeVar("T_co", covariant=True) # pylint: disable=invalid-name
def run_coro(coro: Awaitable[T_co],
callback: Optional[Callable[["futures.Future[T_co]"], Any]]
= None,
loop: Optional[asyncio.AbstractEventLoop] = None,) \
-> "futures.Future[T_co]":
"""Schedule the execution of the given *coro* and set *callback* to be
called when the *coro* is finished
:param coro: A coroutine
:param callback: A callback function called with the future object \
associated with *coro*
:param loop: The event loop on which the *coro* should be scheduled
:return: The future associated with the *coro*
"""
if loop is None:
loop = asyncio.get_event_loop()
future = asyncio.run_coroutine_threadsafe(coro, loop)
if callback is not None:
future.add_done_callback(callback)
return future
@unique
class ClientState(IntEnum):
"""CometD client states"""
#: Connected with the server
CONNECTED = auto()
#: Disconnected state
DISCONNECTED = auto()
#: Disconnected state due to an error
ERROR = auto()
# pylint: disable=too-few-public-methods
class MessageResponse(QObject): # type: ignore
"""The asynchronous result of a sent CometD message"""
#: Contains the exception object if finished with an error, otherwise None
error: Optional[BaseException] = None
#: Contains the response of the server when finished successfully,
#: otherwise None
result: Optional[JsonObject] = None
    #: Emitted when the response has been received
finished = pyqtSignal()
# pylint: enable=too-few-public-methods
# pylint: disable=too-many-instance-attributes
class CometdClient(QObject): # type: ignore
"""Synchronous CometD client implementation
This class enables the asynchronous Client class from aiocometd to be used
in synchronous code if it runs on a quamash event loop.
Since the event loop is shared by Qt's and asyncio's events, the
concurrent.futures.Future can't be awaited, blocking is not allowed.
Instead, this class is implemented similarly to how asynchronous network
operations are implemented in Qt. Namely, on a method call the operation
is started and the method immediately returns, and then the results or the
potential errors during the asynchronous operation are broadcasted with
signals.
"""
    #: Signal emitted when the client's state is changed
    state_changed = pyqtSignal(ClientState)
    #: Signal emitted when the client enters the :obj:`~ClientState.CONNECTED`
    #: state
    connected = pyqtSignal()
    #: Signal emitted when the client enters the
    #: :obj:`~ClientState.DISCONNECTED` state
    disconnected = pyqtSignal()
    #: Signal emitted when the client enters the :obj:`~ClientState.ERROR` state
    error = pyqtSignal(Exception)
    #: Signal emitted when a message has been received from the server
message_received = pyqtSignal(dict)
def __init__(self, url: str, subscriptions: Iterable[str],
loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
"""
:param url: CometD service url
:param subscriptions: A list of channels to which the client should \
subscribe
:param loop: Event :obj:`loop <asyncio.BaseEventLoop>` used to
schedule tasks. If *loop* is ``None`` then
:func:`asyncio.get_event_loop` is used to get the default
event loop.
"""
super().__init__()
self._url = url
self._subscriptions = list(subscriptions)
self._loop = loop or asyncio.get_event_loop()
self._client: Optional[aiocometd.Client] = None
self._state = ClientState.DISCONNECTED
self._state_signals = {
ClientState.CONNECTED: self.connected,
ClientState.DISCONNECTED: self.disconnected,
}
self._connect_task: Optional["futures.Future[None]"] = None
@pyqtProperty(ClientState, notify=state_changed)
def state(self) -> ClientState:
"""Current state of the client"""
return self._state
@state.setter # type: ignore
def state(self, new_state: ClientState) -> None:
"""Set the state of the client to *state*"""
# if the state didn't changed then don't do anything
if new_state != self._state:
self._state = new_state
# notify listeners that the state changed
self.state_changed.emit(self._state)
# emit state specific signals
if new_state in self._state_signals:
self._state_signals[new_state].emit()
def connect_(self) -> None:
"""Connect to the CometD service and start listening for messages
The function returns immediately. On success the
:obj:`~CometdClient.connected` signal is emited or the
:obj:`~CometdClient.error` signal on failure. If the client is already
connected then it does nothing.
"""
# don't do anything if already connected
if self.state != ClientState.CONNECTED:
# schedule the coroutine for execution
self._connect_task = run_coro(
self._connect(),
self._on_connect_done,
self._loop
)
async def _connect(self) -> None:
"""Connect to the CometD service and retreive the messages sent by
the service as long as the client is open
"""
# connect to the service
async with aiocometd.Client(self._url, loop=self._loop) as client:
# set the asynchronous client attribute
self._client = client
# subscribe to all the channels
for subscription in self._subscriptions:
await client.subscribe(subscription)
# put the client into a connected state
self.state = ClientState.CONNECTED
# listen for incoming messages
with suppress(futures.CancelledError):
async for message in client:
# emit signal about received messages
self._loop.call_soon_threadsafe(self.message_received.emit,
message)
# clear the asynchronous client attribute
self._client = None
# put the client into a disconnected state
self.state = ClientState.DISCONNECTED
def _on_connect_done(self, future: "futures.Future[None]") -> None:
"""Evaluate the result of an asynchronous task
Emit signals about errors if the *future's* result is an exception.
:param future: A future associated with the asynchronous task
"""
# clear the task member
self._connect_task = None
error = None
with suppress(futures.CancelledError):
error = future.exception()
if error is not None:
self.state = ClientState.ERROR
self.error.emit(error)
def disconnect_(self) -> None:
"""Disconnect from the CometD service
If the client is not connected it does nothing.
"""
if self.state == ClientState.CONNECTED:
# check that the task has been initialized
if self._connect_task is None:
raise InvalidStateError("Uninitialized _connect_task "
"attribute.")
self._connect_task.cancel()
def publish(self, channel: str, data: JsonObject) -> MessageResponse:
"""Publish *data* to the given *channel*
:param channel: Name of the channel
:param data: Data to send to the server
:return: Return the response associated with the message
"""
# check that the client has been initialized
if self.state != ClientState.CONNECTED:
raise InvalidStateError("Can't send messages in a non-connected "
"state.")
if self._client is None:
raise InvalidStateError("Uninitialized _client attribute.")
response = MessageResponse()
run_coro(self._client.publish(channel, data),
partial(self._on_publish_done, response),
self._loop)
return response
@staticmethod
def _on_publish_done(response: MessageResponse,
future: "futures.Future[JsonObject]") -> None:
"""Evaluate the result of an asynchronous message sending task
:param response: A response associated with the *future*
:param future: A future associated with the asynchronous task
"""
# set the error or result attributes of the response depending on
# whether it was completed normally or it exited with an exception
if future.exception() is not None:
response.error = future.exception()
else:
response.result = future.result()
# notify listeners that a response has been received
response.finished.emit()
# pylint: disable=too-many-instance-attributes
| 2.5 | 2 |
YAuB/app.py | Wyvryn/YAuB | 0 | 12790027 | <reponame>Wyvryn/YAuB
"""
Main webapp logic
All setup config and endpoint definitions are stored here
.. TODO:: allow user creation
.. TODO:: page to show loaded plugins
"""
from dateutil import parser
import models
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user)
from flask_uploads import IMAGES, UploadNotAllowed, UploadSet
from forms import ArticleForm, AuthorForm, ConfigForm, LoginForm
from markdown2 import Markdown
from utils import flash_errors, load_plugins
markdowner = Markdown()
login_manager = LoginManager()
uploaded_photos = UploadSet('photos', IMAGES)
plugs, header_includes, footer_includes = load_plugins()
main = Blueprint('main', __name__)
@login_manager.user_loader
def load_user(id):
return models.getAuthor(int(id))
@main.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", 'success')
redirect_url = request.args.get("next") or url_for("main.home")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("login.html", form=form, models=models)
@main.route('/logout')
@login_required
def logout():
logout_user()
flash('You are logged out.')
return redirect(url_for('main.home'))
@main.route("/<int:page>")
@main.route("/", defaults={'page': 1})
def home(page):
"""Home Page
We have two routes - / and /<int:page>
If we're giving a specific page to load, get articles for that page
Otherwise, load page 1"""
if not models.hasSetupRun():
return redirect(url_for('main.initial_setup_user'))
articlesPerPage = models.getArticlesPerPage()
nextPage = models.nextPage(page, articlesPerPage)
return render_template(
'home.html',
header_includes=header_includes,
footer_includes=footer_includes,
models=models,
entries=models.getArticlesForPage(page, articlesPerPage),
sidebar=True,
pageNumber=int(page),
nextPage=nextPage)
@main.route('/firstrun/blog', methods=['GET', 'POST'])
def initial_setup_blog():
"""Initial blog setup when accessing the YAuB for the first time"""
if models.hasSetupRun():
"""Only run setup once"""
return redirect(url_for('main.home'))
obj = models.Config()
form = ConfigForm(obj=obj)
if request.method == "POST" and form.validate():
form.populate_obj(obj)
models.db.session.add(obj)
models.db.session.commit()
flash('Successfully set blog settings')
return redirect(url_for("main.home"))
else:
flash_errors(form)
return render_template('admin_blog.html', form=form, firstrun=True)
@main.route('/firstrun/author', methods=['GET', 'POST'])
def initial_setup_user():
"""Initial user setup when accessing YAuB for the first time"""
if models.hasSetupRun():
"""Only run setup once"""
return redirect(url_for('main.home'))
obj = models.Author()
form = AuthorForm(obj=obj)
if request.method == "POST" and form.validate():
form.populate_obj(obj)
obj.set_password(form.password.data)
models.db.session.add(obj)
models.db.session.commit()
flash('Successfully created user')
return redirect(url_for("main.initial_setup_blog"))
else:
flash_errors(form)
return render_template('firstrun_author.html', form=form, firstrun=True)
@main.route("/admin/settings", methods=['GET', 'POST'])
@login_required
def admin_blog():
"""Page to change YAuB settings"""
obj = models.getConfigObj()
form = ConfigForm(obj=obj)
# populate the form with our blog data
if request.method == "POST" and form.validate():
form.populate_obj(obj)
models.db.session.commit()
        flash('Successfully edited blog settings')
return redirect(url_for("main.home"))
else:
flash_errors(form)
return render_template('admin_blog.html', form=form, models=models)
@main.route("/admin/article", defaults={'id': None}, methods=['GET', 'POST'])
@main.route("/admin/article/<id>", methods=['GET', 'POST'])
@login_required
def admin_article(id):
"""Page to create or edit an article
If no article id is given we will create a new article,
Otherwise we edit the article at the given id"""
isNew = not id
if isNew:
obj = models.Article()
else:
obj = models.getArticle(int(id))
obj.author = current_user.rowid
form = ArticleForm(obj=obj)
if not isNew:
# Bootstrap-TagsInput hooks into a select multiple field
form.tags.choices = [
(a.tag, a.rowid)
for a in models.ArticleTag.query.filter(
models.ArticleTag.articleid == int(id)
).order_by('tag')
]
else:
form.tags.choices = []
if request.method == "POST" and form.validate():
form.populate_obj(obj)
if 'imgcap' in request.files:
try:
filename = uploaded_photos.save(request.files['imgcap'])
obj.imagecap = filename
except UploadNotAllowed:
# If no picture is passed, don't crash
pass
if 'banner' in request.files:
try:
filename = uploaded_photos.save(request.files['banner'])
obj.banner = filename
except UploadNotAllowed:
# If no picture is passed, don't crash
pass
obj.published = parser.parse(obj.published)
if isNew:
models.db.session.add(obj)
models.db.session.flush()
models.updateTags(obj.tags, obj.rowid)
models.db.session.commit()
        flash('Successfully edited article')
return redirect(url_for("main.home"))
return render_template('admin_article.html', form=form, rowid=id, models=models)
@main.route("/admin/delete/<id>", methods=['GET', 'POST'])
@login_required
def admin_delete(id):
"""Deletes an article at a given id"""
obj = models.getArticle(int(id))
models.updateTags(None, int(id))
models.db.session.delete(obj)
models.db.session.commit()
flash('Successfully deleted article')
return redirect(url_for("main.home"))
@main.route("/admin/author", methods=['GET', 'POST'])
@login_required
def admin_author():
"""Updates author info"""
obj = models.getAuthor(int(current_user.rowid))
# Hold on to this until we validate the fields from the form
password = models.getAuthor(int(current_user.rowid)).password
form = AuthorForm(obj=obj)
# populate the form with our blog data
if request.method == "POST" and form.validate():
form.populate_obj(obj)
if len(form.password.data) == 0:
# If the password field has no data, don't change the user's password
obj.password = password
else:
obj.set_password(form.password.data)
models.db.session.commit()
        flash('Successfully edited user info')
return redirect(url_for("main.home"))
else:
flash_errors(form)
return render_template('admin_author.html', form=form, models=models)
@main.route("/article/<id>")
def article(id):
"""Display an article with a given id"""
article = models.getArticle(id)
markdown = article.content
markdown = markdown.replace('\\n', '<br />')
html = markdowner.convert(markdown)
# Run any plugins on our html before passing it to the template
for plug in plugs:
html = plug.run(html)
return render_template(
'article.html',
header_includes=header_includes,
footer_includes=footer_includes,
html=html,
article=article,
models=models,
sidebar=True
)
@main.route("/tag/<id>")
def tag(id):
"""Loads the main page but only shows articles that have a given tag"""
return render_template(
'home.html',
header_includes=header_includes,
footer_includes=footer_includes,
entries=models.getArticlesWithTag(id, 10),
models=models,
sidebar=True
)
| 2.078125 | 2 |
Codes/PC-Interface/erts.py | eyantra/Border_Surveillance_Robot_using_Firebird_ATmega2560 | 1 | 12790028 | """**************************************************************************************************
Platform: Python 2.x and 2.x.x
Title: Border Surveillance Bot
Author:
1.<NAME>
2.<NAME>
**************************************************************************************************/
/********************************************************************************
Copyright (c) 2010, ERTS Lab, IIT Bombay. -*- c -*-
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of the copyright holders nor the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
* Source code can be used for academic purpose.
For commercial use permission form the author needs to be taken.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES LOSS OF USE, DATA, OR PROFITS OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Software released under Creative Commence cc by-nc-sa licence.
For legal information refer to:
http://creativecommons.org/licenses/by-nc-sa/3.0/legalcode
********************************************************************************"""
"""/*******************************************************************************
This code does image processing for face detection.
The code also handles false faces that may creep in.
'S' ----> Move Left
'T' ----> Move Right
'Q' ----> Move Forward
'R' ----> Move Backward
'B' ----> Beep for one second
'Z' ----> Move Fast Left
********************************************************************************/"""
import sys #importing system for handling signals for exit
import cv #importing opencv for face detection
import time #for sleep function
import serial #importing pyserial for serial communication
count = 0
# configure the serial connections (the parameters differs on the device you are connecting to)
ser = serial.Serial(
port='/dev/ttyUSB0', #The port where the serial communication usb is present.
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS
)
ser.open()
ser.isOpen()
def detect(image):
#Getting size of the image for handling generic image resolution, i.e. handling webcam with arbitrary resolution
image_size = cv.GetSize(image)
# create grayscale version
grayscale = cv.CreateImage(image_size, 8, 1) #creating a blank image with the given image's resolution
cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY) #copying the black and white version of the image into the blank image
# create storage
storage = cv.CreateMemStorage(0) #creating required storage for face detection
# equalize histogram
cv.EqualizeHist(grayscale, grayscale)
# show processed image
cv.ShowImage('Processed', grayscale)
# detect objects
cascade = cv.Load('haarcascade_frontalface_alt.xml') #loading the Haar Cascade
faces = cv.HaarDetectObjects(grayscale, cascade, storage, 1.2, 2, cv.CV_HAAR_DO_CANNY_PRUNING) #detecting the faces in the image
#These parameters are tweaked to RGB video captures, please refer to http://opencv.willowgarage.com/documentation/python/objdetect_cascade_classification.html for tweaking your parameters.
print faces #printing the rectangles circumscribing the face in the image
#drawing rectangles around the faces in the image
if faces:
for i in faces:
cv.Rectangle(image,
(i[0][0], i[0][1]),
(i[0][0] + i[0][2], i[0][1] + i[0][3]),
(0, 255, 0),
3,
8,
0)
return faces
if __name__ == "__main__":
print "Press ESC to exit ..."
# create windows
cv.NamedWindow('Raw', cv.CV_WINDOW_AUTOSIZE) #creating autosizable windows for captured frame from webcam
cv.NamedWindow('Processed', cv.CV_WINDOW_AUTOSIZE) #creating autosizable windows for processed image
# create capture device
    device = 1 # assume we want the second capture device (USB webcam); use i for the capture device /dev/videoi
capture = cv.CaptureFromCAM(device)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 640) #setting capture width to 640
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 480) #setting capture height to 480
#If you want to capture at native resolution of the web cam don't set obove width and height parameters, the processing speed will be slower for larger image resolutions
# check if capture device is OK
if not capture:
print "Error opening capture device"
sys.exit(1)
faceBeep = 0 #Used for beeping control
firstFaceDetected = 0 #Used for face detection
falseDetection = 0 #For false detection
faceNotDetected = 0 #used for checking if no face is found
multipleFaces = 0 #for indicating that multiple faces
multipleFacesInit = 0
forward = 0
left = 0
right = 0
while 1:
# do forever
# capture the current frame
frame = cv.QueryFrame(capture)
if frame is None:
continue
# mirror
#cv.Flip(frame, None, 1)
# face detection
faces = detect(frame)
if len(faces) > 0:
firstFaceDetected = 1
#multiple faces
if len(faces) > 1:
multipleFacesInit = 0
multipleFaces = 1
ser.write('Q'+'\r\n')
forward = forward + 1
faceBeep = 0
#Single face detected
else:
if multipleFacesInit == 1:
multipleFaces = 0
multipleFacesInit = 0
                #aligning itself to face the enemy
if faces[0][0][0] + float(faces[0][0][2]/2) < 260:
ser.write('S'+'\r\n')
left = left + 1
else:
if faces[0][0][0] + float(faces[0][0][2]/2) > 380:
ser.write('T'+'\r\n')
right = right + 1
else:
faceBeep = faceBeep + 1
#alarming for detected enemy
if faceBeep > 3:
faceBeep = 0
print 'Beeping for Faces'
ser.write('B'+'\r\n')
time.sleep(1)
ser.write('B'+'\r\n')
time.sleep(1)
ser.write('B'+'\r\n')
time.sleep(1)
ser.write('B'+'\r\n')
time.sleep(1)
ser.write('B'+'\r\n')
if multipleFaces == 0:
firstFaceDetected = 0
else:
#scouting
if firstFaceDetected == 0:
ser.write('Z'+'\r\n')
#print "batu\n"
else:
if multipleFaces == 0:
falseDetection = falseDetection + 1
#handling false detection
if falseDetection > 10:
falseDetection = 0
firstFaceDetected = 0
#retracing to position and orientation of multiple face detection
faceNotDetected = faceNotDetected + 1
if faceNotDetected > 10 and multipleFaces == 1:
faceNotDetected = 0
multipleFacesInit = 1
#Realign to initial position of multiple face detection.
while forward > 0:
ser.write('R'+'\r\n')
forward = forward - 1
time.sleep(1) #waiting for clearing of serial buffer
#Realign to initial orientation of multiple face detection.
if left > right:
x = left - right
left = 0
right = 0
while x > 0:
ser.write('T'+'\r\n')
x = x - 1
time.sleep(1) #waiting for clearing of serial buffer
else:
x = right - left
right = 0
left = 0
while x > 0:
ser.write('S'+'\r\n')
x = x - 1
time.sleep(1) #waiting for clearing of serial buffer
# display webcam image
cv.ShowImage('Raw', frame)
# handle events
k = cv.WaitKey(10)
if k == 0x1b: # ESC
print 'ESC pressed. Exiting ...'
break #Exiting the program
| 1.351563 | 1 |
hubbot/moduleinterface.py | HubbeKing/Hubbot_Twisted | 2 | 12790029 | from enum import Enum
class ModuleAccessLevel(Enum):
ANYONE = 0
ADMINS = 1
class ModuleInterface(object):
"""
The interface modules should inherit and implement in order to function with the ModuleHandler.
triggers - command words that cause the module to trigger.
accepted_types - message types that can cause the module to trigger (PRIVMSG, ACTION, NOTICE).
help - help text for the module. May be unicode or a function on the message object help(message), returning a unicode object.
access_level - whether the module should be trigger-able by ANYONE, or only users on the admin list.
priority - the priority for the module. modules with high priority trigger before ones with low priority for any given message object.
"""
triggers = []
accepted_types = ["PRIVMSG"]
help = "No help defined yet."
access_level = ModuleAccessLevel.ANYONE
priority = 0
def __init__(self, bot):
"""
@type bot: hubbot.bot.Hubbot
"""
self.bot = bot
def on_load(self):
"""
Called when the module is loaded by the ModuleHandler
"""
pass
def on_unload(self):
"""
Called when the module is unloaded by the ModuleHandler
"""
pass
def should_trigger(self, message):
"""
Called by the ModuleHandler for each incoming message, to see if said message causes this module to trigger
Default behavior is to trigger on any message that matches the accepted types and contains a matching command trigger
@type message: hubbot.message.IRCMessage
"""
if message.type not in self.accepted_types:
return False
if message.command not in self.triggers:
return False
return True
def on_trigger(self, message):
"""
Called by the ModuleHandler when shouldTrigger(message) is True.
Contains all actions the module is to perform on the message object.
@type message: hubbot.message.IRCMessage
@return: hubbot.response.IRCResponse | None
"""
pass
| 3.1875 | 3 |
gui_res/game_window.py | CoderTofu/Match-A-Card | 0 | 12790030 | <filename>gui_res/game_window.py
from tkinter import *
import threading
import time
import random
from gui_res.gui_frames.card_frame import card_frame
def game_gui(window, count, deck):
"""
Takes in the original window, count selected, and deck generated.
Creates a new window with the 3 params
"""
game_window = Toplevel(window)
random.shuffle(deck) # shuffles deck randomly
button_list = [] # List that will hold all button widgets
checking = [] # List that will hold up to the 2 most recent buttons player pressed
times_checked = 0
check_label = Label(game_window,
text=f"You checked {times_checked} times.",
font=("Arial Black", 20))
check_label.pack(padx=20, pady=20)
grid_frame = card_frame(game_window) # Frame where all cards will be contained
grid_frame.pack(padx=10, pady=10)
def game_over():
pass
def judge_delay(wait, first, second):
nonlocal times_checked
# Sets a delay then either changes the text to a check mark
# or clears it if the two do not match each other
# Then after that checks if all boxes are checked, so it can do a function
time.sleep(wait)
try:
times_checked += 1
check_label.config(text=f"You checked {times_checked} times.")
if first.cget("text") == second.cget("text"):
first.config(text="✔", bg="green")
second.config(text="✔", bg="green")
game_over()
else:
first.config(text="")
second.config(text="")
except TclError:
print("An error occurred...")
def show_and_hide(btn, card):
nonlocal checking
btn_text = btn.cget("text")
if btn_text == "":
card_text = card.stringify()
btn.config(text=card_text)
checking.append(btn)
if len(checking) == 2:
thread = threading.Thread(target=judge_delay, args=(0.5, checking[0], checking[1]))
thread.start()
checking.clear()
elif btn_text == "✔":
print("You already matched!")
else:
print("Oops you can't do that!")
# A loop that creates 2 cards per 1 count
for i in range(0, count):
# First of the card pair
card_button = Button(grid_frame)
# Pass the button widget itself and the random card class, so we can change text config
card_button.config(command=lambda btn=card_button, j=i: show_and_hide(btn, deck[j]))
# Second of the card pair
card_button_pair = Button(grid_frame)
# Pass the button widget itself and the random card class, so we can change text config
card_button_pair.config(command=lambda btn=card_button_pair, j=i: show_and_hide(btn, deck[j]))
button_list.append(card_button)
button_list.append(card_button_pair)
random.shuffle(button_list)
CARD_PER_COLUMN = round(count/4) * 2
cell_row_count = 0
cell_column_count = 0
for button in button_list:
button.config(
width=6,
height=3,
font=("arial", 15)
)
if cell_row_count == CARD_PER_COLUMN:
            # if the number of cards in this column reaches CARD_PER_COLUMN, reset the row counter and start a new column
cell_row_count = 0
cell_column_count += 1
button.grid(row=cell_row_count, column=cell_column_count)
# Otherwise, just create a new row for the card
cell_row_count += 1
grid_frame.pack()
| 3.71875 | 4 |
pychess/Players/ProtocolEngine.py | jacobchrismarsh/chess_senior_project | 0 | 12790031 | from gi.repository import GObject
from pychess.Players.Engine import Engine
from pychess.Utils.const import NORMAL, ANALYZING, INVERSE_ANALYZING
TIME_OUT_SECOND = 60
class ProtocolEngine(Engine):
__gsignals__ = {
"readyForOptions": (GObject.SignalFlags.RUN_FIRST, None, ()),
"readyForMoves": (GObject.SignalFlags.RUN_FIRST, None, ()),
}
# Setting engine options
def __init__(self, subprocess, color, protover, md5):
Engine.__init__(self, md5)
self.engine = subprocess
self.defname = subprocess.defname
self.color = color
self.protover = protover
self.readyMoves = False
self.readyOptions = False
self.connected = True
self.mode = NORMAL
self.analyzing_paused = False
def isAnalyzing(self):
return self.mode in (ANALYZING, INVERSE_ANALYZING)
| 2.265625 | 2 |
espy/algorithms/channel_noise_simulator.py | Yomikron/espy | 1 | 12790032 | import numpy
class channel_noise_simulator:
"""Class to hold usefull funktions to simulate noise in a channel"""
def __init__(self):
return
# _____________create bits___________________
def create_random_bits_list(self, len):
"""create a random len bits long bitstring """
bits = []
for i in range(len):
bits.append(numpy.random.randint(0, 2))
return bits
def create_random_bits_string(self, len):
"""create a random len bits long string """
bits = ""
for i in range(len):
bits += str(numpy.random.randint(0, 2))
return bits
    # _____________Randomise bits______________________
def randomise_bits_list(self, bits, probability):
"""A function to simply flip bits with the given probability
ARGS: a list of bits, the probability for an error[0-1]
RETURN: a list of bits
"""
new_bits = []
for b in bits:
if probability > numpy.random.random(): # roll random numbers
new_bits.append((b + 1) % 2) # turn 0 to 1 and 1 to 0
else:
new_bits.append(b)
return new_bits
def randomise_bits_string(self, bits, probability):
"""A function to simply flip bits with the given probability
ARGS: a list of bits, the probability for an error[0-1]
Return: a string full of bits
"""
new_bits = ""
for b in bits:
if probability > numpy.random.random(): # roll random numbers
new_bits += str((int(b) + 1) % 2) # turn 0 to 1 and 1 to 0
else:
new_bits += b
return new_bits
def randomise_bits_string_list(self, bits, probability):
"""A function to simply flip bits with the given probability
        ARGS: a list of bit strings, the probability for an error [0-1]
        RETURN: a list of bit strings
"""
new_bits = []
for b in bits:
new_bit = ""
for i in range(len(b)):
if probability > numpy.random.random(): # roll random numbers
new_bit += str((int(b[i]) + 1) % 2) # turn 0 to 1 and 1 to 0
else:
new_bit += str(b[i])
new_bits.append(new_bit)
return new_bits
def randomise_bits_burst_string_list(
self, bits, burst_probability, error_rate_in_burst=0.9,
):
"""A function to simply flip bits with the given probability
        ARGS: a list of bit strings, the probability of starting a burst [0-1], the probability that each bit inside a burst is flipped [0-1]
        Return: a list of bit strings with added burst errors
"""
new_bits = []
currently_bursting = False
for b in bits:
i = 0
new_bits.append("")
while i < len(b):
if burst_probability > numpy.random.random(): # roll random numbers
currently_bursting = True
while currently_bursting and i < len(
b
): # stop when bitstream ends (simulate one bursterror and adjust i)
if error_rate_in_burst > numpy.random.random():
new_bits[len(new_bits) - 1] += str(
((int(b[i]) + 1) % 2)
) # turn 0 to 1 and 1 to 0 randomly
else:
new_bits[len(new_bits) - 1] += str(b[i])
currently_bursting = False
i += 1
else:
new_bits[len(new_bits) - 1] += str(b[i])
i += 1
return new_bits
def randomise_bits_burst_list(
self, bits, burst_probability, error_rate_in_burst=0.9
):
"""A function to simply flip bits with the given probability
        ARGS: a list of bits, the probability of starting a burst [0-1], the probability that each bit inside a burst is flipped [0-1]
        Return: list of bits with added burst errors
"""
new_bits = []
i = 0
while i < len(bits):
if burst_probability > numpy.random.random(): # roll random numbers
currently_bursting = True
while currently_bursting and i < len(
bits
): # stop when bitstream ends (simulate one bursterror and adjust i)
if error_rate_in_burst > numpy.random.random():
new_bits.append(
(bits[i] + 1) % 2
) # turn 0 to 1 and 1 to 0 randomly
else:
new_bits.append(bits[i])
currently_bursting = False
i += 1
else:
new_bits.append(bits[i])
i += 1
return new_bits
def randomise_bits_burst_string(
self, bits, burst_probability, error_rate_in_burst=0.9,
):
"""A function to simply flip bits with the given probability
        ARGS: a string of bits, the probability of starting a burst [0-1], the probability that each bit inside a burst is flipped [0-1]
        Return: string of bits with added burst errors
"""
new_bits = ""
i = 0
while i < len(bits):
if burst_probability > numpy.random.random(): # roll random numbers
currently_bursting = True
while currently_bursting and i < len(
bits
): # stop when bitstream ends (simulate one bursterror and adjust i)
if error_rate_in_burst > numpy.random.random():
new_bits += str(
((int(bits[i]) + 1) % 2)
) # turn 0 to 1 and 1 to 0 randomly
else:
new_bits += str(bits[i])
currently_bursting = False
i += 1
else:
new_bits += str(bits[i])
i += 1
return new_bits
# ______________compare bits__________________________
def compare_and_highlight_differences(self, bits1, bits2):
"""compare two bitlists and higlight the differences"""
differences = []
if len(bits1) != len(bits2):
            print("warning: different lengths detected; this may result in a higher error rate")
min_length = min(len(bits1), len(bits2))
for i in range(min_length):
differences.append(1 if bits1[i] != bits2[i] else 0)
print("Differences found: " + str(differences.count(True)))
return differences
# c=channel_noise_simulator()
# print (c.randomise_bits_list([1,1,1,1,0,0,0,0,1],0.5))
# print (c.randomise_bits_string("1101110",0.5))
# print (c.compare_and_highlight_differences([1,1,1,0,0,1,1,0,0,1,0,1,1,1],[0,1,1,0,0,1,1,1,1,1,0,1,0,1]))
# print (c.create_random_bits_list(200))
# rb= c.create_random_bits_string(200)
# rr = c.randomise_bits_burst_string(rb,0.01,.9)
# print (c.compare_and_highlight_differences(rb,rr))
# """
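# Illustrative usage sketch (not from the original repo): exercises the
# string-based API in the same way as the commented examples above.
# Parameter values below are arbitrary choices, not recommended defaults.
if __name__ == "__main__":
    sim = channel_noise_simulator()
    original = sim.create_random_bits_string(64)
    noisy = sim.randomise_bits_burst_string(original, 0.05, 0.9)
    sim.compare_and_highlight_differences(original, noisy)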
| 3.53125 | 4 |
tests/observatory/platform/cli/test_click_utils.py | The-Academic-Observatory/observatory-platform | 9 | 12790033 | # Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: <NAME>
import unittest
from observatory.platform.cli.click_utils import (
INDENT1,
INDENT2,
INDENT3,
INDENT4,
comment,
indent,
)
class TestClick(unittest.TestCase):
def test_indent(self):
original_str = "hello world"
# 2 spaces
output = indent(original_str, INDENT1)
self.assertEqual(f" {original_str}", output)
# 3 spaces
output = indent(original_str, INDENT2)
self.assertEqual(f" {original_str}", output)
# 4 spaces
output = indent(original_str, INDENT3)
self.assertEqual(f" {original_str}", output)
# 5 spaces
output = indent(original_str, INDENT4)
self.assertEqual(f" {original_str}", output)
# Check that values below 0 raise assertion error
with self.assertRaises(AssertionError):
indent(original_str, 0)
with self.assertRaises(AssertionError):
indent(original_str, -1)
def test_comment(self):
input_str = ""
output = comment(input_str)
self.assertEqual(output, "# ")
input_str = "Hello world"
output = comment(input_str)
self.assertEqual(output, "# Hello world")
| 2.796875 | 3 |
kopf/structs/filters.py | yashbhutwala/kopf | 0 | 12790034 | def match(handler, body, changed_fields=None):
return (
(not handler.field or _matches_field(handler, changed_fields or [])) and
(not handler.labels or _matches_labels(handler, body)) and
(not handler.annotations or _matches_annotations(handler, body))
)
def _matches_field(handler, changed_fields):
return any(field[:len(handler.field)] == handler.field for field in changed_fields)
def _matches_labels(handler, body):
return _matches_metadata(handler=handler, body=body, metadata_type='labels')
def _matches_annotations(handler, body):
return _matches_metadata(handler=handler, body=body, metadata_type='annotations')
def _matches_metadata(handler, body, metadata_type):
metadata = getattr(handler, metadata_type)
object_metadata = body.get('metadata', {}).get(metadata_type, {})
for key, value in metadata.items():
if key not in object_metadata:
return False
elif value is not None and value != object_metadata[key]:
return False
else:
continue
return True
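# Illustrative sketch (not part of kopf itself): _StubHandler is a hypothetical
# stand-in exposing the `field`, `labels` and `annotations` attributes that
# `match` expects; real handler objects are created by the framework.
if __name__ == '__main__':
    class _StubHandler:
        field = None
        labels = {'app': 'db'}
        annotations = None

    body = {'metadata': {'labels': {'app': 'db', 'tier': 'backend'}}}
    print(match(_StubHandler(), body))  # expected: True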
| 2.546875 | 3 |
matasano/t/test_blocks.py | JohnnyPeng18/MatasanoCrypto | 2 | 12790035 | #!/usr/bin/env/ python
# encoding: utf-8
"""
Test block crypto.
"""
import unittest
import matasano.blocks
import matasano.util
__author__ = 'aldur'
class BlocksTestCase(unittest.TestCase):
def test_split_blocks(self):
f = matasano.blocks.split_blocks
b = "this is a test".encode("ascii")
k_len = 3
blocks = f(b, k_len)
self.assertEqual(
len(blocks),
k_len
)
self.assertEqual(
sum(len(i) for i in blocks),
len(b)
)
l = list()
for i in range(len(blocks[0])):
for j in range(len(blocks)):
try:
l.append(blocks[j][i])
except IndexError:
pass
l = bytes(l)
self.assertEqual(
b, l
)
self.assertEqual(
b.decode("ascii"),
l.decode("ascii")
)
def test_pkcs_7(self):
b = "YELLOW SUBMARINE".encode("ascii")
size = 20
padded = matasano.blocks.pkcs_7(b, size)
self.assertEqual(len(padded), size)
self.assertEqual(padded, b + b"\x04" * 4)
size = 16
padded = matasano.blocks.pkcs_7(b, size)
self.assertEqual(len(padded), size * 2)
self.assertEqual(padded, b + (b"\x10" * size))
def test_pkcs_1_5(self):
b = "YELLOW SUBMARINE".encode("ascii")
size = 20
padded = matasano.blocks.pkcs_1_5(b, size)
self.assertEqual(
padded.to_bytes(size, "big"), b"\x00\x02\xff\x00" + b
)
unpadded = matasano.blocks.un_pkcs_1_5(padded, size)
self.assertEqual(
b, unpadded
)
self.assertRaises(
matasano.blocks.BadPaddingException,
matasano.blocks.un_pkcs_1_5,
padded << 1, size
)
def test_un_pkcs(self):
b = "YELLOW SUBMARINE".encode("ascii")
size = 20
padded = matasano.blocks.pkcs_7(b, size)
un_padded = matasano.blocks.un_pkcs_7(padded, size)
self.assertEqual(b, un_padded)
size = 16
padded = matasano.blocks.pkcs_7(b, size)
un_padded = matasano.blocks.un_pkcs_7(padded, size)
self.assertEqual(b, un_padded)
padded = b"ICE ICE BABY\x04\x04\x04\x04"
un_padded = matasano.blocks.un_pkcs_7(padded, size)
self.assertEqual(b"ICE ICE BABY", un_padded)
padded = b"ICE ICE BABY\x05\x05\x05\x05"
self.assertRaises(
matasano.blocks.BadPaddingException,
matasano.blocks.un_pkcs_7,
padded,
size
)
padded = b"ICE ICE BABY\x01\x02\x03\x04"
self.assertRaises(
matasano.blocks.BadPaddingException,
matasano.blocks.un_pkcs_7,
padded,
size
)
def test_aes_ecb(self):
f = matasano.blocks.aes_ecb
key = "YELLOW SUBMARINE".encode("ascii")
b = "00foobarfoobar00".encode("ascii")
self.assertEqual(
f(key, f(key, b), decrypt=True),
b
)
def test_aes_cbc(self):
f = matasano.blocks.aes_cbc
key = "YELLOW SUBMARINE".encode("ascii")
b = "00foobarfoobar00".encode("ascii")
iv = matasano.util.random_aes_key()
self.assertEqual(
f(key, f(key, b)[0], decrypt=True)[0],
b
)
self.assertEqual(
f(key, f(key, b, iv=iv)[0], decrypt=True, iv=iv)[0],
b
)
def test_aes_ctr(self):
f = matasano.blocks.aes_ctr
key = "YELLOW SUBMARINE".encode("ascii")
b = "00foobarfoobar00".encode("ascii")
self.assertEqual(
f(key, f(key, b)[0])[0],
b
)
def test_bytes_in_blocks(self):
f = matasano.blocks.bytes_in_block
size = 16
self.assertEqual(
f(size, 0),
slice(0, size)
)
self.assertEqual(
f(size, 1),
slice(size, size * 2)
)
def test_bytes_to_block(self):
f = matasano.blocks.bytes_to_block
size = 16
self.assertEqual(
f(size, 0),
slice(0, size)
)
self.assertEqual(
f(size, 1),
slice(0, size * 2)
)
self.assertEqual(
f(size, 10),
slice(0, size * 11)
)
def test_ith_byte_in_block(self):
f = matasano.blocks.ith_byte_block
size = 16
self.assertEqual(
f(size, 0),
0
)
self.assertEqual(
f(size, 1),
0
)
self.assertEqual(
f(size, size),
1
)
self.assertEqual(
f(size, size * 2),
2
)
if __name__ == '__main__':
unittest.main()
| 3.34375 | 3 |
dragonkeeper/utils.py | chriskr/dragonkeeper | 5 | 12790036 | import re
from common import Singleton
from maps import status_map, format_type_map, message_type_map, message_map
def _parse_json(msg):
payload = None
try:
payload = eval(msg.replace(",null", ",None"))
except:
        print "failed evaluating message in parse_json"
return payload
try:
from json import loads as parse_json
except:
globals()['parse_json'] = _parse_json
MSG_KEY_TYPE = 0
MSG_KEY_SERVICE = 1
MSG_KEY_COMMAND_ID = 2
MSG_KEY_FORMAT = 3
MSG_KEY_STATUS = 4
MSG_KEY_TAG = 5
MSG_KEY_CLIENT_ID = 6
MSG_KEY_UUID = 7
MSG_KEY_PAYLOAD = 8
MSG_VALUE_COMMAND = 1
MSG_VALUE_FORMAT_JSON = 1
MSG_TYPE_ERROR = 4
INDENT = " "
MAX_STR_LENGTH = 50
class TagManager(Singleton):
def __init__(self):
self._counter = 1
self._tags = {}
def _get_empty_tag(self):
tag = 1
while True:
if not tag in self._tags:
return tag
tag += 1
def set_callback(self, callback, args={}):
tag = self._get_empty_tag()
self._tags[tag] = (callback, args)
return tag
def handle_message(self, msg):
if msg[MSG_KEY_TAG] in self._tags:
callback, args = self._tags.pop(msg[MSG_KEY_TAG])
callback(msg, **args)
return True
return False
tag_manager = TagManager()
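# Illustrative flow (sketch, not from the original source): a callback is
# registered against a tag and dispatched when a response with that tag arrives.
#   tag = tag_manager.set_callback(my_callback, {"service": "scope"})
#   ...send an STP/1 command with MSG_KEY_TAG set to `tag`...
#   tag_manager.handle_message(response)  # calls my_callback(response, service="scope")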
class MessageMap(object):
    """ Creates a description map of all messages,
    used to pretty print the payloads by adding the keys to all values"""
COMMAND_INFO = 7
COMMAND_HOST_INFO = 10
COMMAND_MESSAGE_INFO = 11
COMMAND_ENUM_INFO = 12
INDENT = " "
filter = None
@staticmethod
def set_filter(filter):
def create_check(check):
def check_default(in_str):
return check == in_str
def check_endswith(in_str):
return in_str.endswith(check)
def check_startswith(in_str):
return in_str.startswith(check)
def check_pass(in_str):
return True
if check in "*":
return check_pass
if check.endswith('*'):
check = check.strip('*')
return check_startswith
if check.startswith('*'):
check = check.strip('*')
return check_endswith
return check_default
content = filter
filter_obj = None
import os
if os.path.isfile(filter):
try:
file = open(filter, 'rb')
content = file.read()
file.close()
except:
print "reading filter failed"
try:
code = compile(content.replace('\r\n', '\n'), filter, 'eval')
filter_obj = eval(code)
except:
print "parsing the specified filter failed"
print "parsed filter:", filter_obj
if filter_obj:
for service in filter_obj:
for type in filter_obj[service]:
filter_obj[service][type] = (
[create_check(check) for check in filter_obj[service][type]]
)
MessageMap.filter = filter_obj
@staticmethod
def has_map():
return bool(message_map)
@staticmethod
def get_cmd_name(service, cmd_id):
name = None
if message_map:
name = message_map.get(service, {}).get(int(cmd_id), {}).get("name")
return name or cmd_id
def __init__(self, services, connection, callback, context, map=message_map):
self._services = services
self.scope_major_version = 0
self.scope_minor_version = 0
self._service_infos = {}
self._map = map
self._connection = connection
self._callback = callback
self._print_map = context.print_message_map
self._print_map_services = filter(bool, context.print_message_map_services.split(','))
self._connection.set_msg_handler(self.default_msg_handler)
self.request_host_info()
# ===========================
# get the messages from scope
# ===========================
def request_host_info(self):
for service in self._services:
if not service.startswith('core-') and not service.startswith('stp-'):
self._service_infos[service] = {
'parsed': False,
'parsed_enums': False,
'raw_infos': None,
'raw_messages': None
}
self._connection.send_command_STP_1({
MSG_KEY_TYPE: MSG_VALUE_COMMAND,
MSG_KEY_SERVICE: "scope",
MSG_KEY_COMMAND_ID: self.COMMAND_HOST_INFO,
MSG_KEY_FORMAT: MSG_VALUE_FORMAT_JSON,
MSG_KEY_TAG: tag_manager.set_callback(self.handle_host_info),
MSG_KEY_PAYLOAD: '[]'
})
def handle_host_info(self, msg):
if not msg[MSG_KEY_STATUS]:
host_info = parse_json(msg[MSG_KEY_PAYLOAD])
if host_info:
for service in host_info[5]:
if service[0] == "scope":
versions = map(int, service[1].split('.'))
self.scope_major_version = versions[0]
self.scope_minor_version = versions[1]
if self.scope_minor_version >= 1:
self.request_enums()
else:
self.request_infos()
else:
print "getting host info failed"
def request_enums(self):
for service in self._service_infos:
tag = tag_manager.set_callback(self.handle_enums, {'service': service})
self._connection.send_command_STP_1({
MSG_KEY_TYPE: MSG_VALUE_COMMAND,
MSG_KEY_SERVICE: "scope",
MSG_KEY_COMMAND_ID: self.COMMAND_ENUM_INFO,
MSG_KEY_FORMAT: MSG_VALUE_FORMAT_JSON,
MSG_KEY_TAG: tag,
MSG_KEY_PAYLOAD: '["%s", [], 1]' % service
})
def handle_enums(self, msg, service):
if not msg[MSG_KEY_STATUS] and service in self._service_infos:
enum_list = parse_json(msg[MSG_KEY_PAYLOAD])
if not enum_list == None:
self._service_infos[service]['raw_enums'] = enum_list and enum_list[0] or []
self._service_infos[service]['parsed_enums'] = True
if self.check_map_complete('parsed_enums'):
self.request_infos()
else:
print "handling of message failed in handle_messages in MessageMap:"
print msg
def request_infos(self):
for service in self._service_infos:
tag = tag_manager.set_callback(self.handle_info, {"service": service})
self._connection.send_command_STP_1({
MSG_KEY_TYPE: MSG_VALUE_COMMAND,
MSG_KEY_SERVICE: "scope",
MSG_KEY_COMMAND_ID: self.COMMAND_INFO,
MSG_KEY_FORMAT: MSG_VALUE_FORMAT_JSON,
MSG_KEY_TAG: tag,
MSG_KEY_PAYLOAD: '["%s"]' % service
})
def handle_info(self, msg, service):
if not msg[MSG_KEY_STATUS] and service in self._service_infos:
command_list = parse_json(msg[MSG_KEY_PAYLOAD])
if command_list:
self._service_infos[service]['raw_infos'] = command_list
tag = tag_manager.set_callback(self.handle_messages, {'service': service})
self._connection.send_command_STP_1({
MSG_KEY_TYPE: MSG_VALUE_COMMAND,
MSG_KEY_SERVICE: "scope",
MSG_KEY_COMMAND_ID: self.COMMAND_MESSAGE_INFO,
MSG_KEY_FORMAT: MSG_VALUE_FORMAT_JSON,
MSG_KEY_TAG: tag,
MSG_KEY_PAYLOAD: '["%s", [], 1, 1]' % service
})
else:
print "handling of message failed in handle_info in MessageMap:"
print msg
def handle_messages(self, msg, service):
if not msg[MSG_KEY_STATUS] and service in self._service_infos:
message_list = parse_json(msg[MSG_KEY_PAYLOAD])
self._service_infos[service]['raw_messages'] = message_list
# the message list can be empty (e.g. for the 'core' service)
if message_list:
self.parse_raw_lists(service)
self._service_infos[service]['parsed'] = True
if self.check_map_complete('parsed'):
self.finalize()
else:
print "handling of message failed in handle_messages in MessageMap:"
print msg
def finalize(self):
if self._print_map:
self.pretty_print_message_map()
self._connection.clear_msg_handler()
self._callback()
self._services = None
self._service_infos = None
self._map = None
self._connection = None
self._callback = None
def check_map_complete(self, prop):
for service in self._service_infos:
if not self._service_infos[service][prop]:
return False
return True
def default_msg_handler(self, msg):
if not tag_manager.handle_message(msg):
print "handling of message failed in default_msg_handler in MessageMap:"
print msg
# =======================
# create the message maps
# =======================
def get_msg(self, list, id):
MSG_ID = 0
for msg in list:
if msg[MSG_ID] == id:
return msg
return None
def get_enum(self, list, id):
enums = self.get_msg(list, id)
name = enums[1]
dict = {}
if enums and len(enums) == 3:
for enum in enums[2]:
dict[enum[1]] = enum[0]
return name, dict
def parse_msg(self, msg, msg_list, parsed_list, raw_enums, ret):
NAME = 1
FIELD_LIST = 2
FIELD_NAME = 0
FIELD_TYPE = 1
FIELD_NUMBER = 2
FIELD_Q = 3
FIELD_ID = 4
ENUM_ID = 5
Q_MAP = {
0: "required",
1: "optional",
2: "repeated"
}
if msg:
for field in msg[FIELD_LIST]:
name = field[FIELD_NAME]
field_obj = {
'name': name,
'q': 'required',
'type': field[FIELD_TYPE],
}
if (len(field) - 1) >= FIELD_Q and field[FIELD_Q]:
field_obj['q'] = Q_MAP[field[FIELD_Q]]
if (len(field) - 1) >= FIELD_ID and field[FIELD_ID]:
if name in parsed_list:
field_obj['message'] = parsed_list[name]['message']
field_obj['message_name'] = parsed_list[name]['message_name']
else:
parsed_list[name] = field_obj
msg = self.get_msg(msg_list, field[FIELD_ID])
field_obj['message_name'] = msg and msg[1] or 'default'
field_obj['message'] = []
self.parse_msg(msg, msg_list, parsed_list,
raw_enums, field_obj['message'])
if (len(field) - 1) >= ENUM_ID and field[ENUM_ID]:
name, numbers = self.get_enum(raw_enums, field[ENUM_ID])
field_obj['enum'] = {'name': name, 'numbers': numbers}
ret.append(field_obj)
return ret
def parse_raw_lists(self, service):
MSG_TYPE_COMMAND = 1
MSG_TYPE_RESPONSE = 2
MSG_TYPE_EVENT = 3
# Command Info
COMMAND_LIST = 0
EVENT_LIST = 1
NAME = 0
NUMBER = 1
MESSAGE_ID = 2
RESPONSE_ID = 3
# Command MessageInfo
MSG_LIST = 0
MSG_ID = 0
map = self._map[service] = {}
command_list = self._service_infos[service]['raw_infos'][COMMAND_LIST]
msgs = self._service_infos[service]['raw_messages'][MSG_LIST]
enums = self._service_infos[service].get('raw_enums', [])
for command in command_list:
command_obj = map[command[NUMBER]] = {}
command_obj['name'] = command[NAME]
msg = self.get_msg(msgs, command[MESSAGE_ID])
command_obj[MSG_TYPE_COMMAND] = self.parse_msg(msg, msgs, {}, enums, [])
msg = self.get_msg(msgs, command[RESPONSE_ID])
command_obj[MSG_TYPE_RESPONSE] = self.parse_msg(msg, msgs, {}, enums, [])
if len(self._service_infos[service]['raw_infos']) - 1 >= EVENT_LIST:
event_list = self._service_infos[service]['raw_infos'][EVENT_LIST]
for event in event_list:
event_obj = map[event[NUMBER]] = {}
event_obj['name'] = event[NAME]
msg = self.get_msg(msgs, event[MESSAGE_ID])
event_obj[MSG_TYPE_EVENT] = self.parse_msg(msg, msgs, {}, enums, [])
# =========================
# pretty print message maps
# =========================
def pretty_print_object(self, obj, indent, c_list, name=''):
INDENT = ' '
c_list = [] + c_list
if name:
name = '%s: ' % self.quote(name)
print '%s%s{' % (indent * INDENT, name)
indent += 1
keys = obj.keys()
for key in keys:
if not (isinstance(obj[key], dict) or isinstance(obj[key], list)):
print '%s%s: %s,' % (indent * INDENT,
self.quote(key), self.quote(obj[key]))
for key in keys:
if isinstance(obj[key], dict):
self.pretty_print_object(obj[key], indent, c_list, key)
for key in keys:
if isinstance(obj[key], list):
if key == "message":
if obj['message_name'] in c_list:
print '%s"message": <circular reference>,' % (
indent * INDENT)
continue
else:
c_list.append(obj['message_name'])
if obj[key]:
print '%s%s: [' % (indent * INDENT, self.quote(key))
for item in obj[key]:
self.pretty_print_object(item, indent + 1, c_list)
print '%s],' % (indent * INDENT)
else:
print '%s%s: [],' % (indent * INDENT, self.quote(key))
indent -= 1
print '%s},' % (indent * INDENT)
def pretty_print_message_map(self):
print 'message map:'
print '{'
for service in self._map:
if not self._print_map_services or service in self._print_map_services:
self.pretty_print_object(self._map[service], 1, [], service)
print '}'
def quote(self, value):
return isinstance(value, str) and '"%s"' % value or value
# ===========================
# pretty print STP/1 messages
# ===========================
def pretty_print_payload_item(indent, name, definition, item, verbose_debug=False):
if item and "message" in definition:
print "%s%s:" % (indent * INDENT, name)
pretty_print_payload(item, definition["message"], indent=indent+1)
else:
value = item
if "enum" in definition:
value = "%s (%s)" % (definition['enum']['numbers'][item], item)
elif item == None:
value = "null"
elif isinstance(item, unicode):
if not verbose_debug and len(item) > MAX_STR_LENGTH:
value = "\"%s...\"" % item[0:MAX_STR_LENGTH]
else:
value = "\"%s\"" % item
try:
print "%s%s: %s" % ( indent * INDENT, name, value)
except:
print "%s%s: %s%s" % ( indent * INDENT, name, value[0:100], '...')
def pretty_print_payload(payload, definitions, indent=2, verbose_debug=False):
for item, definition in zip(payload, definitions):
if definition["q"] == "repeated":
print "%s%s:" % (indent * INDENT, definition['name'])
for sub_item in item:
pretty_print_payload_item(
indent + 1,
definition['name'].replace("List", ""),
definition,
sub_item,
verbose_debug=verbose_debug)
else:
pretty_print_payload_item(
indent,
definition['name'],
definition,
item,
verbose_debug=verbose_debug)
def pretty_print(prelude, msg, format, format_payload, verbose_debug=False):
service = msg[MSG_KEY_SERVICE]
command_def = message_map.get(service, {}).get(msg[MSG_KEY_COMMAND_ID], None)
command_name = command_def and command_def.get("name", None) or \
'<id: %d>' % msg[MSG_KEY_COMMAND_ID]
message_type = message_type_map[msg[MSG_KEY_TYPE]]
if not MessageMap.filter or check_message(service, command_name, message_type):
print prelude
if format:
print " message type:", message_type
print " service:", service
print " command:", command_name
print " format:", format_type_map[msg[MSG_KEY_FORMAT]]
if MSG_KEY_STATUS in msg:
print " status:", status_map[msg[MSG_KEY_STATUS]]
if MSG_KEY_CLIENT_ID in msg:
print " cid:", msg[MSG_KEY_CLIENT_ID]
if MSG_KEY_UUID in msg:
print " uuid:", msg[MSG_KEY_UUID]
if MSG_KEY_TAG in msg:
print " tag:", msg[MSG_KEY_TAG]
if format_payload and not msg[MSG_KEY_TYPE] == MSG_TYPE_ERROR:
payload = parse_json(msg[MSG_KEY_PAYLOAD])
print " payload:"
if payload and command_def:
definition = command_def.get(msg[MSG_KEY_TYPE], None)
try:
pretty_print_payload(payload, definition, verbose_debug=verbose_debug)
except Exception, msg:
# print msg
                    print "failed to pretty print the payload. wrong message structure?"
print "%spayload: %s" % (INDENT, payload)
print "%sdefinition: %s" % (INDENT, definition)
else:
print " ", msg[MSG_KEY_PAYLOAD]
print "\n"
else:
print " payload:", msg[MSG_KEY_PAYLOAD], "\n"
else:
print msg
def check_message(service, command, message_type):
if MessageMap.filter and service in MessageMap.filter and \
message_type in MessageMap.filter[service]:
for check in MessageMap.filter[service][message_type]:
if check(command):
return True
return False
# ===========================
# pretty print STP/0 messages
# ===========================
def pretty_print_XML(prelude, in_string, format):
"""To pretty print STP 0 messages"""
LF = "\n"
TEXT = 0
TAG = 1
CLOSING_TAG = 2
OPENING_CLOSING_TAG = 3
OPENING_TAG = 4
print prelude
if format:
if in_string.startswith("<"):
in_string = re.sub(r"<\?[^>]*>", "", in_string)
ret = []
        indent_count = 0
        last_match = None
matches_iter = re.finditer(r"([^<]*)(<(\/)?[^>/]*(\/)?>)", in_string)
try:
while True:
m = matches_iter.next()
matches = m.groups()
if matches[CLOSING_TAG]:
indent_count -= 1
if matches[TEXT] or last_match == OPENING_TAG:
ret.append(m.group())
else:
ret.extend([LF, indent_count * INDENT, m.group()])
last_match = CLOSING_TAG
elif matches[OPENING_CLOSING_TAG] or "<![CDATA[" in matches[1]:
last_match = OPENING_CLOSING_TAG
ret.extend([LF, indent_count * INDENT, m.group()])
else:
last_match = OPENING_TAG
ret.extend([LF, indent_count * INDENT, m.group()])
indent_count += 1
except StopIteration:
pass
except:
raise
else:
ret = [in_string]
in_string = "".join(ret).lstrip(LF)
print in_string
| 2.53125 | 3 |
preprocesamiento/artists_track_extract.py | MINE4201grupo2/sr_taller_1 | 0 | 12790037 | import pandas as pd
import mysql.connector
from mysql.connector import errorcode
import math
import sys
import csv
#Configuración de la conexión a Mysql
try:
cnx = mysql.connector.connect(user='user_taller1', password='<PASSWORD>.', host='127.0.0.1', database='taller1')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
cursor = cnx.cursor()
#Lectura del dataframe
columns_data = ['userId','timestamp','musicbrainz-artist-id', 'artist-name','trackId','trackname']
#df_use_habits= pd.DataFrame(columns = ['userId','timestamp','musicbrainz-artist-id', 'artist-name','trackId','trackname'])
df_artist= pd.DataFrame(columns = ['musicbrainz-artist-id', 'artist-name'])
df_tracks= pd.DataFrame(columns = ['trackId','trackname'] )
chunksize = 10 ** 6
#with pd.read_csv('data/userid-timestamp-artid-artname-traid-traname.tsv', encoding="utf-8", delimiter='\r', chunksize=chunksize, header=None) as reader:
with pd.read_csv('data/clean.tsv', encoding="utf-8", delimiter='\t', chunksize=chunksize, header=None, names=columns_data) as reader:
for chunk in reader:
df_artist = df_artist.append(chunk[['musicbrainz-artist-id', 'artist-name']])
df_tracks = df_tracks.append(chunk[['trackId','trackname']])
#print(df_artist)
print("Finished reading file and building dataframes")
# Remove duplicates
df_artist= df_artist.drop_duplicates(keep='first')
df_artist = df_artist.reset_index(drop=True)
df_tracks= df_tracks.drop_duplicates(keep='first')
df_tracks = df_tracks.reset_index(drop=True)
# Create a new record
sql_tracks = "INSERT INTO `tracks` (`music_track_id`, `music_track_name`) VALUES (%s, %s)"
sql_artists = "INSERT INTO `artists` (`music_artist_id`, `music_artist_name`) VALUES (%s, %s)"
def isNaN(string):
return string != string
for i in df_tracks.index:
# Execute the query
var1= None if isNaN(df_tracks['trackId'][i]) else ''.join([c for c in df_tracks['trackId'][i].strip() if c not in ['\t', '\n', '\f', '\r','\u000B','\u0085','\u2028','\u2029','\u0022', '\u005C', '\u0027', '"']])
var2= None if isNaN(df_tracks['trackname'][i]) else ''.join([c for c in df_tracks['trackname'][i].strip() if c not in ['\t', '\n', '\f', '\r','\u000B','\u0085','\u2028','\u2029','\u0022', '\u005C', '\u0027', '"']])
#print(var2)
try:
cursor.execute(sql_tracks, (var1,var2))
except mysql.connector.errors.DataError as err:
        print("Track var 1: " + str(var1))
        print("Track var 2: " + str(var2))
        print("Failed to insert track: " + str(df_tracks['trackname'][i]))
sys.exit(1)
# the connection is not autocommited by default. So we must commit to save our changes.
cnx.commit()
for i in df_artist.index:
# Execute the query
var1= None if isNaN(df_artist['musicbrainz-artist-id'][i]) else ''.join([c for c in df_artist['musicbrainz-artist-id'][i].strip() if c not in ['\t', '\n', '\f', '\r','\u000B','\u0085','\u2028','\u2029','\u0022', '\u005C', '\u0027', '"']])
var2= None if isNaN(df_artist['artist-name'][i]) else ''.join([c for c in df_artist['artist-name'][i].strip() if c not in ['\t', '\n', '\f', '\r','\u000B','\u0085','\u2028','\u2029','\u0022', '\u005C','\u0027', '"' ]])
#print(var2)
try:
cursor.execute(sql_artists, (var1,var2))
except mysql.connector.errors.DataError as err:
        print("Artists var 1: " + str(var1))
        print("Artists var 2: " + str(var2))
sys.exit(1)
# the connection is not autocommited by default. So we must commit to save our changes.
cnx.commit()
#print(df_artist)
cursor.close()
cnx.close() | 2.765625 | 3 |
dogqc/querylib.py | cakebytheoceanLuo/dogqc | 12 | 12790038 | from dogqc.code import Code
# includes
def getIncludes ():
code = Code()
code.add("#include <list>")
code.add("#include <unordered_map>")
code.add("#include <vector>")
code.add("#include <iostream>")
code.add("#include <ctime>")
code.add("#include <limits.h>")
code.add("#include <float.h>")
code.add("#include \"../dogqc/include/csv.h\"")
code.add("#include \"../dogqc/include/util.h\"")
code.add("#include \"../dogqc/include/mappedmalloc.h\"")
return code
def getCudaIncludes ():
code = Code()
code.add("#include \"../dogqc/include/util.cuh\"")
code.add("#include \"../dogqc/include/hashing.cuh\"")
return code
class Type ( object ):
MULTI_HT = "multi_ht"
UNIQUE_HT = "unique_ht"
AGG_HT = "agg_ht"
class Const ( object ):
ALL_LANES = "ALL_LANES"
class Krnl ( object ):
INIT_AGG_HT = "initAggHT"
INIT_ARRAY = "initArray"
INIT_UNIQUE_HT = "initUniqueHT"
INIT_MULTI_HT = "initMultiHT"
# functions
class Fct ( object ):
HASH_BUILD_UNIQUE = "hashBuildUnique"
HASH_PROBE_UNIQUE = "hashProbeUnique"
HASH_COUNT_MULTI = "hashCountMulti"
HASH_INSERT_MULTI = "hashInsertMulti"
HASH_PROBE_MULTI = "hashProbeMulti"
HASH = "hash"
HASH_AGG_BUCKET = "hashAggregateGetBucket"
HASH_AGG_CHECK = "hashAggregateFindBucket"
| 2.234375 | 2 |
ciservice/apiv1/resource_upload.py | idekerlab/ci-service-template | 2 | 12790039 | # -*- coding: utf-8 -*-
import logging
import requests as rqc
from flask.ext.restful import Resource
from flask import Response, request
from flask import stream_with_context
INPUT_DATA_SERVER_LOCATION = 'http://dataserver:3000/'
class UploadResource(Resource):
def get(self):
req = rqc.get(INPUT_DATA_SERVER_LOCATION, stream=True)
return Response(
stream_with_context(req.iter_content()),
content_type=req.headers['content-type']
)
def post(self):
"""
Stream input to data file server.
:return:
"""
logging.debug('UPLOAD POST')
req = rqc.post(INPUT_DATA_SERVER_LOCATION + 'data',
json=request.stream.read(),
stream=True)
return Response(
stream_with_context(req.iter_content()),
content_type=req.headers['content-type']
)
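# Illustrative client call (sketch; assumes this resource is registered under a
# /upload route, which is not shown in this module):
#   curl -X POST http://<service-host>/upload -d @input.json
# The request body is streamed to the data server and its response is streamed back.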
| 2.515625 | 3 |
fluent_comments/tests/test_utils.py | ephes/django-fluent-comments | 88 | 12790040 | from django.test import SimpleTestCase
from fluent_comments.utils import split_words
class TestUtils(SimpleTestCase):
def test_split_words(self):
text = """college scholarship essays - <a href=" https://collegeessays.us/ ">how to write a good introduction for a college essay</a>
boston university college essay <a href=" https://collegeessays.us/ ">how to write an essay for college</a>
https://collegeessays.us/
http://www.monkeyface.com/__media__/js/netsoltrademark.php?d=collegeessays.us"""
self.assertEqual(
split_words(text),
{
"__media__",
"a",
"an",
"boston",
"college",
"collegeessays",
"com",
"d",
"essay",
"essays",
"for",
"good",
"how",
"href",
"http",
"https",
"introduction",
"js",
"monkeyface",
"netsoltrademark",
"php",
"scholarship",
"to",
"university",
"us",
"write",
"www",
},
)
| 2.859375 | 3 |
TLA/Data/get_data.py | tusharsarkar3/TLA | 50 | 12790041 | from TLA.Data.get_tweets import get_data_for_lang
from TLA.Data.Pre_Process_Tweets import pre_process_tweet
import os
import pandas as pd
import argparse
from distutils.sysconfig import get_python_lib
def store_data(language, process = False):
"""
    Creates .csv files of tweets for the language specified.
    Can create processed datasets if the process flag is set to True.
    Input-> language - a string specifying the language you want the tweets in
            process - a Boolean to enable pre-processing of the tweets
    Output-> None; the extracted (and optionally processed) tweets are written as .csv files to the datasets directory
"""
directory = "datasets"
parent_dir = get_python_lib() + "/TLA/Data"
path = os.path.join(parent_dir, directory)
if os.path.isdir(path) == False:
os.mkdir(path)
df_dict = get_data_for_lang(language)
if process == True:
        for file in os.listdir(path):
            file_path = os.path.join(path, file)
            df = pd.read_csv(file_path)
            df_processed = pre_process_tweet(df)
            df_processed.to_csv(file_path, sep=',', index=False)
if __name__ == "__main__":
my_parser = argparse.ArgumentParser()
my_parser.add_argument('--lang', action='store', type=str)
my_parser.add_argument('--process', action='store', type=bool)
args = my_parser.parse_args()
if args.process == None:
store_data(args.lang)
else:
store_data(args.lang, args.process)
| 2.96875 | 3 |
api/tacticalrmm/scripts/models.py | HighTech-Grace-Solutions/tacticalrmm | 0 | 12790042 | import base64
import re
from loguru import logger
from typing import Any, List, Union
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from logs.models import BaseAuditModel
SCRIPT_SHELLS = [
("powershell", "Powershell"),
("cmd", "Batch (CMD)"),
("python", "Python"),
]
SCRIPT_TYPES = [
("userdefined", "User Defined"),
("builtin", "Built In"),
]
logger.configure(**settings.LOG_CONFIG)
class Script(BaseAuditModel):
name = models.CharField(max_length=255)
description = models.TextField(null=True, blank=True)
filename = models.CharField(max_length=255) # deprecated
shell = models.CharField(
max_length=100, choices=SCRIPT_SHELLS, default="powershell"
)
script_type = models.CharField(
max_length=100, choices=SCRIPT_TYPES, default="userdefined"
)
args = ArrayField(
models.TextField(null=True, blank=True),
null=True,
blank=True,
default=list,
)
favorite = models.BooleanField(default=False)
category = models.CharField(max_length=100, null=True, blank=True)
code_base64 = models.TextField(null=True, blank=True)
default_timeout = models.PositiveIntegerField(default=90)
def __str__(self):
return self.name
@property
def code(self):
if self.code_base64:
base64_bytes = self.code_base64.encode("ascii", "ignore")
return base64.b64decode(base64_bytes).decode("ascii", "ignore")
else:
return ""
@classmethod
def load_community_scripts(cls):
import json
import os
from pathlib import Path
from django.conf import settings
# load community uploaded scripts into the database
# skip ones that already exist, only updating name / desc in case it changes
# for install script
if not settings.DOCKER_BUILD:
scripts_dir = os.path.join(Path(settings.BASE_DIR).parents[1], "scripts")
# for docker
else:
scripts_dir = settings.SCRIPTS_DIR
with open(
os.path.join(settings.BASE_DIR, "scripts/community_scripts.json")
) as f:
info = json.load(f)
for script in info:
if os.path.exists(os.path.join(scripts_dir, script["filename"])):
s = cls.objects.filter(script_type="builtin").filter(
name=script["name"]
)
category = (
script["category"] if "category" in script.keys() else "Community"
)
default_timeout = (
int(script["default_timeout"])
if "default_timeout" in script.keys()
else 90
)
args = script["args"] if "args" in script.keys() else []
if s.exists():
i = s.first()
i.name = script["name"]
i.description = script["description"]
i.category = category
i.shell = script["shell"]
i.default_timeout = default_timeout
i.args = args
with open(os.path.join(scripts_dir, script["filename"]), "rb") as f:
script_bytes = (
f.read().decode("utf-8").encode("ascii", "ignore")
)
i.code_base64 = base64.b64encode(script_bytes).decode("ascii")
i.save(
update_fields=[
"name",
"description",
"category",
"default_timeout",
"code_base64",
"shell",
"args",
]
)
else:
print(f"Adding new community script: {script['name']}")
with open(os.path.join(scripts_dir, script["filename"]), "rb") as f:
script_bytes = (
f.read().decode("utf-8").encode("ascii", "ignore")
)
code_base64 = base64.b64encode(script_bytes).decode("ascii")
cls(
code_base64=code_base64,
name=script["name"],
description=script["description"],
filename=script["filename"],
shell=script["shell"],
script_type="builtin",
category=category,
default_timeout=default_timeout,
args=args,
).save()
@staticmethod
def serialize(script):
# serializes the script and returns json
from .serializers import ScriptSerializer
return ScriptSerializer(script).data
@classmethod
def parse_script_args(
cls, agent, shell: str, args: List[str] = list()
) -> Union[List[str], None]:
from core.models import CustomField
        if not args:
            return []
temp_args = list()
# pattern to match for injection
pattern = re.compile(".*\\{\\{(.*)\\}\\}.*")
for arg in args:
match = pattern.match(arg)
if match:
# only get the match between the () in regex
string = match.group(1)
# split by period if exists. First should be model and second should be property
temp = string.split(".")
# check for model and property
if len(temp) != 2:
# ignore arg since it is invalid
continue
if temp[0] == "client":
model = "client"
obj = agent.client
elif temp[0] == "site":
model = "site"
obj = agent.site
elif temp[0] == "agent":
model = "agent"
obj = agent
else:
# ignore arg since it is invalid
continue
if hasattr(obj, temp[1]):
value = getattr(obj, temp[1])
elif CustomField.objects.filter(model=model, name=temp[1]).exists():
field = CustomField.objects.get(model=model, name=temp[1])
model_fields = getattr(field, f"{model}_fields")
value = None
if model_fields.filter(**{model: obj}).exists():
value = model_fields.get(**{model: obj}).value
if not value and field.default_value:
value = field.default_value
                    # check if value exists and if not use default
if value and field.type == "multiple":
value = format_shell_array(shell, value)
elif value and field.type == "checkbox":
value = format_shell_bool(shell, value)
if not value:
continue
else:
# ignore arg since property is invalid
continue
# replace the value in the arg and push to array
# log any unhashable type errors
try:
temp_args.append(re.sub("\\{\\{.*\\}\\}", value, arg)) # type: ignore
except Exception as e:
logger.error(e)
continue
else:
temp_args.append(arg)
return temp_args
def format_shell_array(shell: str, value: Any) -> str:
if shell == "cmd":
return "array args are not supported with batch"
elif shell == "powershell":
temp_string = ""
for item in value:
temp_string += item + ","
return temp_string.strip(",")
else: # python
temp_string = ""
for item in value:
temp_string += item + ","
return temp_string.strip(",")
def format_shell_bool(shell: str, value: Any) -> str:
if shell == "cmd":
return "1" if value else "0"
elif shell == "powershell":
return "$True" if value else "$False"
else: # python
return "True" if value else "False"
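# Illustrative expected outputs (sketch only; not executed here because importing
# this module requires a configured Django environment):
#   format_shell_array("powershell", ["a", "b"]) -> "a,b"
#   format_shell_bool("powershell", True) -> "$True"
#   format_shell_bool("cmd", False) -> "0"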
| 2 | 2 |
Lib/site-packages/pylint/message/__init__.py | edupyter/EDUPYTER38 | 0 | 12790043 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
"""All the classes related to Message handling."""
from pylint.message.message import Message
from pylint.message.message_definition import MessageDefinition
from pylint.message.message_definition_store import MessageDefinitionStore
from pylint.message.message_id_store import MessageIdStore
__all__ = [
"Message",
"MessageDefinition",
"MessageDefinitionStore",
"MessageIdStore",
]
| 1.570313 | 2 |
other_baselines_scripts/run_mendelian_experiments_more_baselines.py | carolineyuchen/MMRIV | 2 | 12790044 | import torch, add_path
import numpy as np
from baselines.all_baselines import Poly2SLS, Vanilla2SLS, DirectNN, \
GMM, DeepIV, AGMM
import os
import tensorflow
from MMR_IVs.util import ROOT_PATH, load_data
import random
random.seed(527)
def eval_model(model, test):
g_pred_test = model.predict(test.x)
mse = float(((g_pred_test - test.g) ** 2).mean())
return mse
def save_model(model, save_path, test):
g_pred = model.predict(test.x)
np.savez(save_path, x=test.w, y=test.y, g_true=test.g, g_hat=g_pred)
def run_experiment(scenario_name,mid,repid, num_reps=10, seed=527,training=False):
# set random seed
torch.manual_seed(seed)
np.random.seed(seed)
tensorflow.set_random_seed(seed)
train, dev, test = load_data(ROOT_PATH + "/data/mendelian/" + scenario_name+'.npz')
# result folder
folder = ROOT_PATH + "/results/mendelian/"+scenario_name+"/"
os.makedirs(folder, exist_ok=True)
means = []
times = []
for rep in range(num_reps):
# Not all methods are applicable in all scenarios
methods = []
# baseline methods
methods += [("DirectNN", DirectNN())]
methods += [("Vanilla2SLS", Vanilla2SLS())]
methods += [("Poly2SLS", Poly2SLS())]
methods += [("GMM", GMM(g_model="2-layer", n_steps=20))]
methods += [("AGMM", AGMM())]
methods += [("DeepIV", DeepIV())]
if training:
if rep < repid:
continue
elif rep >repid:
break
else:
pass
for method_name, method in methods[mid:mid+1]:
print("Running " + method_name +" " + str(rep))
file_name = "%s_%d.npz" % (method_name, rep)
save_path = os.path.join(folder, file_name)
model, time = method.fit(train.x, train.y, train.z, None)
np.save(folder+"%s_%d_time.npy" % (method_name, rep),time)
save_model(model, save_path, test)
test_mse = eval_model(model, test)
model_type_name = type(model).__name__
print("Test MSE of %s: %f" % (model_type_name, test_mse))
else:
means2 = []
times2 = []
for method_name, method in methods:
# print("Running " + method_name +" " + str(rep))
file_name = "%s_%d.npz" % (method_name, rep)
save_path = os.path.join(folder, file_name)
if os.path.exists(save_path):
res = np.load(save_path)
mse = float(((res['g_hat'] - res['g_true']) ** 2).mean())
# print('mse: {}'.format(mse))
means2 += [mse]
else:
print(save_path, ' not exists')
time_path = folder+"%s_%d_time.npy" % (method_name, rep)
if os.path.exists(time_path):
res = np.load(time_path)
times2 += [res]
else:
print(time_path, ' not exists')
if len(means2) == len(methods):
means += [means2]
if len(times2) == len(methods):
times += [times2]
#print('means',np.mean(np.array(means),axis=0))
#print('std',np.std(np.array(means),axis=0))
return means,times
if __name__ == "__main__":
scenarios = ["mendelian_{}_{}_{}".format(s, i, j) for s in [8,16,32] for i,j in [[1,1]]]
scenarios += ["mendelian_{}_{}_{}".format(16, i, j) for i, j in [[1, 0.5],[1, 2]]]
scenarios += ["mendelian_{}_{}_{}".format(16, i, j)for i, j in [[0.5, 1],[2, 1]]]
for sce in scenarios:
for mid in range(6):
for repid in range(10):
run_experiment(sce, mid, repid, training=True)
rows = []
for i in range(len(scenarios)):
s = scenarios[i]
means,times = run_experiment(s,0,0,training=False)
mean = np.mean(means,axis=0)
std = np.std(means,axis=0)
rows += [["({},{:.4f}) +- ({:.3f},{:.3f})".format(s,mean[j],std[j],std[j]) for j in range(len(mean))]]
print('time: ',np.mean(times,axis=0),np.std(times,axis=0))
# methods = np.array(["DirectNN","Vanilla2SLS","Poly2SLS","GMM","AGMM","DeepIV"])[:,None]
rows = np.array(rows)
#rows = np.vstack((methods,rows))
print('addplot+[mark=*,error bars/.cd, y dir=both,y explicit] coordinates'.join(['{'+'\n'.join(e)+'};\n' for e in rows.T]))
print('Tabulate Table:')
# print(tabulate(np.vstack((np.append([""],scenarios),rows)), headers='firstrow',tablefmt='latex'))
| 2.046875 | 2 |
webservices/common/models/costs.py | 18F/openFEC | 246 | 12790045 | from sqlalchemy.dialects.postgresql import TSVECTOR
from .base import db
class CommunicationCost(db.Model):
__tablename__ = 'ofec_communication_cost_mv'
sub_id = db.Column(db.Integer, primary_key=True)
original_sub_id = db.Column('orig_sub_id', db.Integer, index=True)
candidate_id = db.Column('cand_id', db.String, index=True)
committee_id = db.Column('cmte_id', db.String, index=True)
committee_name = db.Column(db.String)
pdf_url = db.Column(db.String)
candidate_name = db.Column('s_o_cand_nm', db.String)
candidate_last_name = db.Column('s_o_cand_l_nm', db.String)
candidate_middle_name = db.Column('s_o_cand_m_nm', db.String)
candidate_first_name = db.Column('s_o_cand_f_nm', db.String)
candidate_office_state = db.Column('s_o_cand_office_st', db.String, index=True)
state_full = db.Column('s_o_cand_office_st_desc', db.String)
candidate_office_district = db.Column('s_o_cand_office_district', db.String, index=True)
candidate_office = db.Column('s_o_cand_office', db.String, index=True)
candidate_office_full =db.Column('s_o_cand_office_desc', db.String)
transaction_date = db.Column('communication_dt', db.Date, index=True)
transaction_amount = db.Column('communication_cost', db.Numeric(30, 2), index=True)
transaction_type = db.Column('transaction_tp', db.String)
communication_type = db.Column('communication_tp', db.String, index=True)
communication_type_full = db.Column('communication_tp_desc', db.String)
communication_class = db.Column('communication_class', db.String, index=True)
purpose = db.Column('communication_class_desc', db.String, index=True)
support_oppose_indicator = db.Column('s_o_ind', db.String, index=True)
#new columns added from ware house transition
action_code = db.Column('action_cd', db.String)
action_code_full = db.Column('action_cd_desc', db.String)
primary_general_indicator = db.Column('s_o_rpt_pgi', db.String)
primary_general_indicator_description = db.Column('s_o_rpt_pgi_desc', db.String)
report_type = db.Column('rpt_tp', db.String)
report_year = db.Column('rpt_yr', db.Integer)
cycle = db.Column('election_cycle', db.Integer, index=True)
form_type_code = db.Column('filing_form', db.String, index=True)
schedule_type = db.Column(db.String, index=True)
schedule_type_full = db.Column('schedule_type_desc', db.String)
tran_id = db.Column(db.String)
file_number = db.Column('file_num', db.Integer)
image_number = db.Column('image_num', db.String, index=True)
class Electioneering(db.Model):
__tablename__ = 'ofec_electioneering_mv'
idx = db.Column(db.Integer, primary_key=True)
committee_id = db.Column('cmte_id', db.String, index=True)
committee_name = db.Column('cmte_nm', db.String)
candidate_id = db.Column('cand_id', db.String, index=True)
candidate_name = db.Column('cand_name', db.String)
candidate_office = db.Column('cand_office', db.String, index=True)
candidate_district = db.Column('cand_office_district', db.String, index=True)
candidate_state = db.Column('cand_office_st', db.String, index=True)
beginning_image_number = db.Column('f9_begin_image_num', db.String, index=True)
sb_image_num = db.Column(db.String, index=True)
sub_id = db.Column(db.Integer, doc="The identifier for each electioneering record")
link_id = db.Column(db.Integer)
sb_link_id = db.Column(db.String)
number_of_candidates = db.Column(db.Numeric)
calculated_candidate_share = db.Column('calculated_cand_share', db.Numeric(30, 2), doc="If an electioneering cost targets several candidates, the total cost is divided by the number of candidates. If it only mentions one candidate the full cost of the communication is listed.")
communication_date = db.Column('comm_dt', db.Date, doc='It is the airing, broadcast, cablecast or other dissemination of the communication')
public_distribution_date = db.Column('pub_distrib_dt', db.Date, doc='The pubic distribution date is the date that triggers disclosure of the electioneering communication (date reported on page 1 of Form 9)')
disbursement_date = db.Column('disb_dt', db.Date, index=True, doc='Disbursement date includes actual disbursements and execution of contracts creating an obligation to make disbursements (SB date of disbursement)')
disbursement_amount = db.Column('reported_disb_amt', db.Numeric(30, 2), index=True)
purpose_description = db.Column('disb_desc', db.String)
report_year = db.Column('rpt_yr', db.Integer, index=True)
file_number = db.Column('file_num', db.Integer)
amendment_indicator = db.Column('amndt_ind', db.String)
receipt_date = db.Column('receipt_dt', db.Date)
election_type_raw = db.Column('election_tp', db.String)
pdf_url = db.Column(db.String)
purpose_description_text = db.Column(TSVECTOR)
@property
def election_type(self):
return self.election_type_raw[:1]
| 2.203125 | 2 |
mars/tensor/reduction/__init__.py | sighingnow/mars | 0 | 12790046 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .sum import sum, TensorSum
from .nansum import nansum, TensorNanSum
from .prod import prod, TensorProd
from .nanprod import nanprod, TensorNanProd
from .max import max, TensorMax
from .nanmax import nanmax, TensorNanMax
from .min import min, TensorMin
from .nanmin import nanmin, TensorNanMin
from .all import all, TensorAll
from .any import any, TensorAny
from .mean import mean, TensorMean, TensorMeanChunk, TensorMeanCombine
from .nanmean import nanmean, TensorNanMean, TensorNanMeanChunk, TensorMeanCombine
from .argmax import argmax, TensorArgmax, TensorArgmaxMap, TensorArgmaxCombine
from .nanargmax import nanargmax, TensorNanArgmax, \
TensorNanArgmaxMap, TensorNanArgmaxCombine
from .argmin import argmin, TensorArgmin, TensorArgminMap, TensorArgminCombine
from .nanargmin import nanargmin, TensorNanArgmin, \
TensorNanArgminMap, TensorNanArgminCombine
from .cumsum import cumsum, TensorCumsum
from .cumprod import cumprod, TensorCumprod
from .var import var, TensorVar, TensorMoment, TensorMomentMap, TensorMomentCombine
from .std import std
from .nanvar import nanvar, TensorNanVar, TensorNanMoment, \
TensorNanMomentMap, TensorNanMomentCombine
from .nanstd import nanstd
from .nancumsum import nancumsum, TensorNanCumsum
from .nancumprod import nancumprod, TensorNanCumprod
from .count_nonzero import count_nonzero, TensorCountNonzero
from .allclose import allclose
from .array_equal import array_equal
def _install():
from ..core import Tensor
setattr(Tensor, 'sum', sum)
setattr(Tensor, 'prod', prod)
setattr(Tensor, 'max', max)
setattr(Tensor, 'min', min)
setattr(Tensor, 'all', all)
setattr(Tensor, 'any', any)
setattr(Tensor, 'mean', mean)
setattr(Tensor, 'argmax', argmax)
setattr(Tensor, 'argmin', argmin)
setattr(Tensor, 'cumsum', cumsum)
setattr(Tensor, 'cumprod', cumprod)
setattr(Tensor, 'var', var)
setattr(Tensor, 'std', std)
_install()
del _install
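# Illustrative effect of _install (sketch, assuming the usual mars.tensor entry point):
#   import mars.tensor as mt
#   t = mt.ones((3, 3))
#   t.sum()   # returns a lazily-built tensor using the `sum` reduction bound above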
| 1.570313 | 2 |
plugins/status.py | DasFranck/ConDeBot_Discord | 0 | 12790047 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import discord
from classes.Plugin import Plugin
NAME = "Status"
DESCRIPTION = "Change the bot status and his played game on discord"
USAGE = {}
class StatusPlugin(Plugin):
def __init__(self, cdb):
super().__init__(cdb)
self.status_dict = {"online": discord.Status.online,
"offline": discord.Status.offline,
"idle": discord.Status.idle,
"dnd": discord.Status.do_not_disturb,
"do_not_disturb": discord.Status.do_not_disturb,
"invisible": discord.Status.invisible}
self.status = None
self.game = None
cdb.reserve_keywords(["status", "game"], "Status")
cdb.add_plugin_description(DESCRIPTION, NAME)
cdb.add_plugin_usage(USAGE, NAME)
async def on_message(self, message, cmd):
if not cmd.triggered \
or cmd.action not in ["status", "game"]:
return
if not self.cdb.isop_user(message.author):
await message.channel.send("You don't have the right to do that.")
self.cdb.log_warn("Changing bot status requested by NON-OP %s, FAILED" % (str(cmd.author)), message)
else:
if cmd.action == "status":
if len(cmd.args) == 0:
await message.channel.send("Try with an argument for this command next time.")
await message.channel.send("Valid arguments: online, offline, idle, dnd, invisible.")
elif cmd.args[0].lower() in self.status_dict:
self.cdb.log_info("Change bot's status to %s requested by %s" % (cmd.args[0].lower(), str(cmd.author)), message)
self.status = self.status_dict[cmd.args[0].lower()]
else:
await message.channel.send("It's not a valid argument.")
await message.channel.send("Valid arguments: online, offline, idle, dnd, invisible.")
elif cmd.action == "game":
if len(cmd.args) == 0:
self.game = None
self.cdb.log_info("Erasing bot's game requested by %s" % (str(cmd.author)), message)
else:
self.game = discord.Game(name=message.content[6:])
self.cdb.log_info("Change bot's game requested by %s" % (str(cmd.author)), message)
await self.cdb.change_presence(game=self.game, status=self.status)
| 2.78125 | 3 |
sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2016_09_01/models/__init__.py | vbarbaresi/azure-sdk-for-python | 8 | 12790048 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import ErrorResponse
from ._models_py3 import LocalizableString
from ._models_py3 import LogSettings
from ._models_py3 import Metric
from ._models_py3 import MetricCollection
from ._models_py3 import MetricSettings
from ._models_py3 import MetricValue
from ._models_py3 import Resource
from ._models_py3 import RetentionPolicy
from ._models_py3 import ServiceDiagnosticSettingsResource
from ._models_py3 import ServiceDiagnosticSettingsResourcePatch
except (SyntaxError, ImportError):
from ._models import ErrorResponse # type: ignore
from ._models import LocalizableString # type: ignore
from ._models import LogSettings # type: ignore
from ._models import Metric # type: ignore
from ._models import MetricCollection # type: ignore
from ._models import MetricSettings # type: ignore
from ._models import MetricValue # type: ignore
from ._models import Resource # type: ignore
from ._models import RetentionPolicy # type: ignore
from ._models import ServiceDiagnosticSettingsResource # type: ignore
from ._models import ServiceDiagnosticSettingsResourcePatch # type: ignore
from ._monitor_client_enums import (
Unit,
)
__all__ = [
'ErrorResponse',
'LocalizableString',
'LogSettings',
'Metric',
'MetricCollection',
'MetricSettings',
'MetricValue',
'Resource',
'RetentionPolicy',
'ServiceDiagnosticSettingsResource',
'ServiceDiagnosticSettingsResourcePatch',
'Unit',
]
| 1.476563 | 1 |
salt-2016.3.3/tests/unit/states/glusterfs_test.py | stephane-martin/salt-debian-packaging | 0 | 12790049 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`<NAME> <<EMAIL>>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import glusterfs
from tests.unit.modules.glusterfs_test import GlusterResults
import salt.modules.glusterfs as mod_glusterfs
import salt.utils.cloud
glusterfs.__salt__ = {'glusterfs.peer': mod_glusterfs.peer}
glusterfs.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class GlusterfsTestCase(TestCase):
'''
Test cases for salt.states.glusterfs
'''
# 'peered' function tests: 1
def test_peered(self):
'''
Test to verify if node is peered.
'''
name = 'server1'
other_name = 'server1'
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
# probe new peer server2 under gluster 3.4.x
comt = ('Peer {0} added successfully.'.format(name))
ret.update({'comment': comt, 'result': True,
'changes': {'new': {name: []}, 'old': {}}})
mock_xml = MagicMock(
return_value=GlusterResults.v34.peer_probe.success_other)
with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
mock = MagicMock(side_effect=[{}, {name: []}])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
self.assertDictEqual(glusterfs.peered(name), ret)
# probe new peer server2 under gluster 3.7.x
mock_xml = MagicMock(
return_value=GlusterResults.v37.peer_probe.success_other)
with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
mock = MagicMock(side_effect=[{}, {name: []}])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
self.assertDictEqual(glusterfs.peered(name), ret)
# probe already existing server2 under gluster 3.4.x
comt = ('Host {0} already peered'.format(name))
ret.update({'comment': comt, 'changes': {}})
mock_xml = MagicMock(
return_value=GlusterResults.v34.peer_probe.success_already_peer['hostname'])
with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
mock = MagicMock(side_effect=[{name: []}, {name: []}])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
self.assertDictEqual(glusterfs.peered(name), ret)
# probe already existing server2 under gluster 3.7.x
mock_xml = MagicMock(
return_value=GlusterResults.v37.peer_probe.success_already_peer['hostname'])
with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
mock = MagicMock(side_effect=[{name: []}, {name: []}])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
self.assertDictEqual(glusterfs.peered(name), ret)
# Issue 30932: Peering an existing server by IP fails with gluster 3.7+
#
# server2 was probed by address, 10.0.0.2. Under 3.4, server1 would be
# known as 10.0.0.1 but starting with 3.7, its hostname of server1 would be
# known instead. Subsequent probing of server1 by server2 used to result in
# "success_already_peer" but now it should succeed in adding an alternate
# hostname entry.
name = 'server1'
ip = '10.0.0.1'
comt = ('Host {0} already peered'.format(ip))
ret.update({'name': ip, 'comment': comt, 'changes': {}})
mock_xml = MagicMock(
return_value=GlusterResults.v34.peer_probe.success_first_ip_from_second_first_time)
with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
mock = MagicMock(side_effect=[{ip: []}, {ip: []}])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
self.assertDictEqual(glusterfs.peered(ip), ret)
comt = ('Peer {0} added successfully.'.format(ip))
ret.update({'name': ip, 'comment': comt, 'changes': {
'old': {name: []}, 'new': {name: [ip]}}})
mock_xml = MagicMock(
return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_first_time)
with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
mock = MagicMock(side_effect=[{name: []}, {name: [ip]}])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
self.assertDictEqual(glusterfs.peered(ip), ret)
comt = ('Host {0} already peered'.format(ip))
ret.update({'name': ip, 'comment': comt, 'changes': {}})
mock_xml = MagicMock(
return_value=GlusterResults.v37.peer_probe.success_first_ip_from_second_second_time)
with patch.dict('salt.modules.glusterfs.__salt__', {'cmd.run': mock_xml}):
mock = MagicMock(side_effect=[{name: [ip]}, {name: [ip]}])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_peers': mock}):
self.assertDictEqual(glusterfs.peered(ip), ret)
# test for invalid characters
comt = ('Invalid characters in peer name.')
ret.update({'name': '#badhostname', 'comment': comt, 'result': False})
self.assertDictEqual(glusterfs.peered('#badhostname'), ret)
# 'created' function tests: 1
def test_created(self):
'''
Test to check if volume already exists
'''
name = 'salt'
bricks = {'host1': '/srv/gluster/drive1',
'host2': '/srv/gluster/drive2'}
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=[[name], [], [], [], [name]])
mock_lst = MagicMock(return_value=[])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_volumes': mock,
'glusterfs.create': mock_lst}):
comt = ('Volume {0} already exists.'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(glusterfs.created(name, bricks), ret)
with patch.dict(glusterfs.__opts__, {'test': True}):
comt = ('Volume {0} will be created'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(glusterfs.created(name, bricks), ret)
with patch.dict(glusterfs.__opts__, {'test': False}):
with patch.object(salt.utils.cloud, 'check_name',
MagicMock(return_value=True)):
comt = ('Invalid characters in volume name.')
ret.update({'comment': comt, 'result': False})
self.assertDictEqual(glusterfs.created(name, bricks), ret)
comt = ('Host {0} already peered'.format(name))
ret.update({'comment': [], 'result': True,
'changes': {'new': ['salt'], 'old': []}})
self.assertDictEqual(glusterfs.created(name, bricks), ret)
# 'started' function tests: 1
def test_started(self):
'''
Test to check if volume has been started
'''
name = 'salt'
ret = {'name': name,
'result': False,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=[[], [name], [name], [name]])
mock_t = MagicMock(return_value='started')
mock_dict = MagicMock(side_effect=[{}, '', ''])
with patch.dict(glusterfs.__salt__, {'glusterfs.list_volumes': mock,
'glusterfs.status': mock_dict,
'glusterfs.start_volume': mock_t}):
comt = ('Volume {0} does not exist'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(glusterfs.started(name), ret)
comt = ('Volume {0} is already started'.format(name))
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(glusterfs.started(name), ret)
with patch.dict(glusterfs.__opts__, {'test': True}):
comt = ('Volume {0} will be started'.format(name))
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(glusterfs.started(name), ret)
with patch.dict(glusterfs.__opts__, {'test': False}):
ret.update({'comment': 'started', 'result': True,
'change': {'new': 'started', 'old': 'stopped'}})
self.assertDictEqual(glusterfs.started(name), ret)
# 'add_volume_bricks' function tests: 1
def test_add_volume_bricks(self):
'''
Test to add brick(s) to an existing volume
'''
name = 'salt'
bricks = {'bricks': {'host1': '/srv/gluster/drive1'}}
ret = {'name': name,
'result': False,
'comment': '',
'changes': {}}
mock = MagicMock(side_effect=['does not exist', 'is not started',
bricks, bricks, bricks, ''])
mock_t = MagicMock(side_effect=['bricks successfully added',
'Bricks already in volume', ''])
with patch.dict(glusterfs.__salt__,
{'glusterfs.status': mock,
'glusterfs.add_volume_bricks': mock_t}):
ret.update({'comment': 'does not exist'})
self.assertDictEqual(
glusterfs.add_volume_bricks(name, bricks), ret)
ret.update({'comment': 'is not started'})
self.assertDictEqual(
glusterfs.add_volume_bricks(name, bricks), ret)
ret.update({'comment': 'bricks successfully added', 'result': True,
'changes': {'new': ['host1'], 'old': ['host1']}})
self.assertDictEqual(
glusterfs.add_volume_bricks(name, bricks), ret)
ret.update({'comment': 'Bricks already in volume', 'changes': {}})
self.assertDictEqual(
glusterfs.add_volume_bricks(name, bricks), ret)
ret.update({'comment': '', 'result': False})
self.assertDictEqual(
glusterfs.add_volume_bricks(name, bricks), ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(GlusterfsTestCase, needs_daemon=False)
| 1.804688 | 2 |
src/qa4sm_reader/plotter.py | sheenaze/qa4sm-reader | 0 | 12790050 | <gh_stars>0
# -*- coding: utf-8 -*-
from pathlib import Path
import seaborn as sns
import pandas as pd
from qa4sm_reader.img import QA4SMImg
import qa4sm_reader.globals as globals
from qa4sm_reader.plot_utils import *
from warnings import warn
class QA4SMPlotter():
"""
Class to create image files of plots from the validation results in a QA4SMImage
"""
def __init__(self, image, out_dir:str=None):
"""
Create box plots from results in a qa4sm output file.
Parameters
----------
image : QA4SMImg
The results object.
out_dir : str, optional (default: None)
Path to output generated plot. If None, defaults to the current working directory.
"""
self.img = image
self.out_dir = self.get_dir(out_dir=out_dir)
self.ref = image.datasets.ref
try:
self.img.vars
except:
warn("The initialized QA4SMImg object has not been loaded. 'load_data' needs to be "
"set to 'True' in the initialization of the Image.")
def get_dir(self, out_dir:str) -> Path:
"""Use output path if specified, otherwise same directory as the one storing the netCDF file"""
if out_dir:
out_dir = Path(out_dir) # use directory if specified
if not out_dir.exists():
out_dir.mkdir() # make if not existing
else:
out_dir = self.img.filepath.parent # use default otherwise
return out_dir
def _standard_filename(self, out_name:str, out_type:str='png') -> Path:
"""
Standardized behaviour for filenames: if provided name has extension, it is kept; otherwise, it is saved as
.png to self.out_dir
Parameters
----------
out_name : str
output filename (with or without extension)
out_type : str, optional
contains file extensions to be plotted. If None, uses 'png'
Returns
-------
outname: pathlib.Path
correct path of the file
"""
out_name = Path(out_name)
# provide output directory
out_path = self.out_dir.joinpath(out_name)
# provide output file type
if not out_path.suffix:
if out_type[0] != '.':
out_type = '.' + out_type
out_path = out_path.with_suffix(out_type)
return out_path
@staticmethod
def _box_stats(ds:pd.Series, med:bool=True, iqr:bool=True, count:bool=True) -> str:
"""
Create the metric part with stats of the box (axis) caption
Parameters
----------
ds: pd.Series
data on which stats are found
med: bool
iqr: bool
count: bool
statistics
Returns
-------
stats: str
caption with summary stats
"""
# interquartile range
iqr = ds.quantile(q=[0.75,0.25]).diff()
iqr = abs(float(iqr.loc[0.25]))
met_str = []
if med:
met_str.append('Median: {:.3g}'.format(ds.median()))
if iqr:
met_str.append('IQR: {:.3g}'.format(iqr))
if count:
met_str.append('N: {:d}'.format(ds.count()))
stats = '\n'.join(met_str)
return stats
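        # Example of the resulting caption string (illustrative values only):
        #   "Median: 0.62\nIQR: 0.15\nN: 5023"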
@staticmethod
def _box_caption(Var, tc:bool=False) -> str:
"""
Create the dataset part of the box (axis) caption
Parameters
----------
Var: MetricVar
variable for a metric
tc: bool, default is False
True if TC. Then, caption starts with "Other Data:"
Returns
-------
capt: str
box caption
"""
ref_meta, mds_meta, other_meta = Var.get_varmeta()
ds_parts = []
id, meta = mds_meta
if tc:
id, meta = other_meta
ds_parts.append('{}-{}\n({})'.format(
id, meta['pretty_name'], meta['pretty_version']))
capt = '\n and \n'.join(ds_parts)
if tc:
capt = 'Other Data:\n' + capt
return capt
@staticmethod
def _get_parts_name(Var, type='boxplot_basic') -> list:
"""
Create parts for title according to the type of plot
Parameters
----------
Var: MetricVar
variable for a metric
type: str
type of plot
Returns
-------
parts: list
list of parts for title
"""
parts = []
ref, mds, other = [meta for meta in Var.get_varmeta()]
if type == 'boxplot_basic':
parts.append(ref[0])
parts.extend([ref[1]['pretty_name'], ref[1]['pretty_version']])
elif type in ['boxplot_tc', 'mapplot_basic', 'mapplot_tc']:
parts.append(mds[0])
parts.extend([mds[1]['pretty_name'], mds[1]['pretty_version']])
parts.append(ref[0])
parts.extend([ref[1]['pretty_name'], ref[1]['pretty_version']])
if type == 'mapplot_tc':
parts.append(other[0])
parts.extend([other[1]['pretty_name'], other[1]['pretty_version']])
return parts
@staticmethod
def _titles_lut(type:str) -> str:
"""
Lookup table for plot titles
Parameters
----------
type: str
type of plot
"""
titles = {'boxplot_basic': 'Intercomparison of \n{} \nwith {}-{} ({}) \nas the reference',
'boxplot_tc': 'Intercomparison of \n{} \nfor {}-{} ({}) \nwith {}-{} ({}) \nas the reference',
'mapplot_basic': '{} for {}-{} ({}) with {}-{} ({}) as the reference',
'mapplot_tc': '{} for {}-{} ({}) with {}-{} ({}) and {}-{} ({}) as the references'}
try:
return titles[type]
except KeyError as e:
message = "type '{}' is not in the lookup table".format(type)
warn(message)
    @staticmethod # todo: change file names and convention in qa4sm
def _filenames_lut(type:str) -> str:
"""
Lookup table for file names
Parameters
----------
type: str
type of plot
"""
# we stick to old naming convention
names = {'boxplot_basic': 'boxplot_{}',
'mapplot_common': 'overview_{}',
'boxplot_tc': 'boxplot_{}_for_{}-{}',
'mapplot_double': 'overview_{}-{}_and_{}-{}_{}',
'mapplot_tc': 'overview_{}-{}_and_{}-{}_and_{}-{}_{}_for_{}-{}'}
try:
return names[type]
except KeyError as e:
message = "type '{}' is not in the lookup table".format(type)
warn(message)
def create_title(self, Var, type:str) -> str:
"""
Create title of the plot
Parameters
----------
Var: MetricVar
variable for a metric
type: str
type of plot
"""
parts = [globals._metric_name[Var.metric]]
parts.extend(self._get_parts_name(Var=Var, type=type))
title = self._titles_lut(type=type).format(*parts)
return title
def create_filename(self, Var, type:str) -> str:
"""
Create name of the file
Parameters
----------
Var: MetricVar
variable for a metric
type: str
type of plot
"""
name = self._filenames_lut(type=type)
ref_meta, mds_meta, other_meta = Var.get_varmeta()
# fetch parts of the name for the variable
        if type not in ["mapplot_tc", "mapplot_double"]:
parts = [Var.metric]
if mds_meta:
parts.extend([mds_meta[0], mds_meta[1]['short_name']])
else:
parts = [ref_meta[0], ref_meta[1]['short_name']]
if type == "mapplot_tc":
# necessary to respect old naming convention
for dss in Var.other_dss:
parts.extend([dss[0], dss[1]['short_name']])
parts.extend([Var.metric, mds_meta[0], mds_meta[1]['short_name']])
parts.extend([mds_meta[0], mds_meta[1]['short_name'], Var.metric])
name = name.format(*parts)
return name
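        # Illustrative output (names are examples, not taken from a real file):
        #   type='boxplot_basic', metric 'R'            -> 'boxplot_R'
        #   type='boxplot_tc', metric 'snr', dataset 3
        #   with short_name 'ESA_CCI_SM_combined'       -> 'boxplot_snr_for_3-ESA_CCI_SM_combined'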
def _yield_values(self, metric:str, tc:bool=False) -> tuple:
"""
Get iterable with pandas dataframes for all variables of a metric to plot
Parameters
----------
metric: str
metric name
tc: bool, default is False
True if TC. Then, caption starts with "Other Data:"
Yield
-----
df: pd.DataFrame
dataframe with variable values and caption name
Var: QA4SMMetricVariable
variable corresponding to the dataframe
"""
Vars = self.img._iter_vars(**{'metric':metric})
for n, Var in enumerate(Vars):
values = Var.values[Var.varname]
# changes if it's a common-type Var
if Var.g == 0:
box_cap_ds = 'All datasets'
else:
box_cap_ds = self._box_caption(Var, tc=tc)
# setting in global for caption stats
if globals.boxplot_printnumbers:
box_stats = self._box_stats(values)
box_cap = '{}\n{}'.format(box_cap_ds, box_stats)
else:
box_cap = box_cap_ds
df = values.to_frame(box_cap)
yield df, Var
def _boxplot_definition(
self, metric:str,
df:pd.DataFrame,
type:str,
ci=None,
offset=0.07,
Var=None,
**kwargs
) -> tuple:
"""
Define parameters of plot
Parameters
----------
df: pd.DataFrame
dataframe to plot
type: str
one of _titles_lut
ci: dict
Dict of dataframes with the lower and upper confidence intervals
shape: {"upper"/"lower": [CIs]}
xticks: list
caption to each boxplot (or triplet thereof)
offset: float
offset of boxplots
Var: QA4SMMetricVariable, optional. Default is None
Specified in case mds meta is needed
"""
# plot label
parts = [globals._metric_name[metric]]
parts.append(globals._metric_description[metric].format(
globals._metric_units[self.ref['short_name']]))
label = "{}{}".format(*parts)
# generate plot
figwidth = globals.boxplot_width * (len(df.columns) + 1)
figsize = [figwidth, globals.boxplot_height]
fig, ax = boxplot(
df=df,
ci=ci,
label=label,
figsize=figsize,
dpi=globals.dpi
)
if not Var:
# when we only need reference dataset from variables (i.e. is the same):
for Var in self.img._iter_vars(**{'metric':metric}):
Var = Var
break
title = self.create_title(Var, type=type)
ax.set_title(title, pad=globals.title_pad)
# add watermark
if self.img.has_CIs:
offset = 0.06 # offset smaller as CI variables have a larger caption
if Var.g == 0:
offset = 0.02 # offset larger as common metrics have a shorter caption
if globals.watermark_pos not in [None, False]:
make_watermark(fig, offset=offset)
return fig, ax
def _save_plot(self, out_name:str, out_types:str='png') -> list:
"""
Save plot with name to self.out_dir
Parameters
----------
out_name: str
name of output file
out_types: str or list
extensions which the files should be saved in
Returns
-------
fnames: list
list of file names with all the extensions
"""
fnames = []
if isinstance(out_types, str):
out_types = [out_types]
for ext in out_types:
fname = self._standard_filename(out_name, out_type=ext)
if fname.exists():
warnings.warn('Overwriting file {}'.format(fname.name))
plt.savefig(fname, dpi='figure', bbox_inches='tight')
fnames.append(fname.absolute())
return fnames
def boxplot_basic(
self, metric:str,
out_name:str=None,
out_types:str='png',
save_files:bool=False,
**plotting_kwargs
) -> list:
"""
Creates a boxplot for common and double metrics. Saves a figure and returns Matplotlib fig and ax objects for
further processing.
Parameters
----------
metric : str
metric that is collected from the file for all datasets and combined
into one plot.
out_name: str
name of output file
out_types: str or list
extensions which the files should be saved in
        save_files: bool, optional. Default is False
            whether to save the file in the output directory
plotting_kwargs: arguments for _boxplot_definition function
Returns
-------
fnames: list
list of file names with all the extensions
"""
fnames, values = [], []
ci = []
# we take the last iterated value for Var and use it for the file name
for df, Var in self._yield_values(metric=metric):
if not Var.is_CI:
# concat upper and lower CI bounds of Variable, if present
bounds = []
for ci_df, ci_Var in self._yield_values(metric=metric):
# make sure they refer to the right variable
if ci_Var.is_CI and (ci_Var.metric_ds == Var.metric_ds):
ci_df.columns = [ci_Var.bound]
bounds.append(ci_df)
if bounds: # could be that variable doesn't have CIs
bounds = pd.concat(bounds, axis=1)
# get the mean CI range
diff = bounds["upper"] - bounds["lower"]
ci_range = float(diff.mean())
df.columns = [
df.columns[0] + "\nMean CI range:"
" {:.3g}".format(ci_range)
]
ci.append(bounds)
values.append(df)
# put all Variables in the same dataframe
values = pd.concat(values)
# values are all Nan or NaNf - not plotted
if np.isnan(values.to_numpy()).all():
return None
# create plot
fig, ax = self._boxplot_definition(
metric=metric,
df=values,
type='boxplot_basic',
ci=ci,
**plotting_kwargs
)
if not out_name:
out_name = self.create_filename(Var, type='boxplot_basic')
# save or return plotting objects
if save_files:
fnames = self._save_plot(out_name, out_types=out_types)
plt.close('all')
return fnames
else:
return fig, ax
def boxplot_tc( # todo: set limits to show confidence intervals
self, metric:str,
out_name:str=None,
out_types:str='png',
save_files:bool=False,
**plotting_kwargs
) -> list:
"""
Creates a boxplot for TC metrics. Saves a figure and returns Matplotlib fig and ax objects for
further processing.
Parameters
----------
metric : str
metric that is collected from the file for all datasets and combined
into one plot.
out_name: str
name of output file
out_types: str or list
extensions which the files should be saved in
        save_files: bool, optional. Default is False
            whether to save the file in the output directory
plotting_kwargs: arguments for _boxplot_definition function
Returns
-------
fnames: list
list of file names with all the extensions
"""
fnames = []
# group Vars and CIs relative to the same dataset
metric_tc, ci = {}, {}
for df, Var in self._yield_values(metric=metric, tc=True):
if not Var.is_CI:
id, names = Var.metric_ds
bounds = []
for ci_df, ci_Var in self._yield_values(metric=metric):
# make sure they refer to the right variable
if ci_Var.is_CI and \
(ci_Var.metric_ds == Var.metric_ds) and \
(ci_Var.other_dss == Var.other_dss):
ci_df.columns = [ci_Var.bound]
bounds.append(ci_df)
if bounds: # could be that variable doesn't have CIs
bounds = pd.concat(bounds, axis=1)
# get the mean CI range
diff = bounds["upper"] - bounds["lower"]
ci_range = diff.mean()
df.columns = [
df.columns[0] + "\nMean CI range:"
" {:.3g}".format(ci_range)
]
if id in ci.keys():
ci[id].append(bounds)
else:
ci[id] = [bounds]
if id in metric_tc.keys():
metric_tc[id][0].append(df)
else:
metric_tc[id] = [df], Var
for id, values in metric_tc.items():
dfs, Var = values
df = pd.concat(dfs)
# values are all Nan or NaNf - not plotted
if np.isnan(df.to_numpy()).all():
continue
# necessary if statement to prevent key error when no CIs are in the netCDF
if ci:
bounds = ci[id]
else:
bounds = ci
# create plot
fig, ax = self._boxplot_definition(
metric=metric,
df=df,
ci=bounds,
type='boxplot_tc',
Var=Var,
**plotting_kwargs
)
# save. Below workaround to avoid same names
if not out_name:
save_name = self.create_filename(Var, type='boxplot_tc')
else:
save_name = out_name
# save or return plotting objects
if save_files:
fns = self._save_plot(save_name, out_types=out_types)
fnames.extend(fns)
plt.close('all')
if save_files:
return fnames
def mapplot_var(
self, Var,
out_name:str=None,
out_types:str='png',
save_files:bool=False,
**plotting_kwargs
) -> list:
"""
        Plots values to a map, using the values as color. Plots a scatterplot for
        ISMN and an image plot for other input values.
Parameters
----------
        Var : QA4SMMetricVariable
            Var in the image to make the map for.
        out_name: str
            name of output file
        out_types: str or list
            extensions which the files should be saved in
        save_files: bool, optional. Default is False
            whether to save the file in the output directory
plotting_kwargs: arguments for mapplot function
Returns
-------
fnames: list
list of file names with all the extensions
"""
ref_meta, mds_meta, other_meta = Var.get_varmeta()
metric = Var.metric
ref_grid_stepsize = self.img.ref_dataset_grid_stepsize
# create mapplot
fig, ax = mapplot(df=Var.values[Var.varname],
metric=metric,
ref_short=ref_meta[1]['short_name'],
ref_grid_stepsize=ref_grid_stepsize,
                          plot_extent=None, # if None, extent is automatically adjusted (as opposed to img.extent)
**plotting_kwargs)
# title and plot settings depend on the metric group
if Var.g == 0:
title = "{} between all datasets".format(globals._metric_name[metric])
out_name = self.create_filename(Var, type='mapplot_common')
elif Var.g == 2:
title = self.create_title(Var=Var, type='mapplot_basic')
out_name = self.create_filename(Var, type='mapplot_double')
else:
title = self.create_title(Var=Var, type='mapplot_tc')
out_name = self.create_filename(Var, type='mapplot_tc')
# use title for plot, make watermark
ax.set_title(title, pad=globals.title_pad)
if globals.watermark_pos not in [None, False]:
make_watermark(fig, globals.watermark_pos, for_map=True, offset=0.04)
# save file or just return the image
if save_files:
fnames = self._save_plot(out_name, out_types=out_types)
return fnames
else:
return fig, ax
def mapplot_metric(
self, metric:str,
out_types:str='png',
save_files:bool=False,
**plotting_kwargs
) -> list:
"""
Mapplot for all variables for a given metric in the loaded file.
Parameters
----------
metric : str
Name of a metric. File is searched for variables for that metric.
        out_types: str or list
            extensions which the files should be saved in
        save_files: bool, optional. Default is False
            whether to save the file in the output directory
plotting_kwargs: arguments for mapplot function
Returns
-------
fnames : list
List of files that were created
"""
fnames = []
for Var in self.img._iter_vars(**{'metric':metric}):
if not (np.isnan(Var.values.to_numpy()).all() or Var.is_CI):
fns = self.mapplot_var(Var,
out_name=None,
out_types=out_types,
save_files=save_files,
**plotting_kwargs)
# values are all Nan or NaNf - not plotted
else:
continue
if save_files:
fnames.extend(fns)
plt.close('all')
if fnames:
return fnames
def plot_metric(
self, metric:str,
out_types:str='png',
save_all:bool=True,
**plotting_kwargs
) -> tuple:
"""
Plot and save boxplot and mapplot for a certain metric
Parameters
----------
metric: str
name of the metric
out_types: str or list
extensions which the files should be saved in
save_all: bool, optional. Default is True.
all plotted images are saved to the output directory
plotting_kwargs: arguments for mapplot function.
"""
Metric = self.img.metrics[metric]
if Metric.g == 0 or Metric.g == 2:
fnames_bplot = self.boxplot_basic(metric=metric,
out_types=out_types,
save_files=save_all,
**plotting_kwargs)
elif Metric.g == 3:
fnames_bplot = self.boxplot_tc(metric=metric,
out_types=out_types,
save_files=save_all,
**plotting_kwargs)
fnames_mapplot = self.mapplot_metric(metric=metric,
out_types=out_types,
save_files=save_all,
**plotting_kwargs)
return fnames_bplot, fnames_mapplot
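
# Minimal usage sketch (assumptions: QA4SMImg accepts the path to a QA4SM netCDF
# results file; 'R' is a key of globals._metric_name; the file path and output
# directory below are illustrative only):
#
#   from qa4sm_reader.img import QA4SMImg
#   from qa4sm_reader.plotter import QA4SMPlotter
#
#   img = QA4SMImg("validation_result.nc")
#   plotter = QA4SMPlotter(img, out_dir="plots")
#   boxplots, mapplots = plotter.plot_metric("R", out_types=["png", "svg"], save_all=True)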
| 2.296875 | 2 |