content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|
class Config:
def __init__(self,
use_cuda=True,
hidden_size=512,
dropout_p=0.5,
n_layers=4,
batch_size=32,
max_epochs=40,
lr=0.0001,
teacher_forcing_ratio=1.0,
seed=1,
max_len=428,
worker_num=1
):
self.use_cuda = use_cuda
self.hidden_size = hidden_size
self.dropout_p = dropout_p
self.n_layers = n_layers
self.batch_size = batch_size
self.max_epochs = max_epochs
self.lr = lr
self.teacher_forcing_ratio = teacher_forcing_ratio
self.seed = seed
self.max_len = max_len
self.worker_num = worker_num
| 30.461538 | 58 | 0.5 | ["MIT"] | sooftware/char-rnnlm | package/config.py | 792 | Python |
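A minimal usage sketch for the `Config` class above (not from the original `package/config.py`): every constructor argument becomes an attribute of the same name, so individual defaults can be overridden at construction time.

```python
# Hypothetical usage: override a couple of defaults, keep the rest.
config = Config(hidden_size=256, batch_size=64, use_cuda=False)
print(config.hidden_size, config.batch_size, config.lr)  # 256 64 0.0001
```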
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
def updateParams(k, alpha, N,sum_log_di, x, h):
div_xByAlpha = np.divide(x,alpha)
powK_div_xByAlpha = np.power(div_xByAlpha, k)
log_div_xByAlpha = np.log(div_xByAlpha)
sum_powK_div_diByAlpha = np.sum(np.multiply(powK_div_xByAlpha, h))
sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha = np.sum(np.multiply(np.multiply(powK_div_xByAlpha,log_div_xByAlpha),h))
sum_prod_OF_powK_div_diByAlpha_AND_logP2_div_diByAlpha = np.sum(np.multiply(np.multiply(powK_div_xByAlpha,np.power(log_div_xByAlpha,2)),h))
#N = d.shape[0]
hessian = np.zeros((2,2))
hessian[0,0] = -1.0 * ((N/(k*k)) + sum_prod_OF_powK_div_diByAlpha_AND_logP2_div_diByAlpha)
hessian[1,1] = (k/(alpha*alpha)) * (N-(k+1)*sum_powK_div_diByAlpha)
hessian[0,1] = hessian[1,0] = (1.0/alpha)*sum_powK_div_diByAlpha + (k/alpha)*sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha - N/alpha
vec = np.zeros((2,1))
vec[0] = -1.0 *( N/k - N*np.log(alpha) + sum_log_di - sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha)
vec[1] = -1.0 *(k/alpha * (sum_powK_div_diByAlpha - N))
param = np.linalg.inv(hessian).dot(vec)
return k+param[0], alpha+param[1]
if __name__ == "__main__":
#loading histograms
    data = np.loadtxt('myspace.csv', dtype=object, comments='#', delimiter=',')
    h = data[:, 1].astype(int)
h = np.array([x for x in h if x>0])
x = np.array([num for num in range(1, h.shape[0]+1)])
k = 1.0
alpha = 1.0
N = np.sum(h)
sum_log_di = np.sum(np.multiply(np.log(x), h))
for i in range(0,20):
k,alpha = updateParams(k, alpha, N, sum_log_di, x, h)
        print(i)
        print(k)
        print(alpha)
        print("________")
x_1 = np.linspace(1,500,2500)
fig = plt.figure()
axs = fig.add_subplot(111)
y = N * (k/alpha) * np.multiply(np.power(np.divide(x_1,alpha), k-1), np.exp(-1.0* np.power(np.divide(x_1,alpha), k)))
axs.plot(x_1,y, 'b')
axs.plot(x, h, 'g')
plt.show()
| 37.071429 | 143 | 0.663776 | ["MIT"] | omartrinidad/pattern-recognition-bit | 01/Task13.py | 2,076 | Python |
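For reference (this note is not part of `01/Task13.py`): `updateParams` performs one Newton-Raphson step for the maximum-likelihood fit of a two-parameter Weibull distribution to binned data, where `x` holds the bin positions d_i and `h` the counts h_i. The quantities it assembles correspond to the weighted log-likelihood

```latex
\ell(k,\alpha) = \sum_i h_i \left[ \ln k - k\ln\alpha + (k-1)\ln d_i - (d_i/\alpha)^k \right],
\qquad N = \sum_i h_i
```

with `vec` storing the negated gradient of this log-likelihood and `hessian` its Hessian, so `param = np.linalg.inv(hessian).dot(vec)` is the Newton increment and the returned pair is the updated (k, alpha). The final plot overlays the fitted Weibull density, scaled by the total count N, on the raw histogram.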
from nose.tools import eq_, ok_
from nose.plugins.skip import SkipTest
# Skip test on PY3
from flask_admin._compat import PY2, as_unicode
if not PY2:
raise SkipTest('MongoEngine is not Python 3 compatible')
from wtforms import fields
from flask_admin import form
from flask_admin.contrib.mongoengine import ModelView
from . import setup
from datetime import datetime
class CustomModelView(ModelView):
def __init__(self, model,
name=None, category=None, endpoint=None, url=None,
**kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
super(CustomModelView, self).__init__(model,
name, category,
endpoint, url)
def create_models(db):
class Model1(db.Document):
test1 = db.StringField(max_length=20)
test2 = db.StringField(max_length=20)
test3 = db.StringField()
test4 = db.StringField()
datetime_field = db.DateTimeField()
def __str__(self):
return self.test1
class Model2(db.Document):
string_field = db.StringField()
int_field = db.IntField()
float_field = db.FloatField()
bool_field = db.BooleanField()
model1 = db.ReferenceField(Model1)
Model1.objects.delete()
Model2.objects.delete()
return Model1, Model2
def fill_db(Model1, Model2):
Model1('test1_val_1', 'test2_val_1').save()
Model1('test1_val_2', 'test2_val_2').save()
Model1('test1_val_3', 'test2_val_3').save()
Model1('test1_val_4', 'test2_val_4').save()
Model1(None, 'empty_obj').save()
Model2('string_field_val_1', None, None).save()
Model2('string_field_val_2', None, None).save()
Model2('string_field_val_3', 5000, 25.9).save()
Model2('string_field_val_4', 9000, 75.5).save()
Model1('datetime_obj1', datetime_field=datetime(2014,4,3,1,9,0)).save()
Model1('datetime_obj2', datetime_field=datetime(2013,3,2,0,8,0)).save()
def test_model():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1)
admin.add_view(view)
eq_(view.model, Model1)
eq_(view.name, 'Model1')
eq_(view.endpoint, 'model1')
eq_(view._primary_key, 'id')
ok_('test1' in view._sortable_columns)
ok_('test2' in view._sortable_columns)
ok_('test3' in view._sortable_columns)
ok_('test4' in view._sortable_columns)
ok_(view._create_form_class is not None)
ok_(view._edit_form_class is not None)
eq_(view._search_supported, False)
eq_(view._filters, None)
eq_(view._create_form_class.test1.field_class, fields.StringField)
eq_(view._create_form_class.test2.field_class, fields.StringField)
eq_(view._create_form_class.test3.field_class, fields.TextAreaField)
eq_(view._create_form_class.test4.field_class, fields.TextAreaField)
# Make some test clients
client = app.test_client()
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
rv = client.post('/admin/model1/new/',
data=dict(test1='test1large', test2='test2'))
eq_(rv.status_code, 302)
model = Model1.objects.first()
eq_(model.test1, 'test1large')
eq_(model.test2, 'test2')
eq_(model.test3, '')
eq_(model.test4, '')
rv = client.get('/admin/model1/')
eq_(rv.status_code, 200)
ok_('test1large' in rv.data)
url = '/admin/model1/edit/?id=%s' % model.id
rv = client.get(url)
eq_(rv.status_code, 200)
rv = client.post(url,
data=dict(test1='test1small', test2='test2large'))
eq_(rv.status_code, 302)
model = Model1.objects.first()
eq_(model.test1, 'test1small')
eq_(model.test2, 'test2large')
eq_(model.test3, '')
eq_(model.test4, '')
url = '/admin/model1/delete/?id=%s' % model.id
rv = client.post(url)
eq_(rv.status_code, 302)
eq_(Model1.objects.count(), 0)
def test_column_editable_list():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(Model1,
column_editable_list=[
'test1', 'datetime_field'])
admin.add_view(view)
fill_db(Model1, Model2)
client = app.test_client()
# Test in-line edit field rendering
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('data-role="x-editable"' in data)
# Form - Test basic in-line edit functionality
obj1 = Model1.objects.get(test1 = 'test1_val_3')
rv = client.post('/admin/model1/ajax/update/', data={
'test1-' + str(obj1.id): 'change-success-1',
})
data = rv.data.decode('utf-8')
ok_('Record was successfully saved.' == data)
# confirm the value has changed
rv = client.get('/admin/model1/')
data = rv.data.decode('utf-8')
ok_('change-success-1' in data)
# Test validation error
obj2 = Model1.objects.get(test1 = 'datetime_obj1')
rv = client.post('/admin/model1/ajax/update/', data={
'datetime_field-' + str(obj2.id): 'problematic-input',
})
eq_(rv.status_code, 500)
# Test invalid primary key
rv = client.post('/admin/model1/ajax/update/', data={
'test1-1000': 'problematic-input',
})
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
# Test editing column not in column_editable_list
rv = client.post('/admin/model1/ajax/update/', data={
'test2-1': 'problematic-input',
})
data = rv.data.decode('utf-8')
eq_(rv.status_code, 500)
# Test in-line editing for relations
view = CustomModelView(Model2,
column_editable_list=[
'model1'])
admin.add_view(view)
obj3 = Model2.objects.get(string_field = 'string_field_val_1')
rv = client.post('/admin/model2/ajax/update/', data={
'model1-' + str(obj3.id): str(obj1.id),
})
data = rv.data.decode('utf-8')
ok_('Record was successfully saved.' == data)
# confirm the value has changed
rv = client.get('/admin/model2/')
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
def test_column_filters():
app, db, admin = setup()
Model1, Model2 = create_models(db)
# fill DB with values
fill_db(Model1, Model2)
# Test string filter
view = CustomModelView(Model1, column_filters=['test1'])
admin.add_view(view)
eq_(len(view._filters), 7)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
[
(0, 'contains'),
(1, 'not contains'),
(2, 'equals'),
(3, 'not equal'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# Make some test clients
client = app.test_client()
# string - equals
rv = client.get('/admin/model1/?flt0_0=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not equal
rv = client.get('/admin/model1/?flt0_1=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - contains
rv = client.get('/admin/model1/?flt0_2=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test1_val_2' not in data)
# string - not contains
rv = client.get('/admin/model1/?flt0_3=test1_val_1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test1_val_2' in data)
# string - empty
rv = client.get('/admin/model1/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' in data)
ok_('test1_val_1' not in data)
ok_('test1_val_2' not in data)
# string - not empty
rv = client.get('/admin/model1/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('empty_obj' not in data)
ok_('test1_val_1' in data)
ok_('test1_val_2' in data)
# string - in list
rv = client.get('/admin/model1/?flt0_5=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' in data)
ok_('test2_val_2' in data)
ok_('test1_val_3' not in data)
ok_('test1_val_4' not in data)
# string - not in list
rv = client.get('/admin/model1/?flt0_6=test1_val_1%2Ctest1_val_2')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test2_val_1' not in data)
ok_('test2_val_2' not in data)
ok_('test1_val_3' in data)
ok_('test1_val_4' in data)
# Test numeric filter
view = CustomModelView(Model2, column_filters=['int_field'])
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# integer - equals
rv = client.get('/admin/model2/?flt0_0=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# integer - equals - test validation
rv = client.get('/admin/model2/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not equal
rv = client.get('/admin/model2/?flt0_1=5000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# integer - greater
rv = client.get('/admin/model2/?flt0_2=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# integer - smaller
rv = client.get('/admin/model2/?flt0_3=6000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# integer - empty
rv = client.get('/admin/model2/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# integer - not empty
rv = client.get('/admin/model2/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# integer - in list
rv = client.get('/admin/model2/?flt0_5=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# integer - in list - test validation
rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# integer - not in list
rv = client.get('/admin/model2/?flt0_6=5000%2C9000')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# Test float filter
view = CustomModelView(Model2, column_filters=['float_field'],
endpoint="_float")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'empty'),
(5, 'in list'),
(6, 'not in list'),
])
# float - equals
rv = client.get('/admin/_float/?flt0_0=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# float - equals - test validation
rv = client.get('/admin/_float/?flt0_0=badval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not equal
rv = client.get('/admin/_float/?flt0_1=25.9')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# float - greater
rv = client.get('/admin/_float/?flt0_2=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' in data)
# float - smaller
rv = client.get('/admin/_float/?flt0_3=60.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_3' in data)
ok_('string_field_val_4' not in data)
# float - empty
rv = client.get('/admin/_float/?flt0_4=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# float - not empty
rv = client.get('/admin/_float/?flt0_4=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# float - in list
rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' not in data)
ok_('string_field_val_2' not in data)
ok_('string_field_val_3' in data)
ok_('string_field_val_4' in data)
# float - in list - test validation
rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('Invalid Filter Value' in data)
# float - not in list
rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('string_field_val_1' in data)
ok_('string_field_val_2' in data)
ok_('string_field_val_3' not in data)
ok_('string_field_val_4' not in data)
# Test datetime filter
view = CustomModelView(Model1,
column_filters=['datetime_field'],
endpoint="_datetime")
admin.add_view(view)
eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']],
[
(0, 'equals'),
(1, 'not equal'),
(2, 'greater than'),
(3, 'smaller than'),
(4, 'between'),
(5, 'not between'),
(6, 'empty'),
])
# datetime - equals
rv = client.get('/admin/_datetime/?flt0_0=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not equal
rv = client.get('/admin/_datetime/?flt0_1=2014-04-03+01%3A09%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - greater
rv = client.get('/admin/_datetime/?flt0_2=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - smaller
rv = client.get('/admin/_datetime/?flt0_3=2014-04-03+01%3A08%3A00')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - between
rv = client.get('/admin/_datetime/?flt0_4=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' in data)
ok_('datetime_obj2' not in data)
# datetime - not between
rv = client.get('/admin/_datetime/?flt0_5=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' in data)
# datetime - empty
rv = client.get('/admin/_datetime/?flt0_6=1')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' in data)
ok_('datetime_obj1' not in data)
ok_('datetime_obj2' not in data)
# datetime - not empty
rv = client.get('/admin/_datetime/?flt0_6=0')
eq_(rv.status_code, 200)
data = rv.data.decode('utf-8')
ok_('test1_val_1' not in data)
ok_('datetime_obj1' in data)
ok_('datetime_obj2' in data)
def test_default_sort():
app, db, admin = setup()
M1, _ = create_models(db)
M1(test1='c').save()
M1(test1='b').save()
M1(test1='a').save()
eq_(M1.objects.count(), 3)
view = CustomModelView(M1, column_default_sort='test1')
admin.add_view(view)
_, data = view.get_list(0, None, None, None, None)
eq_(data[0].test1, 'a')
eq_(data[1].test1, 'b')
eq_(data[2].test1, 'c')
def test_extra_fields():
app, db, admin = setup()
Model1, _ = create_models(db)
view = CustomModelView(
Model1,
form_extra_fields={
'extra_field': fields.StringField('Extra Field')
}
)
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
# Check presence and order
data = rv.data.decode('utf-8')
ok_('Extra Field' in data)
pos1 = data.find('Extra Field')
pos2 = data.find('Test1')
ok_(pos2 < pos1)
def test_extra_field_order():
app, db, admin = setup()
Model1, _ = create_models(db)
view = CustomModelView(
Model1,
form_columns=('extra_field', 'test1'),
form_extra_fields={
'extra_field': fields.StringField('Extra Field')
}
)
admin.add_view(view)
client = app.test_client()
rv = client.get('/admin/model1/new/')
eq_(rv.status_code, 200)
# Check presence and order
data = rv.data.decode('utf-8')
pos1 = data.find('Extra Field')
pos2 = data.find('Test1')
ok_(pos2 > pos1)
def test_custom_form_base():
app, db, admin = setup()
class TestForm(form.BaseForm):
pass
Model1, _ = create_models(db)
view = CustomModelView(
Model1,
form_base_class=TestForm
)
admin.add_view(view)
ok_(hasattr(view._create_form_class, 'test1'))
create_form = view.create_form()
ok_(isinstance(create_form, TestForm))
def test_subdocument_config():
app, db, admin = setup()
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
subdoc = db.EmbeddedDocumentField(Comment)
# Check only
view1 = CustomModelView(
Model1,
form_subdocuments = {
'subdoc': {
'form_columns': ('name',)
}
}
)
ok_(hasattr(view1._create_form_class, 'subdoc'))
form = view1.create_form()
ok_('name' in dir(form.subdoc.form))
ok_('value' not in dir(form.subdoc.form))
# Check exclude
view2 = CustomModelView(
Model1,
form_subdocuments = {
'subdoc': {
'form_excluded_columns': ('value',)
}
}
)
form = view2.create_form()
ok_('name' in dir(form.subdoc.form))
ok_('value' not in dir(form.subdoc.form))
def test_subdocument_class_config():
app, db, admin = setup()
from flask_admin.contrib.mongoengine import EmbeddedForm
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
subdoc = db.EmbeddedDocumentField(Comment)
class EmbeddedConfig(EmbeddedForm):
form_columns = ('name',)
# Check only
view1 = CustomModelView(
Model1,
form_subdocuments = {
'subdoc': EmbeddedConfig()
}
)
form = view1.create_form()
ok_('name' in dir(form.subdoc.form))
ok_('value' not in dir(form.subdoc.form))
def test_nested_subdocument_config():
app, db, admin = setup()
# Check recursive
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Nested(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
comment = db.EmbeddedDocumentField(Comment)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
nested = db.EmbeddedDocumentField(Nested)
view1 = CustomModelView(
Model1,
form_subdocuments = {
'nested': {
'form_subdocuments': {
'comment': {
'form_columns': ('name',)
}
}
}
}
)
form = view1.create_form()
ok_('name' in dir(form.nested.form.comment.form))
ok_('value' not in dir(form.nested.form.comment.form))
def test_nested_list_subdocument():
app, db, admin = setup()
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
subdoc = db.ListField(db.EmbeddedDocumentField(Comment))
# Check only
view1 = CustomModelView(
Model1,
form_subdocuments = {
'subdoc': {
'form_subdocuments': {
None: {
'form_columns': ('name',)
}
}
}
}
)
form = view1.create_form()
inline_form = form.subdoc.unbound_field.args[2]
ok_('name' in dir(inline_form))
ok_('value' not in dir(inline_form))
def test_list_subdocument_validation():
app, db, admin = setup()
class Comment(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
subdoc = db.ListField(db.EmbeddedDocumentField(Comment))
view = CustomModelView(Model1)
admin.add_view(view)
client = app.test_client()
rv = client.post('/admin/model1/new/',
data={'test1': 'test1large', 'subdoc-0-name': 'comment', 'subdoc-0-value': 'test'})
eq_(rv.status_code, 302)
rv = client.post('/admin/model1/new/',
data={'test1': 'test1large', 'subdoc-0-name': '', 'subdoc-0-value': 'test'})
eq_(rv.status_code, 200)
ok_('This field is required' in rv.data)
def test_ajax_fk():
app, db, admin = setup()
Model1, Model2 = create_models(db)
view = CustomModelView(
Model2,
url='view',
form_ajax_refs={
'model1': {
'fields': ('test1', 'test2')
}
}
)
admin.add_view(view)
ok_(u'model1' in view._form_ajax_refs)
model = Model1(test1=u'first')
model.save()
model2 = Model1(test1=u'foo', test2=u'bar').save()
# Check loader
loader = view._form_ajax_refs[u'model1']
mdl = loader.get_one(model.id)
eq_(mdl.test1, model.test1)
items = loader.get_list(u'fir')
eq_(len(items), 1)
eq_(items[0].id, model.id)
items = loader.get_list(u'bar')
eq_(len(items), 1)
eq_(items[0].test1, u'foo')
# Check form generation
form = view.create_form()
eq_(form.model1.__class__.__name__, u'AjaxSelectField')
with app.test_request_context('/admin/view/'):
ok_(u'value=""' not in form.model1())
form.model1.data = model
needle = u'data-json="["%s", "first"]"' % as_unicode(model.id)
ok_(needle in form.model1())
ok_(u'value="%s"' % as_unicode(model.id) in form.model1())
# Check querying
client = app.test_client()
req = client.get(u'/admin/view/ajax/lookup/?name=model1&query=foo')
eq_(req.data, u'[["%s", "foo"]]' % model2.id)
# Check submitting
client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)})
mdl = Model2.objects.first()
ok_(mdl is not None)
ok_(mdl.model1 is not None)
eq_(mdl.model1.id, model.id)
eq_(mdl.model1.test1, u'first')
def test_nested_ajax_refs():
app, db, admin = setup()
# Check recursive
class Comment(db.Document):
name = db.StringField(max_length=20, required=True)
value = db.StringField(max_length=20)
class Nested(db.EmbeddedDocument):
name = db.StringField(max_length=20, required=True)
comment = db.ReferenceField(Comment)
class Model1(db.Document):
test1 = db.StringField(max_length=20)
nested = db.EmbeddedDocumentField(Nested)
view1 = CustomModelView(
Model1,
form_subdocuments = {
'nested': {
'form_ajax_refs': {
'comment': {
'fields': ['name']
}
}
}
}
)
form = view1.create_form()
eq_(type(form.nested.form.comment).__name__, 'AjaxSelectField')
ok_('nested-comment' in view1._form_ajax_refs)
def test_form_flat_choices():
app, db, admin = setup()
class Model(db.Document):
name = db.StringField(max_length=20, choices=('a', 'b', 'c'))
view = CustomModelView(Model)
admin.add_view(view)
form = view.create_form()
eq_(form.name.choices, [('a', 'a'), ('b', 'b'), ('c', 'c')])
def test_form_args_embeddeddoc():
app, db, admin = setup()
class Info(db.EmbeddedDocument):
name = db.StringField()
age = db.StringField()
class Model(db.Document):
info = db.EmbeddedDocumentField('Info')
timestamp = db.DateTimeField()
view = CustomModelView(
Model,
form_args= {
'info': {'label': 'Information'},
'timestamp': {'label': 'Last Updated Time'}
}
)
admin.add_view(view)
form = view.create_form()
eq_(form.timestamp.label.text, 'Last Updated Time')
# This is the failure
eq_(form.info.label.text, 'Information')
def test_simple_list_pager():
app, db, admin = setup()
Model1, _ = create_models(db)
class TestModelView(CustomModelView):
simple_list_pager = True
def get_count_query(self):
assert False
view = TestModelView(Model1)
admin.add_view(view)
count, data = view.get_list(0, None, None, None, None)
ok_(count is None)
| 28.31405 | 104 | 0.608435 | ["Apache-2.0"] | hexlism/css_platform | sleepyenv/lib/python2.7/site-packages/Flask_Admin-1.2.0-py2.7.egg/flask_admin/tests/mongoengine/test_basic.py | 27,408 | Python |
"""
This file offers the methods to automatically retrieve the graph Mycobacterium sp. 1554424.7.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MycobacteriumSp15544247(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Mycobacterium sp. 1554424.7 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Mycobacterium sp. 1554424.7 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MycobacteriumSp15544247",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 33.380952 | 223 | 0.678174 | ["MIT"] | AnacletoLAB/ensmallen | bindings/python/ensmallen/datasets/string/mycobacteriumsp15544247.py | 3,505 | Python |
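A brief usage sketch for the retrieval function above, assuming the package layout implied by the file path (`ensmallen.datasets.string`); per the docstring, the call downloads the STRING edge list on first use and reuses the cached copy afterwards.

```python
from ensmallen.datasets.string import MycobacteriumSp15544247

# Build the undirected protein-protein association graph for this strain.
graph = MycobacteriumSp15544247(directed=False, version="links.v11.5")
```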
from ..data.models import TaskRow
from ..data.postgres_async_db import AsyncPostgresDB
from .utils import read_body, format_response, handle_exceptions
import asyncio
class TaskApi(object):
_task_table = None
lock = asyncio.Lock()
def __init__(self, app):
app.router.add_route(
"GET",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "tasks",
self.get_tasks,
)
app.router.add_route(
"GET",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "tasks/{task_id}",
self.get_task,
)
app.router.add_route(
"POST",
"/flows/{flow_id}/runs/{run_number}/steps/{step_name}/" "task",
self.create_task,
)
self._async_table = AsyncPostgresDB.get_instance().task_table_postgres
@format_response
@handle_exceptions
async def get_tasks(self, request):
"""
---
description: get all tasks associated with the specified step.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
produces:
- text/plain
responses:
"200":
description: successful operation. Return tasks
"405":
description: invalid HTTP Method
"""
flow_name = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
return await self._async_table.get_tasks(flow_name, run_number, step_name)
@format_response
@handle_exceptions
async def get_task(self, request):
"""
---
        description: get the specified task.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
- name: "task_id"
in: "path"
description: "task_id"
required: true
type: "integer"
produces:
- text/plain
responses:
"200":
description: successful operation. Return task
"405":
description: invalid HTTP Method
"""
flow_name = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
task_id = request.match_info.get("task_id")
return await self._async_table.get_task(
flow_name, run_number, step_name, task_id
)
@format_response
@handle_exceptions
async def create_task(self, request):
"""
---
        description: register a new task for the specified step.
tags:
- Tasks
parameters:
- name: "flow_id"
in: "path"
description: "flow_id"
required: true
type: "string"
- name: "run_number"
in: "path"
description: "run_number"
required: true
type: "integer"
- name: "step_name"
in: "path"
description: "step_name"
required: true
type: "string"
- name: "body"
in: "body"
description: "body"
required: true
schema:
type: object
properties:
user_name:
type: string
tags:
type: object
system_tags:
type: object
produces:
- 'text/plain'
responses:
"202":
description: successful operation. Return newly registered task
"405":
description: invalid HTTP Method
"""
flow_id = request.match_info.get("flow_id")
run_number = request.match_info.get("run_number")
step_name = request.match_info.get("step_name")
body = await read_body(request.content)
user = body.get("user_name")
tags = body.get("tags")
system_tags = body.get("system_tags")
task = TaskRow(
flow_id=flow_id,
run_number=run_number,
step_name=step_name,
user_name=user,
tags=tags,
system_tags=system_tags,
)
return await self._async_table.add_task(task)
| 29.264368 | 86 | 0.530244 | ["Apache-2.0"] | ferras/metaflow-service-clone | metadata_service/api/task.py | 5,092 | Python |
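A hedged sketch of how the routes registered above could be exercised from a client. The host/port and the flow/run identifiers are placeholders, and it is assumed that `read_body` parses a JSON request body, which the schema in the `create_task` docstring suggests.

```python
import requests

BASE = "http://localhost:8080"  # placeholder; depends on how the metadata service is deployed

# Register a task for a step (POST route handled by TaskApi.create_task).
resp = requests.post(
    f"{BASE}/flows/MyFlow/runs/42/steps/start/task",
    json={"user_name": "alice", "tags": {}, "system_tags": {}},
)
print(resp.status_code)  # 202 on success, per the docstring

# List all tasks for the same step (GET route handled by TaskApi.get_tasks).
tasks = requests.get(f"{BASE}/flows/MyFlow/runs/42/steps/start/tasks").json()
```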
from time import sleep
from os import system
chars = "abcdefghijklmnopqrstuvwxyz1234567890"
morseCode = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---", "-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--..", ".----", "..---", "...--", "....-", ".....", "-....", "--...", "---..", "----.", "-----"]
def toText(morse):
global chars
global morseCode
morse = morse.split(" ")
for char in range(len(morse)):
if morse[char] == "/":
morse[char] = " "
else:
index = morseCode.index(morse[char])
morse[char] = chars[index]
morse = "".join(morse)
return morse
def toMorse(text):
global chars
global morseCode
text = text.replace(" ", "/ ")
text = list(text)
for char in range(len(text)):
if text[char] != "/" and text[char] != " ":
index = chars.index(text[char])
text[char] = morseCode[index]
if char != len(text) - 1:
text[char] += " "
text = "".join(text)
return text
def flash(morse):
unit = 1
system("cls")
for char in morse:
if char == ".":
print(".")
sleep(unit)
system("cls")
sleep(unit)
elif char == "-":
print(".")
sleep(unit * 3)
system("cls")
sleep(unit)
elif char == " ":
sleep(unit)
system("cls")
elif char == "/":
sleep(unit)
system("cls")
if __name__ == "__main__":
print(toText(".... . .-.. .-.. --- / -.. .- .. .-.. -.-- / .--. .-. --- --. .-. .- -- -- . .-. / --. --- --- -.. / .-.. ..- -.-. -.- / --- -. / - .... . / -.-. .... .- .-.. .-.. . -. --. . ... / - --- -.. .- -.--"))
print(toMorse("hello daily programmer good luck on the challenges today"))
flash(".... . .-.. .-.. --- / -.. .- .. .-.. -.-- / .--. .-. --- --. .-. .- -- -- . .-. / --. --- --- -.. / .-.. ..- -.-. -.- / --- -. / - .... . / -.-. .... .- .-.. .-.. . -. --. . ... / - --- -.. .- -.--")
| 36.966102 | 289 | 0.331958 | ["MIT"] | ngmhprogramming/DailyProgrammer | Everything/python_easy_7.py | 2,181 | Python |
"""
How to use RxPY to prepare batches for asyncio client.
"""
import asyncio
from csv import DictReader
import rx
from rx import operators as ops
from rx.scheduler.eventloop import AsyncIOScheduler
from influxdb_client import Point
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
def csv_to_generator(csv_file_path):
"""
Parse your CSV file into generator
"""
for row in DictReader(open(csv_file_path, 'r')):
point = Point('financial-analysis') \
.tag('type', 'vix-daily') \
.field('open', float(row['VIX Open'])) \
.field('high', float(row['VIX High'])) \
.field('low', float(row['VIX Low'])) \
.field('close', float(row['VIX Close'])) \
.time(row['Date'])
yield point
async def main():
async with InfluxDBClientAsync(url='http://localhost:8086', token='my-token', org='my-org') as client:
write_api = client.write_api()
"""
Async write
"""
async def async_write(batch):
"""
Prepare async task
"""
await write_api.write(bucket='my-bucket', record=batch)
return batch
"""
Prepare batches from generator
"""
batches = rx \
.from_iterable(csv_to_generator('vix-daily.csv')) \
.pipe(ops.buffer_with_count(500)) \
.pipe(ops.map(lambda batch: rx.from_future(asyncio.ensure_future(async_write(batch)))), ops.merge_all())
done = asyncio.Future()
"""
Write batches by subscribing to Rx generator
"""
batches.subscribe(on_next=lambda batch: print(f'Written batch... {len(batch)}'),
on_error=lambda ex: print(f'Unexpected error: {ex}'),
on_completed=lambda: done.set_result(0),
scheduler=AsyncIOScheduler(asyncio.get_event_loop()))
"""
Wait to finish all writes
"""
await done
if __name__ == "__main__":
asyncio.run(main())
| 29.814286 | 116 | 0.581696 | ["MIT"] | bonitoo-io/influxdb-client-python | examples/asynchronous_batching.py | 2,087 | Python |
# Copyright 2021 ZBW – Leibniz Information Centre for Economics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
from stwfsapy.text_features import mk_text_features
from qualle.label_calibration.simple import LabelCalibrator
from tests.common import DummyRegressor
@pytest.fixture
def calibrator():
return LabelCalibrator(DummyRegressor())
def test_lc_predict(calibrator, X):
calibrator.fit(X, [3, 5])
assert np.array_equal(calibrator.predict(X), [0, 1])
def test_lc_predict_without_fit_raises_exc(calibrator, X):
with pytest.raises(NotFittedError):
calibrator.predict(X)
def test_lc_fit_fits_regressor_with_txt_features(calibrator, X, mocker):
y = [3, 5]
txt_features = mk_text_features().fit(X)
X_transformed = txt_features.transform(X)
spy = mocker.spy(calibrator.regressor, 'fit')
calibrator.fit(X, y)
spy.assert_called_once()
assert (spy.call_args[0][0].toarray() == X_transformed.toarray()).all()
assert spy.call_args[0][1] == y
| 32.428571 | 75 | 0.750157 | ["Apache-2.0"] | annakasprzik/qualle | tests/label_calibration/test_simple.py | 1,591 | Python |
""" main_MST.py
Skript illustrating the calculation of a minimum weight spanning tree for an
exemplary graph instance.
Supplemental Material for the Lecture Notes "Networks - A brief Introduction
using a Paradigmatic Combinatorial Optimization Problem" at the international
summer school "Modern Computational Science 10 - Energy of the Future" held
in Oldenburg, September 3-14, 2018
Author: O. Melchert
Date: 2018-09-11
"""
import sys
from LibMCS2018 import *
def main():
G = fetchWeightedGraph(sys.argv[1])
T,Twgt = mstKruskal(G)
    print(mstGraphviz(G, T))
main()
# EOF: main_MST.py
| 24.958333 | 77 | 0.764608 | ["MIT"] | omelchert/MCS2018_Networks | main_MST.py | 599 | Python |
import os
import requests
WEBHOOK_URL = os.environ['DJANGO_WEBHOOK_URL']
def send_message(author_name: str, message: str) -> bool:
json_data = {
# 'content': f'**{name}**\n\n{message}'
'embeds': [
{
'author': {
'name': author_name,
},
'title': 'New message',
'description': message
}
]
}
response = requests.post(WEBHOOK_URL, json=json_data)
return 200 <= response.status_code < 300
| 23.26087 | 57 | 0.500935 | ["MIT"] | archimedeans/integration-bee | round/webhooks.py | 535 | Python |
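Note that `WEBHOOK_URL` above is read from `DJANGO_WEBHOOK_URL` at import time, so importing the module with the variable unset raises `KeyError`. A minimal usage sketch, assuming the module path implied by the file location (`round.webhooks`); the webhook URL is a placeholder.

```python
import os

# Placeholder URL; a real Discord-style webhook URL must be configured before import.
os.environ.setdefault("DJANGO_WEBHOOK_URL", "https://discord.com/api/webhooks/<id>/<token>")

from round.webhooks import send_message

delivered = send_message("Alice", "New solution submitted.")
print("delivered" if delivered else "failed")
```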
"""
ASGI config for anyberry project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'anyberry.settings')
application = get_asgi_application()
| 23.117647 | 78 | 0.78626 | ["MIT"] | Egor4ik325/anyberry | web-api/anyberry/asgi.py | 393 | Python |
"""
The MIT License (MIT)
Copyright (c) 2017 Andreas Poppele
Copyright (c) 2017 Roland Jaeger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from ..scrabTask import FileTask
import os
name = "LanguageDetector"
version = "1.1.1"
class LanguageDetector(FileTask):
cpp_extensions = ['.cpp', '.c++', '.cc',
'.cxx', '.c', '.h', '.hpp', '.hxx']
c_extensions = ['.c', '.h']
rust_extensions = ['.rs']
ruby_extensions = ['.rb']
java_extensions = ['.java']
go_extensions = ['.go']
php_extensions = ['.php', '.phtml', '.php3', '.php4', '.php5', '.php7',
'.phps']
js_extensions = ['.js']
objective_c_extensions = ['.h', '.m', '.mm', '.C']
swift_extensions = ['.swift']
c_sharp_extensions = ['.cs']
python_extensions = ['.py']
"""
Tries to detect the programming language of a library based on the file
extension
Example:
LanguageDetector:
main_language: C
languages:
- C
- C++
- Python
:param task_params: Parameter given explicitly for this task, for all
projects, defined in the task.yaml
:param global_args: Arguments that will be passed to all tasks. They
_might_ contain something that is useful for the
task, but the task has to check if it is _there_ as
these are user provided. If they are needed to work
that check should happen in the argHandler.
"""
def __init__(self, parameter, global_args):
super(LanguageDetector, self).__init__(name, version, parameter,
global_args)
# dictionary containing the common file extensions
# for each of the languages
self.__language_extensions = self.__get_language_extensions()
self.__report = self.__get_files_per_language()
def __get_language_extensions(self):
"""
:returns: A directory of the considered language extensions
"""
return {
'C++':
self.cpp_extensions,
'C':
self.c_extensions,
'Rust':
self.rust_extensions,
'Ruby':
self.ruby_extensions,
'Java':
self.java_extensions,
'Go':
self.go_extensions,
'PHP':
self.php_extensions,
'JavaScript':
self.js_extensions,
'Objective-C':
self.objective_c_extensions,
'Swift':
self.swift_extensions,
'C#':
self.c_sharp_extensions,
'Python':
self.python_extensions
}
def __get_files_per_language(self):
"""
:returns: A default directory of the considered languages, their
extensions and the amount of files that have that extension
(default=0)
"""
return {
'C++':
{extension: 0 for extension in self.cpp_extensions},
'C':
{extension: 0 for extension in self.c_extensions},
'Rust':
{extension: 0 for extension in self.rust_extensions},
'Ruby':
{extension: 0 for extension in self.ruby_extensions},
'Java':
{extension: 0 for extension in self.java_extensions},
'Go':
{extension: 0 for extension in self.go_extensions},
'PHP':
{extension: 0 for extension in self.php_extensions},
'JavaScript':
{extension: 0 for extension in self.js_extensions},
'Objective-C':
{extension: 0 for extension in self.objective_c_extensions},
'Swift':
{extension: 0 for extension in self.swift_extensions},
'C#':
{extension: 0 for extension in self.c_sharp_extensions},
'Python':
{extension: 0 for extension in self.python_extensions},
}
def __decide_h_extension(self):
"""
Decides which language 'owns' how many .h files
:returns: The report with divided header files
"""
report = self.__report
h_files = report['C']['.h']
if h_files > 0:
c_files = (sum(report['C'].values()) - h_files)
cpp_files = (sum(report['C++'].values())
- h_files
- report['C++']['.c'])
oc_files = (
sum(report['Objective-C'].values()) - h_files)
lang_fiels = c_files + cpp_files + oc_files
# Header only libraries are 'common' in C and C++
# the benefit of doubt goes to C
if lang_fiels == 0:
report['C']['.h'] = 1
report['C++']['.h'] = 0
report['Objective-C']['.h'] = 0
else:
report['C']['.h'] = (h_files *
c_files / lang_fiels)
report['C++']['.h'] = (h_files *
cpp_files / lang_fiels)
report['Objective-C']['.h'] = (h_files *
oc_files / lang_fiels)
return report
def __calculate_main_language(self, report):
"""
Calculates the main language (maximum of files extensions)
:param report: The report
:returns: The main language.
"""
max_files = 0
max_lang = None
for language in report:
lang_fiels = sum(report[language].values())
if max_files < lang_fiels:
max_lang = language
max_files = lang_fiels
return max_lang
def __calculate_used_languages(self, report):
"""
Calculates the used languages by throwing away the extension counts and
collapsing them to the language. Only languages that have at least one
file extension are kept and will appear in the report
:param report: The report
:returns: The used languages.
"""
languages = {}
for language in report:
total_files = sum(report[language].values())
if total_files > 0:
languages[language] = total_files
return sorted(languages, key=languages.get, reverse=True)
def scrab(self, project, filepath, file):
"""
Counts the files that have an extension of one of the languages
:param project: The project that the scrab task shall analyse
:param filepath: The filepath to the file that can be analysed
:param file: The file as string that can be analysed
:returns: Report that contains the scrabbed information of *this* file
- the extensions have either a count of 0 or 1
"""
filename, file_extension = os.path.splitext(filepath)
for language in self.__language_extensions:
if file_extension in self.__language_extensions[language]:
self.__report[language][file_extension] += 1
def report(self):
"""
Decides which headers files are (probable) from which language,
calculates the main language and removes redundant / unnecessary
detailed information from the report
:param report: The complete report this task created
:returns: Report that contains all scrabbed information
eg.:
LanguageDetector:
main_language: C
languages:
- C
- C++
- Python
"""
pre_report = self.__decide_h_extension()
main_language = self.__calculate_main_language(pre_report)
# write the result to the report
report = {}
report['main_language'] = main_language
report['languages'] = self.__calculate_used_languages(pre_report)
return report
| 35.706564 | 79 | 0.561635 | ["MIT"] | Eyenseo/gitScrabber | gitScrabber/scrabTasks/file/languageDetector.py | 9,248 | Python |
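A hedged sketch of how the task above could be driven. The `FileTask` base class is assumed to accept `(parameter, global_args)` and to have `scrab` called once per file before `report`; the file list is purely illustrative, and only extensions are inspected, never file contents.

```python
detector = LanguageDetector({}, {})
for path in ["src/main.cpp", "src/util.h", "scripts/build.py"]:
    detector.scrab(project=None, filepath=path, file="")

print(detector.report())
# e.g. {'main_language': 'C++', 'languages': ['C++', 'Python']}
# (the shared .h file is attributed to C++ by __decide_h_extension)
```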
# Core Pkgs
import streamlit as st
# EDA Pkgs
import pandas as pd
import numpy as np
from PIL import Image
# Utils
import os
import joblib
import hashlib
# passlib,bcrypt
# Data Viz Pkgs
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
# DB
from managed_db import *
# Password
def generate_hashes(password):
return hashlib.sha256(str.encode(password)).hexdigest()
def verify_hashes(password,hashed_text):
if generate_hashes(password) == hashed_text:
return hashed_text
return False
feature_names_best = ['age', 'sex', 'steroid', 'antivirals', 'fatigue', 'spiders', 'ascites','varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime','histology']
gender_dict = {"male":1,"female":2}
feature_dict = {"No":1,"Yes":2}
def get_value(val,my_dict):
for key,value in my_dict.items():
if val == key:
return value
def get_key(val,my_dict):
for key,value in my_dict.items():
if val == key:
return key
def get_fvalue(val):
feature_dict = {"No":1,"Yes":2}
for key,value in feature_dict.items():
if val == key:
return value
# Load ML Models
def load_model(model_file):
loaded_model = joblib.load(open(os.path.join(model_file),"rb"))
return loaded_model
# ML Interpretation
import lime
import lime.lime_tabular
html_temp = """
<div style="background-color:{};padding:10px;border-radius:10px">
<h1 style="color:white;text-align:center;">Disease Mortality Prediction </h1>
<h5 style="color:white;text-align:center;">Hepatitis B </h5>
</div>
"""
# Avatar Image using a url
avatar1 ="https://www.w3schools.com/howto/img_avatar1.png"
avatar2 ="https://www.w3schools.com/howto/img_avatar2.png"
result_temp ="""
<div style="background-color:#464e5f;padding:10px;border-radius:10px;margin:10px;">
<h4 style="color:white;text-align:center;">Algorithm:: {}</h4>
<img src="https://www.w3schools.com/howto/img_avatar.png" alt="Avatar" style="vertical-align: middle;float:left;width: 50px;height: 50px;border-radius: 50%;" >
<br/>
<br/>
<p style="text-align:justify;color:white">{} % probability that Patient {}s</p>
</div>
"""
result_temp2 ="""
<div style="background-color:#464e5f;padding:10px;border-radius:10px;margin:10px;">
<h4 style="color:white;text-align:center;">Algorithm:: {}</h4>
<img src="https://www.w3schools.com/howto/{}" alt="Avatar" style="vertical-align: middle;float:left;width: 50px;height: 50px;border-radius: 50%;" >
<br/>
<br/>
<p style="text-align:justify;color:white">{} % probability that Patient {}s</p>
</div>
"""
prescriptive_message_temp ="""
<div style="background-color:silver;overflow-x: auto; padding:10px;border-radius:5px;margin:10px;">
<h3 style="text-align:justify;color:black;padding:10px">Recommended Life style modification</h3>
<ul>
<li style="text-align:justify;color:black;padding:10px">Exercise Daily</li>
<li style="text-align:justify;color:black;padding:10px">Get Plenty of Rest</li>
<li style="text-align:justify;color:black;padding:10px">Exercise Daily</li>
<li style="text-align:justify;color:black;padding:10px">Avoid Alchol</li>
<li style="text-align:justify;color:black;padding:10px">Proper diet</li>
<ul>
<h3 style="text-align:justify;color:black;padding:10px">Medical Mgmt</h3>
<ul>
<li style="text-align:justify;color:black;padding:10px">Consult your doctor</li>
<li style="text-align:justify;color:black;padding:10px">Take your interferons</li>
<li style="text-align:justify;color:black;padding:10px">Go for checkups</li>
<ul>
</div>
"""
descriptive_message_temp ="""
<div style="background-color:silver;overflow-x: auto; padding:10px;border-radius:5px;margin:10px;">
<h3 style="text-align:justify;color:black;padding:10px">Definition</h3>
<p>Hepatitis B is a viral infection that attacks the liver and can cause both acute and chronic disease.</p>
</div>
"""
@st.cache
def load_image(img):
im =Image.open(os.path.join(img))
return im
def change_avatar(sex):
if sex == "male":
avatar_img = 'img_avatar.png'
else:
avatar_img = 'img_avatar2.png'
return avatar_img
def main():
"""Hep Mortality Prediction App"""
# st.title("Hepatitis Mortality Prediction App")
st.markdown(html_temp.format('royalblue'),unsafe_allow_html=True)
menu = ["Home","Login","SignUp"]
sub_menu = ["Plot","Prediction"] #,"Metrics"]
choice = st.sidebar.selectbox("Menu",menu)
if choice == "Home":
st.subheader("Home")
# st.text("What is Hepatitis?")
st.markdown(descriptive_message_temp,unsafe_allow_html=True)
st.image(load_image('hepimage.jpg'))
elif choice == "Login":
username = st.sidebar.text_input("Username")
password = st.sidebar.text_input("Password",type='password')
if st.sidebar.checkbox("Login"):
create_usertable()
hashed_pswd = generate_hashes(password)
result = login_user(username,verify_hashes(password,hashed_pswd))
# if password == "12345":
if result:
st.success("Welcome {}".format(username))
activity = st.selectbox("Activity",sub_menu)
if activity == "Plot":
st.subheader("Data Vis Plot")
df = pd.read_csv("clean_hepatitis_dataset.csv")
st.dataframe(df)
df['class'].value_counts().plot(kind='bar')
st.pyplot()
# Freq Dist Plot
freq_df = pd.read_csv("freq_df_hepatitis_dataset.csv")
st.bar_chart(freq_df['count'])
if st.checkbox("Area Chart"):
all_columns = df.columns.to_list()
feat_choices = st.multiselect("Choose a Feature",all_columns)
new_df = df[feat_choices]
st.area_chart(new_df)
elif activity == "Prediction":
st.subheader("Predictive Analytics")
age = st.number_input("Age",7,80)
sex = st.radio("Sex",tuple(gender_dict.keys()))
steroid = st.radio("Do You Take Steroids?",tuple(feature_dict.keys()))
antivirals = st.radio("Do You Take Antivirals?",tuple(feature_dict.keys()))
fatigue = st.radio("Do You Have Fatigue",tuple(feature_dict.keys()))
                    spiders = st.radio("Presence of Spider Naevi",tuple(feature_dict.keys()))
                    ascites = st.selectbox("Ascites",tuple(feature_dict.keys()))
varices = st.selectbox("Presence of Varices",tuple(feature_dict.keys()))
bilirubin = st.number_input("bilirubin Content",0.0,8.0)
alk_phosphate = st.number_input("Alkaline Phosphate Content",0.0,296.0)
sgot = st.number_input("Sgot",0.0,648.0)
albumin = st.number_input("Albumin",0.0,6.4)
protime = st.number_input("Prothrombin Time",0.0,100.0)
histology = st.selectbox("Histology",tuple(feature_dict.keys()))
feature_list = [age,get_value(sex,gender_dict),get_fvalue(steroid),get_fvalue(antivirals),get_fvalue(fatigue),get_fvalue(spiders),get_fvalue(ascites),get_fvalue(varices),bilirubin,alk_phosphate,sgot,albumin,int(protime),get_fvalue(histology)]
st.write(len(feature_list))
st.write(feature_list)
                    pretty_result = {"age":age,"sex":sex,"steroid":steroid,"antivirals":antivirals,"fatigue":fatigue,"spiders":spiders,"ascites":ascites,"varices":varices,"bilirubin":bilirubin,"alk_phosphate":alk_phosphate,"sgot":sgot,"albumin":albumin,"protime":protime,"histology":histology}
st.json(pretty_result)
single_sample = np.array(feature_list).reshape(1,-1)
# ML
model_choice = st.selectbox("Select Model",["LR","KNN","DecisionTree"])
if st.button("Predict"):
if model_choice == "KNN":
loaded_model = load_model("knn_hepB_model.pkl")
prediction = loaded_model.predict(single_sample)
pred_prob = loaded_model.predict_proba(single_sample)
elif model_choice == "DecisionTree":
loaded_model = load_model("decision_tree_clf_hepB_model.pkl")
prediction = loaded_model.predict(single_sample)
pred_prob = loaded_model.predict_proba(single_sample)
else:
loaded_model = load_model("logistic_regression_hepB_model.pkl")
prediction = loaded_model.predict(single_sample)
pred_prob = loaded_model.predict_proba(single_sample)
# st.write(prediction)
# prediction_label = {"Die":1,"Live":2}
# final_result = get_key(prediction,prediction_label)
if prediction == 1:
st.warning("Patient Dies")
pred_probability_score = {"Die":pred_prob[0][0]*100,"Live":pred_prob[0][1]*100}
st.subheader("Prediction Probability Score using {}".format(model_choice))
st.json(pred_probability_score)
st.subheader("Prescriptive Analytics")
st.markdown(prescriptive_message_temp,unsafe_allow_html=True)
else:
st.success("Patient Lives")
pred_probability_score = {"Die":pred_prob[0][0]*100,"Live":pred_prob[0][1]*100}
st.subheader("Prediction Probability Score using {}".format(model_choice))
st.json(pred_probability_score)
if st.checkbox("Interpret"):
if model_choice == "KNN":
loaded_model = load_model("knn_hepB_model.pkl")
elif model_choice == "DecisionTree":
loaded_model = load_model("decision_tree_clf_hepB_model.pkl")
else:
loaded_model = load_model("logistic_regression_hepB_model.pkl")
# loaded_model = load_model("models/logistic_regression_model.pkl")
# 1 Die and 2 Live
df = pd.read_csv("clean_hepatitis_dataset.csv")
x = df[['age', 'sex', 'steroid', 'antivirals','fatigue','spiders', 'ascites','varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime','histology']]
feature_names = ['age', 'sex', 'steroid', 'antivirals','fatigue','spiders', 'ascites','varices', 'bilirubin', 'alk_phosphate', 'sgot', 'albumin', 'protime','histology']
class_names = ['Die(1)','Live(2)']
explainer = lime.lime_tabular.LimeTabularExplainer(x.values,feature_names=feature_names, class_names=class_names,discretize_continuous=True)
# The Explainer Instance
exp = explainer.explain_instance(np.array(feature_list), loaded_model.predict_proba,num_features=13, top_labels=1)
exp.show_in_notebook(show_table=True, show_all=False)
# exp.save_to_file('lime_oi.html')
st.write(exp.as_list())
new_exp = exp.as_list()
label_limits = [i[0] for i in new_exp]
# st.write(label_limits)
label_scores = [i[1] for i in new_exp]
plt.barh(label_limits,label_scores)
st.pyplot()
plt.figure(figsize=(20,10))
fig = exp.as_pyplot_figure()
st.pyplot()
else:
st.warning("Incorrect Username/Password")
elif choice == "SignUp":
new_username = st.text_input("User name")
new_password = st.text_input("Password", type='password')
confirm_password = st.text_input("Confirm Password",type='password')
if new_password == confirm_password:
st.success("Password Confirmed")
else:
st.warning("Passwords not the same")
if st.button("Submit"):
create_usertable()
hashed_new_password = generate_hashes(new_password)
add_userdata(new_username,hashed_new_password)
st.success("You have successfully created a new account")
st.info("Login to Get Started")
if __name__ == '__main__':
main()
| 35.211538 | 277 | 0.702713 | [
"MIT"
] | Let-Me-Code/Hepatitis-B-Mortality-Prediction | app.py | 10,986 | Python |
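The interpretation step in app.py above leans on LIME's tabular explainer. Below is a minimal, self-contained sketch of that pattern; the toy data, the scikit-learn classifier and the feature names are invented for illustration and are not the pickled hepatitis models used by the app.
import numpy as np
import lime.lime_tabular
from sklearn.linear_model import LogisticRegression
X = np.random.rand(100, 4)                      # hypothetical feature matrix
y = np.array([0, 1] * 50)                       # hypothetical binary labels
clf = LogisticRegression().fit(X, y)
explainer = lime.lime_tabular.LimeTabularExplainer(
    X, feature_names=["f1", "f2", "f3", "f4"],
    class_names=["Die(1)", "Live(2)"], discretize_continuous=True)
exp = explainer.explain_instance(X[0], clf.predict_proba, num_features=4)
print(exp.as_list())                            # (feature condition, weight) pairs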
# Modification 2020 RangiLyu
# Copyright 2018-2019 Open-MMLab.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ...utils import util_mixins
class AssignResult(util_mixins.NiceRepr):
"""
Stores assignments between predicted and truth boxes.
Attributes:
num_gts (int): the number of truth boxes considered when computing this
assignment
gt_inds (LongTensor): for each predicted box indicates the 1-based
index of the assigned truth box. 0 means unassigned and -1 means
ignore.
max_overlaps (FloatTensor): the iou between the predicted box and its
assigned truth box.
labels (None | LongTensor): If specified, for each predicted box
indicates the category label of the assigned truth box.
Example:
>>> # An assign result between 4 predicted boxes and 9 true boxes
>>> # where only two boxes were assigned.
>>> num_gts = 9
>>> max_overlaps = torch.LongTensor([0, .5, .9, 0])
>>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
>>> labels = torch.LongTensor([0, 3, 4, 0])
>>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
>>> print(str(self)) # xdoctest: +IGNORE_WANT
<AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
labels.shape=(4,))>
>>> # Force addition of gt labels (when adding gt as proposals)
>>> new_labels = torch.LongTensor([3, 4, 5])
>>> self.add_gt_(new_labels)
>>> print(str(self)) # xdoctest: +IGNORE_WANT
<AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
labels.shape=(7,))>
"""
def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
self.num_gts = num_gts
self.gt_inds = gt_inds
self.max_overlaps = max_overlaps
self.labels = labels
# Interface for possible user-defined properties
self._extra_properties = {}
@property
def num_preds(self):
"""int: the number of predictions in this assignment"""
return len(self.gt_inds)
def set_extra_property(self, key, value):
"""Set user-defined new property."""
assert key not in self.info
self._extra_properties[key] = value
def get_extra_property(self, key):
"""Get user-defined property."""
return self._extra_properties.get(key, None)
@property
def info(self):
"""dict: a dictionary of info about the object"""
basic_info = {
"num_gts": self.num_gts,
"num_preds": self.num_preds,
"gt_inds": self.gt_inds,
"max_overlaps": self.max_overlaps,
"labels": self.labels,
}
basic_info.update(self._extra_properties)
return basic_info
def __nice__(self):
"""str: a "nice" summary string describing this assign result"""
parts = []
parts.append(f"num_gts={self.num_gts!r}")
if self.gt_inds is None:
parts.append(f"gt_inds={self.gt_inds!r}")
else:
parts.append(f"gt_inds.shape={tuple(self.gt_inds.shape)!r}")
if self.max_overlaps is None:
parts.append(f"max_overlaps={self.max_overlaps!r}")
else:
parts.append("max_overlaps.shape=" f"{tuple(self.max_overlaps.shape)!r}")
if self.labels is None:
parts.append(f"labels={self.labels!r}")
else:
parts.append(f"labels.shape={tuple(self.labels.shape)!r}")
return ", ".join(parts)
@classmethod
def random(cls, **kwargs):
"""Create random AssignResult for tests or debugging.
Args:
num_preds: number of predicted boxes
num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
ignored truth
p_assigned (float): probability of a predicted box not being
assigned
p_use_label (float | bool): with labels or not
rng (None | int | numpy.random.RandomState): seed or state
Returns:
:obj:`AssignResult`: Randomly generated assign results.
Example:
>>> from nanodet.model.head.assigner.assign_result import AssignResult
>>> self = AssignResult.random()
>>> print(self.info)
"""
rng = kwargs.get("rng", None)
num_gts = kwargs.get("num_gts", None)
num_preds = kwargs.get("num_preds", None)
p_ignore = kwargs.get("p_ignore", 0.3)
p_assigned = kwargs.get("p_assigned", 0.7)
p_use_label = kwargs.get("p_use_label", 0.5)
        num_classes = kwargs.get("num_classes", 3)
import numpy as np
if rng is None:
rng = np.random.mtrand._rand
elif isinstance(rng, int):
rng = np.random.RandomState(rng)
else:
rng = rng
if num_gts is None:
num_gts = rng.randint(0, 8)
if num_preds is None:
num_preds = rng.randint(0, 16)
if num_gts == 0:
max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
if p_use_label is True or p_use_label < rng.rand():
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = None
else:
import numpy as np
# Create an overlap for each predicted box
max_overlaps = torch.from_numpy(rng.rand(num_preds))
# Construct gt_inds for each predicted box
is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
# maximum number of assignments constraints
n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
assigned_idxs = np.where(is_assigned)[0]
rng.shuffle(assigned_idxs)
assigned_idxs = assigned_idxs[0:n_assigned]
assigned_idxs.sort()
is_assigned[:] = 0
is_assigned[assigned_idxs] = True
is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
true_idxs = np.arange(num_gts)
rng.shuffle(true_idxs)
true_idxs = torch.from_numpy(true_idxs)
gt_inds[is_assigned] = true_idxs[:n_assigned]
gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))
gt_inds[is_ignore] = -1
gt_inds[~is_assigned] = 0
max_overlaps[~is_assigned] = 0
if p_use_label is True or p_use_label < rng.rand():
if num_classes == 0:
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = torch.from_numpy(
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
rng.randint(0, num_classes, size=num_preds)
)
labels[~is_assigned] = 0
else:
labels = None
self = cls(num_gts, gt_inds, max_overlaps, labels)
return self
def add_gt_(self, gt_labels):
"""Add ground truth as assigned results.
Args:
gt_labels (torch.Tensor): Labels of gt boxes
"""
self_inds = torch.arange(
1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device
)
self.gt_inds = torch.cat([self_inds, self.gt_inds])
self.max_overlaps = torch.cat(
[self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]
)
if self.labels is not None:
self.labels = torch.cat([gt_labels, self.labels])
| 37.026316 | 86 | 0.592514 | [
"Apache-2.0"
] | yangzilongdmgy/merge_monster_3d | mmdet3d/models/dense_heads/assigner/assign_result.py | 8,442 | Python |
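A short sketch of how AssignResult.random() and add_gt_() combine, following the import path given in the class docstring (adjust it to wherever this module lives in your tree); the sizes and the ground-truth labels below are arbitrary.
import torch
from nanodet.model.head.assigner.assign_result import AssignResult
result = AssignResult.random(num_gts=3, num_preds=5, rng=0)   # reproducible toy assignment
result.add_gt_(torch.LongTensor([1, 2, 0]))                   # append gt boxes as proposals
print(result.info["gt_inds"].shape)                           # now 3 + 5 = 8 entries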
"""
Interface item implementation. There are several notations supported
- class box with interface stereotype
- folded interface
- ball is drawn to indicate provided interface
- socket is drawn to indicate required interface
Interface item can act as icon of assembly connector, see
`gaphor.diagram.connector` module documentation for details. *Documentation
of this module does not take into account assembly connector icon mode.*
Folded Interface Item
=====================
Folded interface notation is reserved for very simple situations.
When interface is folded
- only an implementation can be connected (ball - provided interface)
- or only usage dependency can be connected (socket - required interface)
Above means that interface cannot be folded when
- both, usage dependency and implementation are connected
- any other lines are connected
Dependencies
------------
Dependencies between folded interfaces are *not supported*
+---------------------+---------------------+
| *Supported* | *Unsupported* |
+=====================+=====================+
| :: | :: |
| | |
| |A|--( O--|B| | |A|--(--->O--|B| |
| Z Z | Z Z |
+---------------------+---------------------+
On above diagram, A requires interface Z and B provides interface Z.
Additionally, on the right diagram, Z is connected to itself with
dependency.
There is no need for additional dependency
- UML data model provides information, that Z is common for A and B
(A requires Z, B provides Z)
- on a diagram, both folded interface items (required and provided)
represent the same interface, which is easily identifiable with its name
Even more, adding a dependency between folded interfaces provides
information, on UML data model level, that an interface depends on itself
but it is not the intention of this (*unsupported*) notation.
For more examples of non-supported by Gaphor notation, see
http://martinfowler.com/bliki/BallAndSocket.html.
Folding and Connecting
----------------------
Current approach to folding and connecting lines to an interface is as
follows
- allow folding/unfolding of an interface only when there is only one
  implementation or dependency usage connected
- when interface is folded, allow only one implementation or dependency
usage to be connected
Folding and unfolding is performed by `InterfacePropertyPage` class.
"""
import ast
from enum import Enum
from math import pi
from gaphas.canvas import Canvas
from gaphas.connector import LinePort
from gaphas.geometry import distance_line_point, distance_point_point
from gaphas.item import NE, NW, SE, SW
from gaphor import UML
from gaphor.diagram.classes.klass import (
attribute_watches,
attributes_compartment,
operation_watches,
operations_compartment,
)
from gaphor.diagram.classes.stereotype import stereotype_compartments
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import Box, EditableText, IconBox, Text, draw_border
from gaphor.diagram.support import represents
from gaphor.diagram.text import FontWeight, VerticalAlign
class Folded(Enum):
# Non-folded mode.
NONE = 0
# Folded mode, provided (ball) notation.
PROVIDED = 1
# Folded mode, required (socket) notation.
REQUIRED = 2
# Folded mode, notation of assembly connector icon mode (ball&socket).
ASSEMBLY = 3
class Side(Enum):
N = 0
E = pi * 0.5
S = pi
W = pi * 1.5
class InterfacePort(LinePort):
"""
Interface connection port.
It is simple line port, which changes glue behaviour depending on
interface folded state. If interface is folded, then
`InterfacePort.glue` method suggests connection in the middle of the
port.
The port provides rotation angle information as well. Rotation angle
is direction the port is facing (i.e. 0 is north, PI/2 is west, etc.).
The rotation angle shall be used to determine rotation of required
interface notation (socket's arc is in the same direction as the
angle).
"""
def __init__(self, start, end, is_folded, side):
super().__init__(start, end)
self.is_folded = is_folded
# Used by connection logic:
self.side = side
def glue(self, pos):
"""
Behaves like simple line port, but for folded interface suggests
connection to the middle point of a port.
"""
if self.is_folded():
px = (self.start.x + self.end.x) / 2
py = (self.start.y + self.end.y) / 2
d = distance_point_point((px, py), pos)
return (px, py), d
else:
d, pl = distance_line_point(self.start, self.end, pos)
return pl, d
@represents(UML.Interface)
class InterfaceItem(ElementPresentation, Classified):
"""
Interface item supporting class box, folded notations and assembly
connector icon mode.
When in folded mode, provided (ball) notation is used by default.
"""
RADIUS_PROVIDED = 10
RADIUS_REQUIRED = 14
def __init__(self, id=None, model=None):
super().__init__(id, model)
self._folded = Folded.NONE
self.side = Side.N
handles = self.handles()
h_nw = handles[NW]
h_ne = handles[NE]
h_sw = handles[SW]
h_se = handles[SE]
def is_folded():
return self._folded != Folded.NONE
# edge of element define default element ports
self._ports = [
InterfacePort(h_nw.pos, h_ne.pos, is_folded, Side.N),
InterfacePort(h_ne.pos, h_se.pos, is_folded, Side.E),
InterfacePort(h_se.pos, h_sw.pos, is_folded, Side.S),
InterfacePort(h_sw.pos, h_nw.pos, is_folded, Side.W),
]
self.watch("show_stereotypes", self.update_shapes).watch(
"show_attributes", self.update_shapes
).watch("show_operations", self.update_shapes).watch(
"subject[NamedElement].name"
).watch(
"subject[NamedElement].namespace.name"
).watch(
"subject.appliedStereotype", self.update_shapes
).watch(
"subject.appliedStereotype.classifier.name"
).watch(
"subject.appliedStereotype.slot", self.update_shapes
).watch(
"subject.appliedStereotype.slot.definingFeature.name"
).watch(
"subject.appliedStereotype.slot.value", self.update_shapes
).watch(
"subject[Interface].supplierDependency", self.update_shapes
)
attribute_watches(self, "Interface")
operation_watches(self, "Interface")
show_stereotypes = UML.properties.attribute("show_stereotypes", int)
show_attributes = UML.properties.attribute("show_attributes", int, default=True)
show_operations = UML.properties.attribute("show_operations", int, default=True)
def load(self, name, value):
if name == "folded":
self._folded = Folded(ast.literal_eval(value))
else:
super().load(name, value)
def save(self, save_func):
super().save(save_func)
save_func("folded", self._folded.value)
def _set_folded(self, folded):
"""
Set folded notation.
:param folded: Folded state, see Folded.* enum.
"""
if self._folded == folded:
return
self._folded = folded
if folded == Folded.NONE:
movable = True
else:
if self._folded == Folded.PROVIDED:
icon_size = self.RADIUS_PROVIDED * 2
else: # required interface or assembly icon mode
icon_size = self.RADIUS_REQUIRED * 2
self.min_width, self.min_height = icon_size, icon_size
self.width, self.height = icon_size, icon_size
# update only h_se handle - rest of handles should be updated by
# constraints
h_nw = self._handles[NW]
h_se = self._handles[SE]
h_se.pos.x = h_nw.pos.x + self.min_width
h_se.pos.y = h_nw.pos.y + self.min_height
movable = False
for h in self._handles:
h.movable = movable
self.update_shapes()
folded = property(
lambda s: s._folded,
_set_folded,
doc="Check or set folded notation, see Folded.* enum.",
)
def pre_update(self, context):
assert isinstance(self.canvas, Canvas)
connected_items = [c.item for c in self.canvas.get_connections(connected=self)]
connectors = any(
map(lambda i: isinstance(i.subject, UML.Connector), connected_items)
)
if connectors or self._folded != Folded.NONE:
provided = connectors or any(
map(
lambda i: isinstance(i.subject, UML.Implementation), connected_items
)
)
required = any(
map(lambda i: isinstance(i.subject, UML.Usage), connected_items)
)
if required and provided:
self.folded = Folded.ASSEMBLY
elif required:
self.folded = Folded.REQUIRED
else:
self.folded = Folded.PROVIDED
self.update_shapes(connectors=connectors)
super().pre_update(context)
def update_shapes(self, event=None, connectors=None):
if self._folded == Folded.NONE:
self.shape = self.class_shape()
else:
self.shape = self.ball_and_socket_shape(connectors)
def class_shape(self):
return Box(
Box(
Text(
text=lambda: UML.model.stereotypes_str(
self.subject, ("interface",)
),
style={"min-width": 0, "min-height": 0},
),
EditableText(
text=lambda: self.subject.name or "",
style={"font-weight": FontWeight.BOLD},
),
Text(
text=lambda: from_package_str(self),
style={"font": "sans 8", "min-width": 0, "min-height": 0},
),
style={"padding": (12, 4, 12, 4)},
),
*(
self.show_attributes
and self.subject
and [attributes_compartment(self.subject)]
or []
),
*(
self.show_operations
and self.subject
and [operations_compartment(self.subject)]
or []
),
*(self.show_stereotypes and stereotype_compartments(self.subject) or []),
style={
"min-width": 100,
"min-height": 50,
"vertical-align": VerticalAlign.TOP,
},
draw=draw_border,
)
def ball_and_socket_shape(self, connectors=None):
assert self.canvas
if connectors is None:
# distinguish between None and []
connected_items = [
c.item for c in self.canvas.get_connections(connected=self)
]
connectors = any(
map(lambda i: isinstance(i.subject, UML.Connector), connected_items)
)
return IconBox(
Box(
style={"min-width": self.min_width, "min-height": self.min_height},
draw=self.draw_interface_ball_and_socket,
),
Text(
text=lambda: UML.model.stereotypes_str(self.subject),
style={"min-width": 0, "min-height": 0},
),
EditableText(
text=lambda: self.subject.name or "",
style={
"font-weight": FontWeight.NORMAL if connectors else FontWeight.BOLD
},
),
)
def draw_interface_ball_and_socket(self, _box, context, _bounding_box):
cr = context.cairo
h_nw = self._handles[NW]
cx, cy = (h_nw.pos.x + self.width / 2, h_nw.pos.y + self.height / 2)
if self._folded in (Folded.REQUIRED, Folded.ASSEMBLY):
r = self.RADIUS_REQUIRED
if self.side == Side.N:
x, y = r * 2, r
elif self.side == Side.E:
x, y = r, r * 2
elif self.side == Side.S:
x, y = 0, r
elif self.side == Side.W:
x, y = r, 0
cr.move_to(x, y)
cr.arc_negative(
cx, cy, self.RADIUS_REQUIRED, self.side.value, pi + self.side.value
)
if self._folded in (Folded.PROVIDED, Folded.ASSEMBLY):
cr.move_to(cx + self.RADIUS_PROVIDED, cy)
cr.arc(cx, cy, self.RADIUS_PROVIDED, 0, pi * 2)
cr.stroke()
| 33.250639 | 88 | 0.590032 | [
"Apache-2.0"
] | 987Frogh/Makehuman | gaphor/diagram/classes/interface.py | 13,001 | Python |
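As a hedged illustration of the folded property described above, this is how the notation of an InterfaceItem that already exists on a loaded diagram could be toggled (the item itself is not constructed here):
item.folded = Folded.PROVIDED   # ball notation
item.folded = Folded.REQUIRED   # socket notation, oriented by item.side
item.folded = Folded.NONE       # back to the class-box notation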
import random
import time
pc=random.randint(0,10)
tentativas=0
chute=0
print('INICIANDO')
time.sleep(2)
while chute != pc:
chute=int(input('Digite um número entre 0 a 10: '))
print('PROCESSANDO')
time.sleep(1)
if chute < pc:
print('Mais, tente novamente.')
elif chute > pc:
        print('Menos, tente novamente.')
tentativas+=1
print('Você acertou com {} tentativas!'.format(tentativas)) | 24.705882 | 59 | 0.661905 | [
"MIT"
] | jhonatanmaia/python | study/curso-em-video/exercises/058.py | 422 | Python |
from optparse import OptionParser
import os,sys
import itertools
import re
def readSrc(src_dir):
lines=[]
for root, dirs, files in os.walk(src_dir):
for file in files:
if file.endswith(".cpp"):
lines+=["New_file "+ file]
lines_file = open(os.path.join(root, file)).read().splitlines()
lines+=lines_file
pass
pass
pass
return lines
def writeRunLog(dico, filename):
st=""
for clas in list(dico.keys()):
st+="class : "+clas+"\n"
st+="=======\n"
st+=" - Desc : "+dico[clas]["desc"]+"\n"
if (len(list(dico[clas]["parameters"].keys()))>0):
st+=" - Params : \n"
st+=" ********** \n"
pass
for param in list(dico[clas]["parameters"].keys()):
st+=" + Param : "+param+" ==> Desc : "+dico[clas]["parameters"][param]["desc"]+"\n"
st+=" -----\n"
if (len(list(dico[clas]["parameters"][param]["dict"].keys()))>0):
st+=" + Dicts : \n"
st+=" +++++ \n"
pass
for dic in list(dico[clas]["parameters"][param]["dict"].keys()):
st+=" Dict : "+dic+" ==> Desc : "+dico[clas]["parameters"][param]["dict"][dic]["desc"]+"\n"
st+=" ----\n"
pass
pass
pass
fi=open(filename, "w")
fi.write(st)
fi.close()
return
def getLinesWithRegExp(lines):
dico={}
for xd in ["XD","2XD","3XD"]:
debut=0
for line in lines:
            # append a blank so the last word of the comment is captured
line+=" "
if ((len(line.strip())>=8) and (line.split()[0]=="New_file")):
debut=1
filename=line.split()[1]
            # to revisit: the comments do not work for mpcube
# elif (re.findall("//.*//[ ]*"+xd,line)):
# continue
elif (re.findall("//[ ]*"+xd+"[ ]+",line)):
                # handling of classes
li=re.findall(re.escape(xd)+"(.*)"+re.escape(' '),line)[0].split(' ')
li = [x for x in li if x.strip()]
desc=re.split("//[ ]*"+xd+"[ ]+",line)[-1]
if li[0]=="attr":
if (debut<2):
raise Exception("error in "+filename+" first line XD "+line)
# print dico[nameClass]
desc2=li[1:]
dico_p={"desc":' '.join(desc2)}
dico_p["dict"]={}
dico_p["numero"]=len(dico[nameClass]["parameters"])
dico[nameClass]['parameters'][li[1]]=dico_p
# print li
# print desc2
#1/0
elif li[0]=="ref":
if (debut<2):
raise Exception("error in "+filename+" first line XD "+line)
# print nameClass, line
dico[nameClass]["refs"].append([li[1],li[2]])
# 1/0
else:
nameClass=li[0]
dico[nameClass]={"desc":desc,"parameters":{},"refs":[]}
debut=2
pass
elif re.findall("//[ ]*"+xd+"_ADD_P+",line):
                # handling of parameters
if (debut<2):
raise Exception("error in "+filename+" first line XD "+line)
dico_param={}
optionnel=True
if (re.findall("Param::REQUIRED",line)):
optionnel=False
pass
print("line:",line)
param=line.split('"')[1].lower()
mparam=param.split("|")[0]
if mparam=="lambda":
mparam="lambda_u"
dico_param["mparm"]=mparam
dico_param["optionnel"]=optionnel
dr=line.split(xd+"_ADD_P")[-1].split()
desc=param+" "+dr[0]+" "+mparam+" "+str(int(optionnel))+" "+' '.join(dr[1:])
dico_param["desc"]=desc
dico_param["numero"]=len(dico[nameClass]["parameters"])
dico_param["dict"]={}
dico[nameClass]["parameters"][param]=dico_param
pass
elif re.findall("//[ ]*"+xd+"_ADD_DICO+",line):
                # handling of dictionaries
                if (debut<2):
                    raise Exception("error in "+filename+" first line XD "+line)
dr=line.split(xd+"_ADD_P")[-1].split()
dico_dict={}
dico_dict["desc"]=line
dict_name=line.split('"')[1].lower()
dico[nameClass]["parameters"][param]["dict"][dict_name]=dico_dict
pass
pass
return dico
def writeOutPutFile(dico, filename,st_add=""):
st=""
for clas in list(dico.keys()):
st+=dico[clas]["desc"]+"\n"
Params=dico[clas]["parameters"]
for i in range(len(list(Params.keys()))):
ok=0
for j,param in enumerate(Params.keys()):
if (i==Params[param]["numero"]):
ok=1
break
if (ok==0):
print("pb",clas,"nmero",i,"params",Params)
1/0
if (len(list(Params[param]["dict"].keys()))==0):
st+=" attr "+Params[param]["desc"]+"\n"
pass
str_dico=" attr "+param+" chaine(into=["
for dic in list(Params[param]["dict"].keys()):
str_dico+='"'+dic+'",'
pass
if (len(list(Params[param]["dict"].keys()))>0):
desc=Params[param]["desc"].split()[2:]
st+=str_dico+"]) "+' '.join(desc)+"\n"
pass
pass
for ref in dico[clas]["refs"]:
st+=" ref "+ref[0]+" "+ref[1]+"\n"
pass
pass
st=st.replace(" double "," floattant ")
st=st.replace(" flag "," rien ")
st=st.replace(" int "," entier ")
st=st.replace(r"'",r"\'")
st=st.replace(r"\\'",r"\'")
#st="\\'".join(st.split("'"))
#st="\\'".join(st.split("\\\\'"))
fi=open(filename, "w")
fi.write(st_add)
fi.write(st)
fi.write("\n")
fi.close()
return
def run(result_dir, src_dir):
lines=readSrc(src_dir)
dico=getLinesWithRegExp(lines)
run_log=os.path.join(result_dir,"run.log")
writeRunLog(dico, run_log)
trad_org=os.path.join(result_dir,"TRAD_2.org")
fi=open(trad_org,"r")
st_org=fi.read()
fi.close()
st=st_org
trad_add=os.path.join(result_dir,"TRAD2_ajout0")
if (os.path.exists(trad_add)):
fi=open(trad_add,"r")
st+=fi.read()
fi.close()
trad_ajout=os.path.join(result_dir,"TRAD_2")
writeOutPutFile(dico,trad_ajout,st)
return
def options_script(argv):
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option("-r", "--result", dest="result_dir", type="string",
metavar="<result_dir>",
help="choose results directory")
parser.add_option("-s", "--src", dest="src_dir", type="string",
metavar="<src_dir>",
help="choose src directory")
parser.set_defaults(result_dir=os.getcwd())
parser.set_defaults(src_dir=os.getcwd())
(options, args) = parser.parse_args(argv)
if len(args) > 0:
parser.print_help()
sys.exit(1)
pass
if options.result_dir != os.getcwd():
options.result_dir=os.path.join(os.getcwd(),options.result_dir)
if not os.path.isdir(options.result_dir):
os.mkdir(options.result_dir)
pass
pass
result_dir = os.path.expanduser(options.result_dir)
result_dir = os.path.expandvars(result_dir)
result_dir = os.path.abspath(result_dir)
if not os.path.isdir(result_dir):
sys.stderr.write('Error: result dir \"' + result_dir + '\" is not a directory\n')
sys.exit(1)
pass
src_dir = options.src_dir
if src_dir!=None:
        src_dir = os.path.expanduser(src_dir)
src_dir = os.path.expandvars(src_dir)
src_dir = os.path.abspath(src_dir)
if not os.path.isdir(src_dir):
sys.stderr.write('Error: source dir \"' + src_dir + '\" is not a directory\n')
sys.exit(1)
pass
pass
return result_dir, src_dir
def main(argv):
"""
Main function.
"""
result_dir, src_dir = options_script(argv)
run(result_dir, src_dir)
if __name__ == "__main__":
main(sys.argv[1:])
| 33.469231 | 119 | 0.478626 | [
"BSD-3-Clause"
] | cea-trust-platform/trust-code | Outils/TRIOXDATA/XTriou/Extract_xdata.py | 8,702 | Python |
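For reference, a hypothetical round trip through getLinesWithRegExp, assuming the functions above are importable; the C++ file name, class name and parameter below are fabricated purely to show the shape of the returned dictionary.
lines = [
    "New_file Champ_front.cpp",
    "// XD champ_front objet_u champ_front -1 Basic class for boundary fields.",
    '//  XD_ADD_P double Param::REQUIRED "val" value of the field',
]
print(getLinesWithRegExp(lines))   # {'champ_front': {'desc': ..., 'parameters': {'val': ...}, 'refs': []}}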
import zmq
import random
ctx = zmq.Context(1)
sock = ctx.socket(zmq.REP)
sock.connect('tcp://127.0.0.1:7001')
while True:
parts = sock.recv_multipart()
print("GOT", parts)
if random.randrange(3) == 0:
import os, signal
os.kill(os.getpid(), signal.SIGKILL)
sock.send(b"Echo: " + b' '.join(parts))
| 23.5 | 44 | 0.62614 | [
"MIT"
] | asvetlov/zerogw | examples/crashing.py | 329 | Python |
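A possible counterpart for exercising this crashing worker locally (not part of the repository, which normally relies on zerogw to bind the endpoint): a REQ socket that binds the port the worker connects to. Note the reply may never arrive when the worker kills itself.
import zmq
ctx = zmq.Context(1)
req = ctx.socket(zmq.REQ)
req.bind('tcp://127.0.0.1:7001')
req.send_multipart([b"hello", b"world"])
print(req.recv())   # b"Echo: hello world" unless the worker crashed first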
__all__ = [
'BaseClassificationAggregator',
'BaseImageSegmentationAggregator',
'BaseEmbeddingsAggregator',
'BaseTextsAggregator',
'BasePairwiseAggregator',
]
import attr
from .. import annotations
@attr.s
@annotations.manage_docstring
class BaseClassificationAggregator:
""" This is a base class for all classification aggregators"""
labels_: annotations.OPTIONAL_LABELS = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.LABELED_DATA) -> annotations.Annotation(type='BaseClassificationAggregator',
title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.LABELED_DATA) -> annotations.TASKS_LABELS:
raise NotImplementedError()
@attr.s
@annotations.manage_docstring
class BaseImageSegmentationAggregator:
"""This is a base class for all image segmentation aggregators"""
segmentations_: annotations.TASKS_SEGMENTATIONS = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.SEGMENTATION_DATA) -> annotations.Annotation(type='BaseImageSegmentationAggregator',
title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.SEGMENTATION_DATA) -> annotations.TASKS_SEGMENTATIONS:
raise NotImplementedError()
@attr.s
@annotations.manage_docstring
class BaseEmbeddingsAggregator:
"""This is a base class for all embeddings aggregators"""
embeddings_and_outputs_: annotations.TASKS_EMBEDDINGS_AND_OUTPUTS = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.EMBEDDED_DATA) -> annotations.Annotation(type='BaseEmbeddingsAggregator', title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.EMBEDDED_DATA) -> annotations.TASKS_EMBEDDINGS_AND_OUTPUTS:
raise NotImplementedError()
@attr.s
@annotations.manage_docstring
class BaseTextsAggregator:
""" This is a base class for all texts aggregators"""
texts_: annotations.TASKS_TEXTS = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.TEXT_DATA) -> annotations.Annotation(type='BaseTextsAggregator', title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.TEXT_DATA) -> annotations.TASKS_TEXTS:
raise NotImplementedError()
@attr.s
@annotations.manage_docstring
class BasePairwiseAggregator:
""" This is a base class for all pairwise comparison aggregators"""
scores_: annotations.LABEL_SCORES = attr.ib(init=False)
@annotations.manage_docstring
def fit(self, data: annotations.PAIRWISE_DATA) -> annotations.Annotation(type='BasePairwiseAggregator', title='self'):
raise NotImplementedError()
@annotations.manage_docstring
def fit_predict(self, data: annotations.PAIRWISE_DATA) -> annotations.LABEL_SCORES:
raise NotImplementedError()
| 34.612903 | 124 | 0.724138 | [
"Apache-2.0"
] | artinmajdi/crowd-kit | build/lib/crowdkit/aggregation/base/__init__.py | 3,219 | Python |
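To illustrate how these bases are meant to be subclassed, here is a minimal majority-vote aggregator; the task/label column names follow the crowd-kit convention, but this sketch is not the library's own MajorityVote implementation.
import attr
import pandas as pd
from crowdkit.aggregation.base import BaseClassificationAggregator
@attr.s
class SimpleMajorityVote(BaseClassificationAggregator):
    def fit(self, data: pd.DataFrame) -> 'SimpleMajorityVote':
        # most frequent label per task
        self.labels_ = data.groupby('task')['label'].agg(lambda s: s.value_counts().idxmax())
        return self
    def fit_predict(self, data: pd.DataFrame) -> pd.Series:
        return self.fit(data).labels_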
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import ast
import shlex
import subprocess as sp
import apache_beam as beam
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
def assert_matches_stdout(
actual, expected_stdout, normalize_fn=lambda elem: elem, label=''):
"""Asserts a PCollection of strings matches the expected stdout elements.
Args:
actual (beam.PCollection): A PCollection.
expected (List[str]): A list of stdout elements, one line per element.
normalize_fn (Function[any]): A function to normalize elements before
comparing them. Can be used to sort lists before comparing.
label (str): [optional] Label to make transform names unique.
"""
def stdout_to_python_object(elem_str):
try:
elem = ast.literal_eval(elem_str)
except (SyntaxError, ValueError):
elem = elem_str
return normalize_fn(elem)
actual = actual | label >> beam.Map(stdout_to_python_object)
expected = list(map(stdout_to_python_object, expected_stdout))
assert_that(actual, equal_to(expected), 'assert ' + label)
def run_shell_commands(commands, **kwargs):
"""Runs a list of Notebook-like shell commands.
Lines starting with `#` are ignored as comments.
Lines starting with `!` are run as commands.
Variables like `{variable}` are substituted with **kwargs.
"""
for cmd in commands:
cmd = cmd.strip().lstrip('!').format(**kwargs)
sp_cmd = shlex.split(cmd, comments=True, posix=True)
if sp_cmd:
sp.call(sp_cmd)
yield sp_cmd
| 35.646154 | 75 | 0.740613 | [
"Apache-2.0"
] | DevangiDas/beam | sdks/python/apache_beam/examples/snippets/util.py | 2,317 | Python |
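A small usage sketch for assert_matches_stdout; the pipeline contents and the expected stdout lines are made up, and normalize_fn is used to make element ordering inside each list irrelevant.
import apache_beam as beam
with beam.Pipeline() as pipeline:
    produced = pipeline | beam.Create(['[3, 1, 2]', '[6, 5, 4]'])
    assert_matches_stdout(
        produced, ['[1, 2, 3]', '[4, 5, 6]'],
        normalize_fn=sorted, label='check snippet output')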
import numpy as np
__all__ = ["Kernel", "Uncorrelated", "ExpSquared", "Matern"]
class Kernel(object):
def __init__(self, parnames=[], name=''):
"""
:param parnames:
A list of names of the kernel params, used to alias the intrinsic
parameter names. This way different instances of the same kernel
can have different parameter names.
"""
if len(parnames) == 0:
parnames = self.kernel_params
assert len(parnames) == len(self.kernel_params)
self.param_alias = dict(zip(self.kernel_params, parnames))
self.params = {}
self.name = name
def __repr__(self):
return '{}({})'.format(self.__class__, self.param_alias.items())
def update(self, **kwargs):
"""Take a dictionary of parameters, pick out the properly named
parameters according to the alias, and put them in the param state
dictionary.
"""
for k in self.kernel_params:
self.params[k] = kwargs[self.param_alias[k]]
def __call__(self, metric, weights=None, ndim=2, **extras):
"""Return a covariance matrix, given a metric. Optionally, multiply
the output kernel by a weight function to induce non-stationarity.
"""
k = self.construct_kernel(metric)
if ndim != k.ndim:
# Either promote to 2 dimensions or demote to 1.
# The latter should never happen...
k = np.diag(k)
if weights is None:
return k
elif ndim == 2:
Sigma = weights[None, :] * k * weights[:, None]
else:
Sigma = k * weights**2
return Sigma
class Uncorrelated(Kernel):
# Simple uncorrelated noise model
ndim = 1
kernel_params = ['amplitude']
def construct_kernel(self, metric):
s = metric.shape[0]
jitter = self.params['amplitude']**2 * np.ones(s)
if metric.ndim == 2:
return np.diag(jitter)
elif metric.ndim == 1:
return jitter
else:
raise(NotImplementedError)
class ExpSquared(Kernel):
ndim = 2
npars = 2
kernel_params = ['amplitude', 'length']
def construct_kernel(self, metric):
"""Construct an exponential squared covariance matrix.
"""
a, l = self.params['amplitude'], self.params['length']
Sigma = a**2 * np.exp(-(metric[:, None] - metric[None, :])**2 / (2 * l**2))
return Sigma
class Matern(Kernel):
ndim = 2
npars = 2
kernel_params = ['amplitude', 'length']
def construct_kernel(self, metric):
"""Construct a Matern kernel covariance matrix, for \nu=3/2.
"""
a, l = self.params['amplitude'], self.params['length']
Sigma = np.sqrt(3) * np.abs(metric[:, None] - metric[None, :]) / l
Sigma = a**2 * (1 + Sigma) * np.exp(-Sigma)
return Sigma
class Outliers(Kernel):
kernel_params = ['amplitude', 'location']
def construct_kernel(self, metric):
raise(NotImplementedError)
| 30.098039 | 83 | 0.582736 | [
"MIT"
] | errai34/prospector | prospect/likelihood/kernels.py | 3,070 | Python |
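A brief example of composing a covariance matrix with these kernels; the alias names "sigma" and "ell", and the metric values, are arbitrary choices for this sketch.
import numpy as np
kernel = ExpSquared(parnames=['sigma', 'ell'], name='calibration')
kernel.update(sigma=2.0, ell=0.5)       # aliased to 'amplitude' and 'length'
metric = np.linspace(0.0, 1.0, 4)       # e.g. wavelengths of the data points
Sigma = kernel(metric, ndim=2)          # dense 4x4 covariance matrix
print(Sigma.shape)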
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from spack.version import Version
class Meme(AutotoolsPackage):
"""The MEME Suite allows the biologist to discover novel motifs in
collections of unaligned nucleotide or protein sequences, and to perform a
wide variety of other motif-based analyses."""
homepage = "http://meme-suite.org"
url = "http://meme-suite.org/meme-software/5.1.1/meme-5.1.1.tar.gz"
version('5.3.0', sha256='b2ddec9db972fcf77b29c7deb62df8b1dd8a6638c13c1aa06a5d563c4a7ff756')
version('5.2.0', sha256='0cbf8c2172e9b6c07855b8aeec457f4825f0b132f8cbb11192880e2f6033f54f')
version('5.1.1', sha256='38d73d256d431ad4eb7da2c817ce56ff2b4e26c39387ff0d6ada088938b38eb5')
version('4.12.0', sha256='49ff80f842b59d328588acfcd1d15bf94c55fed661d22b0f95f37430cc363a06')
version('4.11.4', sha256='3e869ff57e327a9c8615dbef784e3f1095f7f7a0120cecd55efe10c3f2ee8eb3')
variant('mpi', default=True, description='Enable MPI support')
variant('image-magick', default=False, description='Enable image-magick for png output')
depends_on('zlib', type=('link'))
depends_on('libgcrypt', type=('link'))
depends_on('perl', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('mpi', when='+mpi')
depends_on('imagemagick', type=('build', 'run'), when='+image-magick')
depends_on('perl-xml-parser', type=('build', 'run'))
def url_for_version(self, version):
url = 'http://meme-suite.org/meme-software/{0}/meme{1}{2}.tar.gz'
sep = '-' if version >= Version('5.0.2') else '_'
return url.format(version.up_to(3), sep, version)
def configure_args(self):
spec = self.spec
# have meme build its own versions of libxml2/libxslt, see #6736
args = ['--enable-build-libxml2', '--enable-build-libxslt']
if '~mpi' in spec:
args += ['--enable-serial']
return args
| 44.87234 | 96 | 0.697013 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | BenjaminRodenberg/spack | var/spack/repos/builtin/packages/meme/package.py | 2,109 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sun May 20 11:35:03 2018
@author: DanielM
"""
import os
import numpy as np
import shelve
# Setup some parameters given by paradigm_frequency_inhibition.py
stim_delay = 100 # ms
dt = 0.01 # ms
stim_dtp = stim_delay / dt
data_path = "C:\\Users\\Daniel\\pyDentateData\\tuning\\revised\\frequency_inhibition_data\\"
save_path = data_path
data_files = [f for f in os.listdir(data_path) if os.path.isfile(os.path.join(data_path, f)) and '.pydd' in f]
for x in data_files:
interval = int(x.split('_')[8].split('.')[0][1:3])
data = shelve.open(data_path + x)
split_name_current = x.split('.')
split_name_peaks = list(split_name_current)
split_name_current[1] = split_name_current[1] + '_current'
name_current = '.'.join(split_name_current)
    np.savez(save_path + name_current, np.array(data[list(data.keys())[0]]['populations'][0]['VClamps_i']))
| 31.172414 | 110 | 0.698009 | [
"MIT"
] | danielmk/pyDentateeLife2020 | analysis/tuning_get_seclamp_currents_frequency.py | 904 | Python |
# -*- coding: utf-8 -*-
"""Top-level package for medium-shell."""
__author__ = 'Colin Bitterfield'
__email__ = '[email protected]'
__version__ = '0.1.0'
| 20 | 41 | 0.675 | [
"MIT"
] | cbitterfield/medium-shell | medium-shell/__init__.py | 160 | Python |
class Person(object):
""" Class Person for testing python.
Following packages need to be installed:
- requests
:param name: person's name, string
:param age: person's age, integer
:param phone: person's phone, string
:rtype: object
"""
def __init__(self, name, age, phone):
self.name = name
self.age = age
self.phone = phone
def print(self):
""" Method prints person's data.
:return: None
"""
print("Name: {}, age: {}, phone: {}".format(self.name, self.age, self.phone))
def set_name(self, name):
""" Method saves a new name for the person.
:param name: new name for the person, string
:return: None
"""
self.name = name
def get_name(self):
""" Method returns the name of the person.
:return: name, string
"""
return self.name
def set_age(self, age):
""" Method saves a new age for the person.
:param age: new age for the person, integer
:return: None
"""
if type(age) != int:
print("not valid age {}".format(age))
return
if age >= 0:
self.age = age
else:
print("not valid age {}".format(age))
def get_age(self):
""" Method returns the age of the person.
:return: age, integer
"""
return self.age
def set_phone(self, phone):
""" Method saves a new phone for the person.
:param phone: new phone for the person, string
:return: None
"""
self.phone = phone
def get_phone(self):
""" Method returns the phone of the person.
:return: phone, string
"""
return self.phone
class Employee(Person):
""" Class Employee for testing python.
:param name: person's name, string
:param age: person's age, integer
:param phone: person's phone, string
:param phone: person's title, string
:param phone: person's salary, string
:param phone: person's location, string
:rtype: object
"""
def __init__(self, name, age, phone, title, salary, location):
super().__init__(name, age, phone)
self.title = title
self.salary = salary
self.location = location
def get_title(self):
""" Method returns the title of the person.
:return: title, string
"""
return self.title
def set_title(self, title):
""" Method saves a new title for the person.
:param title: new title for the person, string
:return: None
"""
self.title = title
def get_salary(self):
""" Method returns the salary of the person.
:return: salary, string
"""
return self.salary
def set_salary(self, salary):
""" Method saves a new salary for the person.
:param salary: new salary for the person, string
:return: None
"""
if salary >= 0:
self.salary = salary
def get_location(self):
""" Method returns the location of the person.
:return: location, string
"""
return self.location
def set_location(self, location):
""" Method saves a new location for the person.
:param location: new location for the person, string
:return: None
"""
self.location = location
def print_businesscard(self):
""" Method prints a business card information.
:return: None
"""
print(" Name: {}\n Title: {}\n Phone: {}".format(self.name, self.title, self.phone))
| 25.109589 | 92 | 0.56192 | [
"MIT"
] | jhsaraja/testiprojekti | person.py | 3,666 | Python |
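Example usage of the classes above; all personal details are invented.
employee = Employee("Jane Doe", 30, "+358 40 123 4567",
                    "Software Engineer", 4000, "Helsinki")
employee.print_businesscard()
employee.set_age(31)
print(employee.get_age())       # 31
employee.set_salary(4200)
print(employee.get_salary())    # 4200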
# coding: utf8
from spacy.symbols import POS, TAG, DEP, LEMMA, HEAD
from spacy.language import Language
from spacy.tokens import Doc
from spacy.util import get_lang_class
import numpy
import re
class StanfordNLPLanguage(Language):
def __init__(self, snlp, meta=None, **kwargs):
"""Initialize the Language class.
Instead of "en" etc. we call the language "stanfordnlp_en" to not
cause conflicts with spaCy's built-in languages. Using entry points,
this also allows serializing and deserializing the language class
and "lang": "stanfordnlp_en" in the meta.json will automatically
instantiate this class if this package is available.
snlp (stanfordnlp.Pipeline): The loaded StanfordNLP pipeline.
kwargs: Optional config parameters.
RETURNS (spacy.language.Language): The nlp object.
"""
lang = snlp.config["lang"]
self.lang = "stanfordnlp_" + lang
self.Defaults = get_defaults(lang)
self.vocab = self.Defaults.create_vocab()
self.tokenizer = Tokenizer(snlp, self.vocab)
self.pipeline = []
self.max_length = kwargs.get("max_length", 10 ** 6)
self._meta = (
{"lang": self.lang, "stanfordnlp": snlp.config}
if meta is None
else dict(meta)
)
self._path = None
self._optimizer = None
def make_doc(self, text):
return self.tokenizer(text)
def get_defaults(lang):
"""Get the language-specific defaults, if available in spaCy. This allows
using lexical attribute getters that depend on static language data, e.g.
Token.like_num, Token.is_stop, Doc.noun_chunks etc.
lang (unicode): The language code.
RETURNS (Language.Defaults): The language defaults.
"""
try:
lang_cls = get_lang_class(lang)
return lang_cls.Defaults
except ImportError:
return Language.Defaults
class Tokenizer(object):
"""Because we're only running the StanfordNLP pipeline once and don't split
it up into spaCy pipeline components, we'll set all the attributes within
a custom tokenizer. The tokenizer is currently expected to
implement serialization methods so we're mocking them up here. When loading
the serialized nlp object back in, you can pass `snlp` to spacy.load:
>>> nlp.to_disk('/path/to/model')
>>> nlp = spacy.load('/path/to/model', snlp=snlp)
"""
to_disk = lambda self, *args, **kwargs: None
from_disk = lambda self, *args, **kwargs: None
to_bytes = lambda self, *args, **kwargs: None
from_bytes = lambda self, *args, **kwargs: None
_ws_pattern = re.compile(r"\s+")
def __init__(self, snlp, vocab):
"""Initialize the tokenizer.
snlp (stanfordnlp.Pipeline): The initialized StanfordNLP pipeline.
vocab (spacy.vocab.Vocab): The vocabulary to use.
RETURNS (Tokenizer): The custom tokenizer.
"""
self.snlp = snlp
self.vocab = vocab
def __call__(self, text):
"""Convert a StanfordNLP Doc to a spaCy Doc.
text (unicode): The text to process.
RETURNS (spacy.tokens.Doc): The spaCy Doc object.
"""
snlp_doc = self.snlp(text)
text = snlp_doc.text
tokens, heads = self.get_tokens_with_heads(snlp_doc)
if not len(tokens):
raise ValueError("No tokens available.")
words = []
spaces = []
pos = []
tags = []
deps = []
lemmas = []
offset = 0
is_aligned = self.check_aligned(text, tokens)
for i, token in enumerate(tokens):
span = text[offset:]
if not len(span):
break
while len(span) and span[0].isspace():
# If we encounter leading whitespace, skip one character ahead
offset += 1
span = text[offset:]
words.append(token.text)
# Make sure all strings are in the vocabulary
pos.append(self.vocab.strings.add(token.upos or ""))
tags.append(self.vocab.strings.add(token.xpos or ""))
deps.append(self.vocab.strings.add(token.dependency_relation or ""))
lemmas.append(self.vocab.strings.add(token.lemma or ""))
offset += len(token.text)
span = text[offset:]
if i == len(tokens) - 1:
spaces.append(False)
elif not is_aligned:
spaces.append(True)
else:
next_token = tokens[i + 1]
spaces.append(not span.startswith(next_token.text))
attrs = [POS, TAG, DEP, HEAD]
array = numpy.array(list(zip(pos, tags, deps, heads)), dtype="uint64")
doc = Doc(self.vocab, words=words, spaces=spaces).from_array(attrs, array)
# Overwrite lemmas separately to prevent them from being overwritten by spaCy
lemma_array = numpy.array([[lemma] for lemma in lemmas], dtype="uint64")
doc.from_array([LEMMA], lemma_array)
if any(pos) and any(tags):
doc.is_tagged = True
if any(deps):
doc.is_parsed = True
return doc
def get_tokens_with_heads(self, snlp_doc):
"""Flatten the tokens in the StanfordNLP Doc and extract the token indices
of the sentence start tokens to set is_sent_start.
snlp_doc (stanfordnlp.Document): The processed StanfordNLP doc.
RETURNS (list): The tokens (words).
"""
tokens = []
heads = []
offset = 0
for sentence in snlp_doc.sentences:
for token in sentence.tokens:
for word in token.words:
# Here, we're calculating the absolute token index in the doc,
# then the *relative* index of the head, -1 for zero-indexed
# and if the governor is 0 (root), we leave it at 0
if word.governor:
head = word.governor + offset - len(tokens) - 1
else:
head = 0
heads.append(head)
tokens.append(word)
offset += sum(len(token.words) for token in sentence.tokens)
return tokens, heads
def check_aligned(self, text, tokens):
token_texts = "".join(t.text for t in tokens)
return re.sub(self._ws_pattern, "", text) == token_texts
| 38.580838 | 85 | 0.60329 | [
"MIT"
] | mehmetilker/spacy-stanfordnlp | spacy_stanfordnlp/language.py | 6,443 | Python |
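Typical usage of the wrapper above, assuming the English StanfordNLP models have already been downloaded (e.g. via stanfordnlp.download('en')); this mirrors the pattern implied by the Tokenizer docstring.
import stanfordnlp
snlp = stanfordnlp.Pipeline(lang="en")
nlp = StanfordNLPLanguage(snlp)
doc = nlp("Barack Obama was born in Hawaii.")
print([(t.text, t.pos_, t.dep_, t.lemma_) for t in doc])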
import numpy as np
import matplotlib.pyplot as plt
def train_instance_plot(temp_data_X, temp_data_Y, save_name):
plt.figure(figsize=(12,8))
plot_dx = 0
for chdx in range(3):
plt.plot(temp_data_X[:, chdx]/np.max(temp_data_X[:, chdx]) + plot_dx*2,color='k')
plot_dx += 1
plt.plot(temp_data_Y[:,0]-2,color='b')
plt.plot(temp_data_Y[:,1]-3,color='r')
plt.plot(temp_data_Y[:,2]-4,color='g')
plt.yticks([])
plt.tight_layout()
plt.savefig(save_name ,dpi=300)
plt.close()
return | 28.823529 | 89 | 0.612245 | [
"MIT"
] | MrXiaoXiao/PickNet_keras | src/misc/plot.py | 490 | Python |
import socket
from pywincffi.core import dist
from pywincffi.dev.testutil import TestCase, mock_library
from pywincffi.exceptions import WindowsAPIError
from pywincffi.kernel32 import CloseHandle
from pywincffi.wintypes import LPWSANETWORKEVENTS, socket_from_object
from pywincffi.ws2_32 import (
WSAGetLastError, WSACreateEvent, WSAEventSelect, WSAEnumNetworkEvents)
class EventsCase(TestCase):
"""
Has some common methods used by tests in this module
"""
def tearDown(self):
super(EventsCase, self).tearDown()
self.assertEqual(WSAGetLastError(), 0)
def create_wsaevent(self):
event = WSACreateEvent()
self.addCleanup(CloseHandle, event)
return event
def create_socket_pair(self):
"""
Creates a local socket listening on a random port.
"""
# Establish the server's socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(server.close)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(client.close)
return server, client
class TestWSAGetLastError(TestCase):
"""
Tests for ``pywincffi.ws2_32.events.WSAGetLastError``
"""
def test_get_last_error(self):
self.addCleanup(self.WSASetLastError, 0)
self.WSASetLastError(4242)
self.assertEqual(WSAGetLastError(), 4242)
class TestWSACreateEvent(TestCase):
"""
Tests for ``pywincffi.ws2_32.events.WSACreateEvent``
"""
def test_invalid_event(self):
with mock_library(wsa_invalid_event=lambda _: True):
with self.assertRaises(WindowsAPIError):
WSACreateEvent()
class TestWSAEventSelect(EventsCase):
"""
Tests for ``pywincffi.ws2_32.events.WSAEventSelect``
"""
def test_basic_call(self):
# Establish a simple socket server and client
_, library = dist.load()
sock, _, = self.create_socket_pair()
# Setup the event
event = self.create_wsaevent()
WSAEventSelect(
socket_from_object(sock),
event,
library.FD_WRITE | library.FD_ACCEPT | library.FD_CONNECT
)
def test_socket_error(self):
def wrapped(*_):
_, library = dist.load()
return library.SOCKET_ERROR
with mock_library(WSAEventSelect=wrapped):
# Establish a simple socket server and client
_, library = dist.load()
sock, _ = self.create_socket_pair()
# Setup the event
event = self.create_wsaevent()
with self.assertRaises(WindowsAPIError):
WSAEventSelect(
socket_from_object(sock),
event,
library.FD_WRITE | library.FD_ACCEPT | library.FD_CONNECT
)
class TestWSAEnumNetworkEvents(EventsCase):
"""
Tests for ``pywincffi.ws2_32.events.WSAEnumNetworkEvents``
"""
def test_basic_call(self):
_, library = dist.load()
sock, _ = self.create_socket_pair()
events = WSAEnumNetworkEvents(socket_from_object(sock))
self.assertIsInstance(events, LPWSANETWORKEVENTS)
self.assertEqual(events.iErrorCode, tuple([0] * library.FD_MAX_EVENTS))
def test_triggers_write_event(self):
_, library = dist.load()
sock_server, sock_client = self.create_socket_pair()
sock_client_wintype = socket_from_object(sock_client)
# Listen on one socket and then connect with another. This should
# cause an FD_ACCEPT network event to occur.
sock_server.bind(("127.0.0.1", 0))
sock_server.listen(0)
_, port = sock_server.getsockname()
sock_client.connect(("127.0.0.1", port))
sock_client.send(b"Hello world")
event = self.create_wsaevent()
WSAEventSelect(sock_client_wintype, event, library.FD_WRITE)
events = WSAEnumNetworkEvents(sock_client_wintype)
self.assertEqual(events.lNetworkEvents, library.FD_WRITE)
def test_resets_event(self):
_, library = dist.load()
sock_server, sock_client = self.create_socket_pair()
sock_client_wintype = socket_from_object(sock_client)
# Listen on one socket and then connect with another. This should
# cause an FD_ACCEPT network event to occur.
sock_server.bind(("127.0.0.1", 0))
sock_server.listen(0)
_, port = sock_server.getsockname()
sock_client.connect(("127.0.0.1", port))
sock_client.send(b"Hello world")
waiter = self.create_wsaevent()
event = self.create_wsaevent()
WSAEventSelect(sock_client_wintype, event, library.FD_WRITE)
events = WSAEnumNetworkEvents(sock_client_wintype, hEventObject=waiter)
self.assertEqual(events.lNetworkEvents, library.FD_WRITE)
| 34.359155 | 79 | 0.659561 | [
"MIT"
] | opalmer/pycffiwin32 | tests/test_ws2_32/test_events.py | 4,879 | Python |
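Outside the test harness, the same call pattern the tests exercise looks roughly like this (Windows only; the socket and event handling mirror the fixtures above):
import socket
from pywincffi.core import dist
from pywincffi.kernel32 import CloseHandle
from pywincffi.wintypes import socket_from_object
from pywincffi.ws2_32 import WSACreateEvent, WSAEventSelect, WSAEnumNetworkEvents
_, library = dist.load()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
event = WSACreateEvent()
WSAEventSelect(socket_from_object(sock), event, library.FD_WRITE)
print(WSAEnumNetworkEvents(socket_from_object(sock)).lNetworkEvents)
CloseHandle(event)
sock.close()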
try :
from facepy import GraphAPI
from facepy.exceptions import OAuthError
import time
from sys import stdout
except ImportError:
print("Import Error")
token = 'Enter-Token-Here'
OWNER_NAME = ''
photos_together = {}
no_of_comments = {}
words_in_comment = {}
no_of_messages = {}
total_chat_length = {}
def process_photo_tags(tags):
#Until we get an empty result page
if 'error' in tags:
print("Error = ", error)
raise Exception("Error in Response")
if 'data' not in tags:
return
while len(tags['data']) > 0:
#Iterating through all the tags in the current result page
for tagged_person in tags['data']:
name = tagged_person['name'].encode('utf-8')
if name == OWNER_NAME:
continue
if name in photos_together:
#If the tag was encountered before increment
photos_together[name] += 1
else:
#Else initialize new count
photos_together[name] = 1
#Get the nect result page
if 'paging' in tags and 'next' in tags['paging']:
request_str = tags['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=200')
tags = graph.get(request_str)
else:
tags['data'] = []
def process_photo_comments(comments):
if 'error' in comments:
print("Error = ", error)
raise Exception("Error in Response")
if 'data' not in comments:
return
while len(comments['data']) > 0:
for comment in comments['data']:
try:
commentor = comment['from']['name'].encode('utf-8')
if commentor == OWNER_NAME:
#Ignore Comment by owner on his own photos
continue
word_count = len(comment['message'].encode('utf-8').split())
except UnicodeEncodeError:
print(comment['message'])
raise Exception('Unicode Encoding Error Encountered')
if commentor in no_of_comments:
#If a comment by this person was encountered before
no_of_comments[commentor] += 1
words_in_comment[commentor] += word_count
else:
#If this is a new commentor
no_of_comments[commentor] = 1
words_in_comment[commentor] = word_count
if 'paging' in comments and 'next' in comments['paging']:
request_str = comments['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=200')
comments = graph.get(request_str)
else:
comments['data'] = []
def process_photos(photos):
if 'error' in photos:
print("Error = ", error)
raise Exception("Error in Response")
no_of_photos = 0
if 'data' not in photos:
return
while len(photos['data']) > 0:
for photo in photos['data']:
if 'tags' in photo:
process_photo_tags(photo['tags'])
if 'comments' in photo:
process_photo_comments(photo['comments'])
no_of_photos += 1
stdout.write("\rNumber of Photos Processed = %d" % no_of_photos)
stdout.flush()
if 'paging' in photos and 'next' in photos['paging']:
request_str = photos['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=200')
photos = graph.get(request_str)
else:
photos['data'] = []
def process_texts(texts, friend_name):
if 'error' in texts:
print("Error = ", error)
raise Exception("Error in Response")
if 'data' not in texts:
return
while len(texts['data']) > 0:
for text in texts['data']:
if 'message' not in text:
#This can happen in message with only an attachment and No text
continue
if friend_name in no_of_messages:
no_of_messages[friend_name] += 1
total_chat_length[friend_name] += len(text['message'])
else:
no_of_messages[friend_name] = 1
total_chat_length[friend_name] = len(text['message'])
if 'paging' in texts and 'next' in texts['paging']:
request_str = texts['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=100')
success = False
while not success:
try:
texts = graph.get(request_str)
success = True
except OAuthError:
stdout.write("\nCall Limit Exceeded ! Sleeping for 4 min before retrying !!\n")
for i in range(250):
stdout.write("\rSleeing.......%d" % i)
stdout.flush()
time.sleep(1)
stdout.write("Woke Up! Retrying !!\n")
else:
texts['data'] = []
def process_all_messages(messages):
if 'error' in messages:
print("Error = ", error)
raise Exception("Error in Response")
if 'data' not in messages:
return
while len(messages['data']) > 0:
for chat in messages['data']:
if len(chat['to']['data']) != 2:
#Ignore Group and self messages
continue
friend_name = chat['to']['data'][1]['name'].encode('utf-8')
if friend_name == OWNER_NAME:
friend_name = chat['to']['data'][0]['name'].encode('utf-8')
success = False
while not success:
try:
stdout.write("\rProcessing Chat With : %s " % friend_name)
stdout.flush()
process_texts(chat['comments'], friend_name)
success = True
except OAuthError:
stdout.write("\nCall Limit Exceeded ! Sleeping for 10 min before retrying !!")
stdout.flush()
no_of_messages[friend_name] = 0
total_chat_length[friend_name] = 0
stdout.write('\n')
for i in range(600):
stdout.write("\rSleeing.......%d" % i)
stdout.flush()
time.sleep(1)
stdout.write("Woke Up! Retrying !!")
if 'paging' in messages and 'next' in messages['paging']:
request_str = messages['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=400')
messages = graph.get(request_str)
else:
            messages['data'] = []
graph = GraphAPI(token)
me = graph.get('v2.0/me?fields=id,name')
OWNER_NAME = me['name'].encode('utf-8')
photos = graph.get('v2.0/me/photos?fields=comments{message,from},tags{name}&limit=100')
process_photos(photos)
stdout.write('\n\n')
stdout.flush()
inbox = graph.get('v2.0/me/inbox?fields=comments{message},to&limit=100')
process_all_messages(inbox)
top_photos = []
for people in photos_together:
temp = []
temp.append(people)
temp.append(photos_together[people])
top_photos.append(temp)
top_photos.sort(key=lambda x: x[1], reverse=True)
print("Top People Whom You share photos")
for i in range(5):
print(i+1, ". ", top_photos[i][0], " - ", top_photos[i][1])
top_commentors = []
for people in no_of_comments:
temp = []
temp.append(people)
temp.append(no_of_comments[people])
top_commentors.append(temp)
top_commentors.sort(key=lambda x: x[1], reverse=True)
print("Top People Who comments on your photo")
for i in range(5):
print(i+1, ". ", top_commentors[i][0], " - ", top_commentors[i][1])
long_commentors = []
for people in words_in_comment:
temp = []
temp.append(people)
temp.append(words_in_comment[people])
long_commentors.append(temp)
long_commentors.sort(key=lambda x: x[1], reverse=True)
print("Top People with most content in comments")
for i in range(5):
print(i+1, ". ", long_commentors[i][0], " - ", long_commentors[i][1])
top_chatboxes = []
for people in no_of_messages:
temp = []
temp.append(people)
temp.append(no_of_messages[people])
top_chatboxes.append(temp)
top_chatboxes.sort(key=lambda x:x[1], reverse=True)
print("Top people with most number of Messages")
for i in range(5):
print(i+1, ". ", top_chatboxes[i][0], " - ", top_chatboxes[i][1])
long_chats = []
for people in total_chat_length:
temp = []
temp.append(people)
temp.append(total_chat_length[people])
long_chats.append(temp)
long_chats.sort(key=lambda x: x[1], reverse=True)
print("Top People with most content in inbox")
for i in range(5):
print(i+1, ". ", long_chats[i][0], " - ", long_chats[i][1])
total_count_of_comments = 0
for num in top_commentors:
total_count_of_comments += num[1]
print("Total Number of comments across all pics = ", total_count_of_comments)
| 32.900794 | 89 | 0.638162 | [
"MIT"
] | ashish1294/facepy-scripts | friendship_py3.py | 8,291 | Python |
import zutils
class zhighlighter:
def highlight(self, text):
return [(zutils.CL_FG, zutils.CL_BG, zutils.AT_BLINK if i % 2 == 0 else zutils.AT_NORMAL) for i in range(len(text))] #LOL!
| 31.5 | 124 | 0.719577 | [
"MIT"
] | herimonster/zoid | zhighlighter.py | 189 | Python |
from rest_framework import permissions
class PolyaxonPermission(permissions.BasePermission):
"""
Polyaxon Base permission system.
"""
def has_object_permission(self, request, view, obj):
return False
| 20.636364 | 56 | 0.722467 | [
"MPL-2.0"
] | AntonFriberg/polyaxon | polyaxon/scopes/permissions/base.py | 227 | Python |
import glob
import json
import os
import shutil
import subprocess
from .helpers import *
def constructor(*args, default_channel=True, no_rc=True, no_dry_run=False):
umamba = get_umamba()
cmd = [umamba, "constructor"] + [arg for arg in args if arg]
try:
res = subprocess.check_output(cmd)
if "--json" in args:
try:
j = json.loads(res)
return j
except json.decoder.JSONDecodeError as e:
print(f"Error when loading JSON output from {res}")
raise (e)
print(f"Error when executing '{' '.join(cmd)}'")
return res.decode()
except subprocess.CalledProcessError as e:
print(f"Error when executing '{' '.join(cmd)}'")
raise (e)
class TestInstall:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
cache = os.path.join(current_root_prefix, "pkgs")
env_name = random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
new_cache = os.path.join(root_prefix, "pkgs")
@classmethod
def setup_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
# speed-up the tests
os.environ["CONDA_PKGS_DIRS"] = TestInstall.new_cache
os.makedirs(TestInstall.new_cache, exist_ok=True)
root_pkgs = glob.glob(
os.path.join(TestInstall.current_root_prefix, "pkgs", "x*.tar.bz2")
)
urls = []
for pkg in root_pkgs:
shutil.copy(pkg, TestInstall.new_cache)
urls.append(
"http://testurl.com/conda-forge/linux-64/"
+ os.path.basename(pkg)
+ "#123412341234"
)
cls.pkgs = [os.path.basename(pkg) for pkg in root_pkgs]
with open(os.path.join(TestInstall.new_cache, "urls"), "w") as furls:
furls.write("\n".join(urls))
@classmethod
def teardown_class(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.current_root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.current_prefix
shutil.rmtree(TestInstall.root_prefix)
@classmethod
def teardown(cls):
os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
os.environ["CONDA_PREFIX"] = TestInstall.prefix
def test_extract_pkgs(self):
constructor("--prefix", TestInstall.root_prefix, "--extract-conda-pkgs")
for pkg in self.pkgs:
extracted_pkg = os.path.join(
TestInstall.root_prefix, "pkgs", pkg.rsplit(".tar.bz2")[0]
)
with open(
os.path.join(extracted_pkg, "info", "repodata_record.json")
) as rr:
repodata_record = json.load(rr)
with open(os.path.join(extracted_pkg, "info", "index.json")) as ri:
index = json.load(ri)
assert repodata_record["fn"] == pkg
assert repodata_record["md5"] == "123412341234"
assert (
repodata_record["url"]
== "http://testurl.com/conda-forge/linux-64/" + pkg
)
assert repodata_record["depends"] == index["depends"]
| 34.173469 | 84 | 0.59779 | [
"BSD-3-Clause"
] | ExternalRepositories/mamba | micromamba/tests/test_constructor.py | 3,349 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import typing # NOQA: F401
from nixnet import _cconsts
from nixnet import _errors
from nixnet import _props
from nixnet import constants
from nixnet.database import _collection
from nixnet.database import _database_object
from nixnet.database import _find_object
from nixnet.database import _frame
# workaround to avoid circular imports caused by mypy type annotations
MYPY = False
if MYPY:
from nixnet.database import _pdu # NOQA: F401
class SubFrame(_database_object.DatabaseObject):
"""Database subframe"""
def __init__(
self,
**kwargs # type: int
):
# type: (...) -> None
if not kwargs or '_handle' not in kwargs:
raise TypeError()
self._handle = kwargs['_handle']
from nixnet.database import _signal
self._dyn_signals = _collection.DbCollection(
self._handle, constants.ObjectClass.SIGNAL, _cconsts.NX_PROP_SUBFRM_DYN_SIG_REFS, _signal.Signal)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._handle == other._handle
else:
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
else:
return not result
def __hash__(self):
return hash(self._handle)
def __repr__(self):
return '{}(handle={})'.format(type(self).__name__, self._handle)
def check_config_status(self):
# type: () -> None
"""Check this subframe's configuration status.
By default, incorrectly configured subframes in the database are not returned from
:any:`Frame.mux_subframes` because they cannot be used in the bus communication.
You can change this behavior by setting :any:`Database.show_invalid_from_open` to `True`.
When a subframe configuration status becomes invalid after the database is opened,
the subframe still is returned from :any:`Frame.mux_subframes`
even if :any:`Database.show_invalid_from_open` is `False`.
Raises:
:any:`XnetError`: The subframe is incorrectly configured.
"""
status_code = _props.get_subframe_config_status(self._handle)
_errors.check_for_error(status_code)
def find(
self,
object_class, # type: typing.Type[_database_object.DatabaseObject]
object_name, # type: typing.Text
):
# type: (...) -> _database_object.DatabaseObject
"""Finds an object in the database.
This function finds a database object relative to this parent object.
This object may be a grandparent or great-grandparent.
If this object is a direct parent
(for example, :any:`Frame<_frame.Frame>` for :any:`Signal<_signal.Signal>`),
the ``object_name`` to search for can be short, and the search proceeds quickly.
If this object is not a direct parent
(for example, :any:`Database` for :any:`Signal<_signal.Signal>`),
the ``object_name`` to search for must be qualified such
that it is unique within the scope of this object.
For example, if the class of this object is :any:`Cluster`,
and ``object_class`` is :any:`Signal<_signal.Signal>`,
you can specify ``object_name`` of ``mySignal``,
assuming that signal name is unique to the cluster.
If not, you must include the :any:`Frame<_frame.Frame>` name as a prefix,
such as ``myFrameA.mySignal``.
NI-XNET supports the following subclasses of ``DatabaseObject`` as arguments for ``object_class``:
* :any:`nixnet.database.Cluster<Cluster>`
* :any:`nixnet.database.Frame<_frame.Frame>`
* :any:`nixnet.database.Pdu<Pdu>`
* :any:`nixnet.database.Signal<_signal.Signal>`
* :any:`nixnet.database.SubFrame<SubFrame>`
* :any:`nixnet.database.Ecu<Ecu>`
* :any:`nixnet.database.LinSched<LinSched>`
* :any:`nixnet.database.LinSchedEntry<LinSchedEntry>`
Args:
object_class(``DatabaseObject``): The class of the object to find.
object_name(str): The name of the object to find.
Returns:
An instance of the found object.
Raises:
ValueError: Unsupported value provided for argument ``object_class``.
:any:`XnetError`: The object is not found.
"""
return _find_object.find_object(self._handle, object_class, object_name)
@property
def dyn_signals(self):
# type: () -> _collection.DbCollection
""":any:`DbCollection`: Returns a collection of dynamic :any:`Signal<_signal.Signal>` objects in the subframe.
Those signals are transmitted when the multiplexer signal
in the frame has the multiplexer value defined in the subframe.
"""
return self._dyn_signals
@property
def frm(self):
# type: () -> _frame.Frame
""":any:`Frame<_frame.Frame>`: Returns the reference to the parent frame.
The parent frame is defined when the subframe is created,
and you cannot change it afterwards.
"""
handle = _props.get_subframe_frm_ref(self._handle)
return _frame.Frame(_handle=handle)
@property
def mux_value(self):
# type: () -> int
"""int: Get or set the multiplexer value for this subframe.
This property specifies the multiplexer signal value used when the
dynamic signals in this subframe are transmitted in the frame.
Only one subframe is transmitted at a time in the frame.
There also is a multiplexer value for a signal object as a read-only property.
It reflects the value set on the parent subframe object.
This property is required. If the property does not contain a valid value,
and you create an XNET session that uses this subframe,
the session returns an error.
To ensure that the property contains a valid value,
you can do one of the following:
* Use a database file (or alias) to create the session.
The file formats require a valid value in the text for this property.
* Set a value at runtime using this property.
This is needed when you create your own in-memory database (*:memory:*) rather than use a file.
The property does not contain a default in this case,
so you must set a valid value prior to creating a session.
"""
return _props.get_subframe_mux_value(self._handle)
@mux_value.setter
def mux_value(self, value):
# type: (int) -> None
_props.set_subframe_mux_value(self._handle, value)
@property
def name(self):
# type: () -> typing.Text
"""str: Get or set the name of the subframe object.
Lowercase letters, uppercase letters, numbers,
and the underscore (_) are valid characters for the short name.
The space ( ), period (.), and other special characters are not supported within the name.
The short name must begin with a letter (uppercase or lowercase) or underscore, and not a number.
The short name is limited to 128 characters.
A subframe name must be unique for all subframes in a frame.
This short name does not include qualifiers to ensure that it is unique,
such as the database, cluster, and frame name. It is for display purposes.
"""
return _props.get_subframe_name(self._handle)
@name.setter
def name(self, value):
# type: (typing.Text) -> None
_props.set_subframe_name(self._handle, value)
@property
def pdu(self):
# type: () -> _pdu.Pdu
""":any:`Pdu`: Returns the subframe's parent PDU.
This property returns the reference to the subframe's parent PDU.
The parent PDU is defined when the subframe object is created.
You cannot change it afterwards.
"""
from nixnet.database import _pdu # NOQA: F811
handle = _props.get_subframe_pdu_ref(self._handle)
return _pdu.Pdu(_handle=handle)
@property
def name_unique_to_cluster(self):
# type: () -> typing.Text
"""str: Returns a subframe name unique to the cluster that contains the subframe.
If the single name is not unique within the cluster, the name is <frame-name>.<subframe-name>.
You can pass the name to the `find` function to retrieve the reference to the object,
while the single name is not guaranteed success in `find`
because it may be not unique in the cluster.
"""
return _props.get_subframe_name_unique_to_cluster(self._handle)
| 38.647826 | 118 | 0.657779 | [
"MIT"
] | bigoulours/nixnet-python | nixnet/database/_subframe.py | 8,889 | Python |
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC # Incremental Data Ingestion with Auto Loader
# MAGIC
# MAGIC Incremental ETL is important since it allows us to deal solely with new data that has been encountered since the last ingestion. Reliably processing only the new data reduces redundant processing and helps enterprises reliably scale data pipelines.
# MAGIC
# MAGIC The first step for any successful data lakehouse implementation is ingesting into a Delta Lake table from cloud storage.
# MAGIC
# MAGIC Historically, ingesting files from a data lake into a database has been a complicated process.
# MAGIC
# MAGIC Databricks Auto Loader provides an easy-to-use mechanism for incrementally and efficiently processing new data files as they arrive in cloud file storage. In this notebook, you'll see Auto Loader in action.
# MAGIC
# MAGIC Due to the benefits and scalability that Auto Loader delivers, Databricks recommends its use as general **best practice** when ingesting data from cloud object storage.
# MAGIC
# MAGIC ## Learning Objectives
# MAGIC By the end of this lesson, you should be able to:
# MAGIC * Execute Auto Loader code to incrementally ingest data from cloud storage to Delta Lake
# MAGIC * Describe what happens when a new file arrives in a directory configured for Auto Loader
# MAGIC * Query a table fed by a streaming Auto Loader query
# MAGIC
# MAGIC ## Dataset Used
# MAGIC This demo uses simplified artificially generated medical data representing heart rate recordings delivered in the JSON format.
# MAGIC
# MAGIC | Field | Type |
# MAGIC | --- | --- |
# MAGIC | device_id | int |
# MAGIC | mrn | long |
# MAGIC | time | double |
# MAGIC | heartrate | double |
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC
# MAGIC ## Getting Started
# MAGIC
# MAGIC Run the following cell to reset the demo and configure required variables and help functions.
# COMMAND ----------
# MAGIC %run ../Includes/Classroom-Setup-6.1
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Using Auto Loader
# MAGIC
# MAGIC In the cell below, a function is defined to demonstrate using Databricks Auto Loader with the PySpark API. This code includes both a Structured Streaming read and write.
# MAGIC
# MAGIC The following notebook will provide a more robust overview of Structured Streaming. If you wish to learn more about Auto Loader options, refer to the <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader.html" target="_blank">documentation</a>.
# MAGIC
# MAGIC Note that when using Auto Loader with automatic <a href="https://docs.databricks.com/spark/latest/structured-streaming/auto-loader-schema.html" target="_blank">schema inference and evolution</a>, the 4 arguments shown here should allow ingestion of most datasets. These arguments are explained below.
# MAGIC
# MAGIC | argument | what it is | how it's used |
# MAGIC | --- | --- | --- |
# MAGIC | **`data_source`** | The directory of the source data | Auto Loader will detect new files as they arrive in this location and queue them for ingestion; passed to the **`.load()`** method |
# MAGIC | **`source_format`** | The format of the source data | While the format for all Auto Loader queries will be **`cloudFiles`**, the format of the source data should always be specified for the **`cloudFiles.format`** option |
# MAGIC | **`table_name`** | The name of the target table | Spark Structured Streaming supports writing directly to Delta Lake tables by passing a table name as a string to the **`.table()`** method. Note that you can either append to an existing table or create a new table |
# MAGIC | **`checkpoint_directory`** | The location for storing metadata about the stream | This argument is pass to the **`checkpointLocation`** and **`cloudFiles.schemaLocation`** options. Checkpoints keep track of streaming progress, while the schema location tracks updates to the fields in the source dataset |
# MAGIC
# MAGIC **NOTE**: The code below has been streamlined to demonstrate Auto Loader functionality. We'll see in later lessons that additional transformations can be applied to source data before saving them to Delta Lake.
# COMMAND ----------
def autoload_to_table(data_source, source_format, table_name, checkpoint_directory):
query = (spark.readStream
.format("cloudFiles")
.option("cloudFiles.format", source_format)
.option("cloudFiles.schemaLocation", checkpoint_directory)
.load(data_source)
.writeStream
.option("checkpointLocation", checkpoint_directory)
.option("mergeSchema", "true")
.table(table_name))
return query
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC In the following cell, we use the previously defined function and some path variables defined in the setup script to begin an Auto Loader stream.
# MAGIC
# MAGIC Here, we're reading from a source directory of JSON files.
# COMMAND ----------
query = autoload_to_table(data_source = f"{DA.paths.working_dir}/tracker",
source_format = "json",
table_name = "target_table",
checkpoint_directory = f"{DA.paths.checkpoints}/target_table")
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC Because Auto Loader uses Spark Structured Streaming to load data incrementally, the code above doesn't appear to finish executing.
# MAGIC
# MAGIC We can think of this as a **continuously active query**. This means that as soon as new data arrives in our data source, it will be processed through our logic and loaded into our target table. We'll explore this in just a second.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Helper Function for Streaming Lessons
# MAGIC
# MAGIC Our notebook-based lessons combine streaming functions with batch and streaming queries against the results of those operations. These notebooks are for instructional purposes and intended for interactive, cell-by-cell execution. This pattern is not intended for production.
# MAGIC
# MAGIC Below, we define a helper function that prevents our notebook from executing the next cell just long enough to ensure data has been written out by a given streaming query. This code should not be necessary in a production job.
# COMMAND ----------
def block_until_stream_is_ready(query, min_batches=2):
import time
while len(query.recentProgress) < min_batches:
time.sleep(5) # Give it a couple of seconds
print(f"The stream has processed {len(query.recentProgress)} batchs")
block_until_stream_is_ready(query)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Query Target Table
# MAGIC
# MAGIC Once data has been ingested to Delta Lake with Auto Loader, users can interact with it the same way they would any table.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM target_table
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC Note that the **`_rescued_data`** column is added by Auto Loader automatically to capture any data that might be malformed and not fit into the table otherwise.
# MAGIC
# MAGIC While Auto Loader captured the field names for our data correctly, note that it encoded all fields as **`STRING`** type. Because JSON is a text-based format, this is the safest and most permissive type, ensuring that the least amount of data is dropped or ignored at ingestion due to type mismatch.
# COMMAND ----------
# MAGIC %sql
# MAGIC DESCRIBE TABLE target_table
# COMMAND ----------
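# MAGIC %md
# MAGIC
# MAGIC As an illustrative aside (not part of the original lesson), the cell below sketches one way the string-typed fields could be cast to more specific types in a downstream view. The view name **`typed_recordings`** and the chosen casts are assumptions based on the heart-rate schema described at the top of this notebook.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW typed_recordings AS
# MAGIC SELECT CAST(device_id AS INT) AS device_id,
# MAGIC        CAST(mrn AS BIGINT) AS mrn,
# MAGIC        CAST(`time` AS DOUBLE) AS `time`,
# MAGIC        CAST(heartrate AS DOUBLE) AS heartrate
# MAGIC FROM target_table
# COMMAND ----------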
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC Use the cell below to define a temporary view that summarizes the recordings in our target table.
# MAGIC
# MAGIC We'll use this view below to demonstrate how new data is automatically ingested with Auto Loader.
# COMMAND ----------
# MAGIC %sql
# MAGIC CREATE OR REPLACE TEMP VIEW device_counts AS
# MAGIC SELECT device_id, count(*) total_recordings
# MAGIC FROM target_table
# MAGIC GROUP BY device_id;
# MAGIC
# MAGIC SELECT * FROM device_counts
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Land New Data
# MAGIC
# MAGIC As mentioned previously, Auto Loader is configured to incrementally process files from a directory in cloud object storage into a Delta Lake table.
# MAGIC
# MAGIC We have configured and are currently executing a query to process JSON files from the location specified by **`source_path`** into a table named **`target_table`**. Let's review the contents of the **`source_path`** directory.
# COMMAND ----------
files = dbutils.fs.ls(f"{DA.paths.working_dir}/tracker")
display(files)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC At present, you should see a single JSON file listed in this location.
# MAGIC
# MAGIC The method in the cell below was configured in our setup script to allow us to model an external system writing data to this directory. Each time you execute the cell below, a new file will land in the **`source_path`** directory.
# COMMAND ----------
DA.data_factory.load()
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC List the contents of the **`source_path`** again using the cell below. You should see an additional JSON file for each time you ran the previous cell.
# COMMAND ----------
files = dbutils.fs.ls(f"{DA.paths.working_dir}/tracker")
display(files)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Tracking Ingestion Progress
# MAGIC
# MAGIC Historically, many systems have been configured to either reprocess all records in a source directory to calculate current results or require data engineers to implement custom logic to identify new data that's arrived since the last time a table was updated.
# MAGIC
# MAGIC With Auto Loader, your table has already been updated.
# MAGIC
# MAGIC Run the query below to confirm that new data has been ingested.
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT * FROM device_counts
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC The Auto Loader query we configured earlier automatically detects and processes records from the source directory into the target table. There is a slight delay as records are ingested, but an Auto Loader query executing with default streaming configuration should update results in near real time.
# MAGIC
# MAGIC The query below shows the table history. A new table version should be indicated for each **`STREAMING UPDATE`**. These update events coincide with new batches of data arriving at the source.
# COMMAND ----------
# MAGIC %sql
# MAGIC DESCRIBE HISTORY target_table
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC
# MAGIC ## Clean Up
# MAGIC Feel free to continue landing new data and exploring the table results with the cells above.
# MAGIC
# MAGIC When you're finished, run the following cell to stop all active streams and remove created resources before continuing.
# COMMAND ----------
DA.cleanup()
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 42.167857 | 315 | 0.720505 | [
"CC0-1.0"
] | databricks-academy/data-engineering-with-databricks | Data-Engineering-with-Databricks/06 - Incremental Data Processing/DE 6.1 - Incremental Data Ingestion with Auto Loader.py | 11,807 | Python |
import numpy as np
def log_gaussian(x, mean, sigma):
"""
Computes the log-probability of X=x for a Gaussian of mean=mean and sigma=sigma
    Parameters
    ----------
    x : value (or array of values) at which to evaluate the log-density
    mean : mean of the Gaussian
    sigma : standard deviation of the Gaussian
    Returns
    -------
    log_pdf : log of the Gaussian probability density evaluated at x
"""
log_pdf = -(x - mean) ** 2 / (2 * sigma ** 2)
log_pdf = log_pdf - np.log((np.sqrt(2 * np.pi) * sigma))
return log_pdf
def log_gaussian2d(size, x, y, x_cm, y_cm, width, length, psi):
scale_w = 1. / (2. * width ** 2)
scale_l = 1. / (2. * length ** 2)
a = np.cos(psi) ** 2 * scale_l + np.sin(psi) ** 2 * scale_w
b = np.sin(2 * psi) * (scale_w - scale_l) / 2.
c = np.cos(psi) ** 2 * scale_w + np.sin(psi) ** 2 * scale_l
norm = 1. / (2 * np.pi * width * length)
log_pdf = - (a * (x - x_cm) ** 2 - 2 * b * (x - x_cm) * (y - y_cm) + c * (
y - y_cm) ** 2)
log_pdf += np.log(norm) + np.log(size)
    return log_pdf
| 23.333333 | 83 | 0.503297 | [
"BSD-3-Clause"
] | calispac/cta-lstchain | lstchain/image/pdf.py | 910 | Python |
repeat_age = 0
while repeat_age < 3:
age = int(
input(f"Check your movie ticket price by typing your age below. You may check {3 - repeat_age} more times\n"))
if age < 3:
print("Your ticket is free!")
elif 3 <= age <= 12:
print("Your ticket costs $10")
elif age > 12:
print("Your ticket costs $15")
if repeat_age == 2:
break
check = input("Would you like to check another ticket price? Type 'quit' to exit this program\n")
if check == 'quit':
break
repeat_age += 1
print(f"Thank you for using our service! You have checked {repeat_age + 1} times")
| 34.888889 | 118 | 0.61465 | [
"MIT"
] | kg55555/pypractice | Part 1/Chapter 7/exercise_7.6.py | 628 | Python |
from django.contrib.auth.models import AbstractUser
from django.db import models
import uuid
# class Role(models.Model):
# '''
# The Role entries are managed by the system,
# automatically created via a Django data migration.
# '''
# name = models.CharField(max_length=50, default=1)
# def __str__(self):
# return self.name
class MyUser(AbstractUser):
""" Base model for patient
and doctor """
ROLE_CHOICES = (
(1, 'patient'),
(2, 'doctor'),
(3, 'developer'),
)
#ID
id = models.UUIDField(primary_key=True, default=uuid.uuid4,)
#Role
role = models.IntegerField(choices=ROLE_CHOICES)
#Personal
last_name = models.CharField(max_length=100, default='')
first_name = models.CharField(max_length=100, default='')
middle_name = models.CharField(max_length=100)
age = models.IntegerField()
blood_type = models.CharField(max_length=50, blank=True, null=True)
#Contact Details
phone_number = models.CharField(max_length=50)
email = models.EmailField(max_length=254, unique=True)
#Address
street = models.CharField(max_length=50, blank=True, null=True)
city = models.CharField(max_length=50)
barangay = models.CharField(max_length=50)
postal_code = models.IntegerField()
#System
date_created = models.DateTimeField(
auto_now=True, blank=True, null=True
)
date_updated = models.DateTimeField(
auto_now_add=True, blank=True, null=True
)
active_status = models.BooleanField(
default=True, blank=True, null=True
)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def __str__(self):
        return f'{self.id}'
| 28.046154 | 71 | 0.628086 | [
"MIT"
] | RadySonabu/ThesisAPI | user/models.py | 1,823 | Python |
'''
'''
import hashlib
import uuid
import logging
from cassandra.cluster import Cluster
from cassandra.policies import DCAwareRoundRobinPolicy
from subprocess import PIPE, Popen
from kazoo.client import KazooClient
from multiprocessing import Process
class CassandraAware:
    def __init__(self):
self.cluster = Cluster("", protocol_version=4,
load_balancing_policy=DCAwareRoundRobinPolicy(local_dc='datacenter1'))
self.session = self.cluster.connect("")
class KazooAware:
def __init__(self):
zk = KazooClient(hosts='')
zk.start()
self.ver = zk.Counter('/ver', default=0x821)
class blocks_view(CassandraAware, KazooAware):
    def __init__(self):
        CassandraAware.__init__(self)
        KazooAware.__init__(self)
        logging.info('initialize blocks_view')
        self.updateBlock = self.session.prepare('''
            insert into blocks_view(sha256_id, block_id, ver, raw, sha256, nonce, consent_algorithm, predecessor, counter, setup)
            values(?, ?, ?, ?, ?, ?, ?, ?, ?, toTimestamp(now()))''')
        self.updateStatusStatement = self.session.prepare('update candidate set processed=true where sha256_id = ?')
self.genesis()
def runLoop(self):
while True:
self.run()
def publicMining(self, raw):
        ver = self.ver
        self.ver += 1
predecessor = self.block_id
m = hashlib.sha256()
m.update(bytes(raw, 'utf-8'))
sha256 = m.hexdigest()
self.block_ptr = sha256
block_id = '' # fxxx
nonce = ''
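        # Simple proof-of-work loop: keep trying fresh nonces through the external
        # ./mining helper until the returned block id starts with 'f'.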
while True:
if block_id.startswith('f'):
self.block_id = block_id
self.nonce = nonce
break
nonce = uuid.uuid4().hex
with Popen(('./mining', sha256, nonce), stdout=PIPE) as p:
block_id = p.stdout.read()
m = hashlib.sha256()
m.update(bytes(block_id, 'utf-8'))
sha256_id = m.hexdigest()
self.counter += 1
        self.session.execute(self.updateBlock, [sha256_id, block_id, ver, raw, sha256, nonce,
                                                'Mersenne15Mersenne14', predecessor, self.counter])
def genesis(self):
self.counter = 0
self.block_id = None
raw = '''
In God We Trust
'''
self.publicMining(raw)
def run(self):
        rows = self.session.execute('''select sha256_id, pq, proposal, verdict, target, raw_statement, block_id
from candidate where ready = true and processed = false''')
candidates = []
ids = []
for row in rows:
[sha256_id, pq, proposal, verdict, target, raw_statement, block_id] = row
# verify the transaction sanity
candidates.append('||'.join([pq, proposal, verdict, target, raw_statement, block_id]))
ids.append(sha256_id)
candidates.sort()
candidate_transactions = '@@'.join(candidates)
predecessor = self.block_id
raw = '<{0}/{1}/{2}>{3}'.format(self.block_ptr, self.nonce, predecessor, candidate_transactions)
self.publicMining(raw)
for shaId in ids:
            self.session.execute(self.updateStatusStatement, [shaId])
'''
create table
player3(sha256_id text primary key, symbol text, ver bigint,
pq0 text, d0 text, f0 text, pq1 text ,d1 text, f1 text, setup timestamp);
'''
class player3(CassandraAware, KazooAware):
    def __init__(self):
        CassandraAware.__init__(self)
        KazooAware.__init__(self)
        logging.info('initialize player3')
        self.newPlayer = self.session.prepare('''
insert into player3(sha256_id, symbol, ver, pq0, d0, f0, pq1, d1, f1, setup)
values(?, ?, ?, ?, ?, ?, ?, ?, ?, toTimestamp(now()))
''')
def new(self, symbol):
        ver = self.ver
        self.ver += 1
m = hashlib.sha256()
m.update(bytes(symbol, 'utf-8'))
sha256_id = m.hexdigest()
numbers = []
with Popen('./openssl genrsa 2048 {0}'.format(sha256_id).split(' '), stdout=PIPE) as p:
output = str(p.stdout.read(), 'utf-8')
for row in output.split('INTEGER'):
numbers.extend(list(filter(lambda x: x.startswith(' :'), row.splitlines())))
pqKey = ''.join(reversed(numbers[1])).lower().replace(':', '')
dKey = ''.join(reversed(numbers[3])).lower().replace(':', '')
jgKey = ''.join(reversed(numbers[-1])).lower().replace(':', '')
pq0 = pqKey.strip()
d0 = dKey.strip()
f0 = jgKey.strip()
with Popen('./openssl genrsa 2048 {0}'.format(sha256_id).split(' '), stdout=PIPE) as p:
output = str(p.stdout.read(), 'utf-8')
for row in output.split('INTEGER'):
numbers.extend(list(filter(lambda x: x.startswith(' :'), row.splitlines())))
pqKey = ''.join(reversed(numbers[1])).lower().replace(':', '')
dKey = ''.join(reversed(numbers[3])).lower().replace(':', '')
jgKey = ''.join(reversed(numbers[-1])).lower().replace(':', '')
pq1 = pqKey.strip()
d1 = dKey.strip()
f1 = jgKey.strip()
        self.session.execute(self.newPlayer, [sha256_id, symbol, ver, pq0, d0, f0, pq1, d1, f1])
'''
create table draft(sha256_id text primary key, note_id text, target text, ver bigint,
    symbol text, quantity bigint, refer text, type text, processed boolean, setup timestamp);
create table symbol_chain(sha256_id text primary key, symbol text, ver bigint,
block_counter bigint, updated timestamp);
'''
class draft(CassandraAware, KazooAware):
    def __init__(self):
        CassandraAware.__init__(self)
        KazooAware.__init__(self)
        logging.info('initialize draft')
        self.newDraft = self.session.prepare('''
            insert into draft(sha256_id, note_id, target, ver, symbol, quantity, refer, type, processed, setup)
            values(?, ?, ?, ?, ?, ?, ?, ?, false, toTimestamp(now()))
            ''')
        self.updateSymbolChain = self.session.prepare('''
            update symbol_chain set symbol = ?, ver = ?, block_counter = ?, updated = toTimestamp(now())
            where sha256_id = ?
            ''')
def issue(self, symbol, quantity):
logging.info('going to issue with symbol:{}'.format(symbol))
        ver = self.ver
        self.ver += 1
m = hashlib.sha256()
m.update(bytes(symbol, 'utf-8'))
sha256_id = m.hexdigest()
        result = self.session.execute("select block_counter from symbol_chain where sha256_id = '{0}'".format(sha256_id)).one()
if not result:
counter = 0
else:
counter = int(result.block_counter)
counter += 1
        self.session.execute(self.updateSymbolChain, [symbol, ver, counter, sha256_id])
        (block_id,) = self.session.execute('select sha256_id from blocks_view where counter = {}'.format(counter)).one()
        note_id = '{}||{}||{}'.format(symbol, block_id[:16], quantity)
        self.session.execute(self.newDraft, [m.hexdigest(), note_id[:32], sha256_id, ver, quantity, block_id, 'issue'])
def transfer(self, note_id, target, quantity, refer):
logging.info('going to transfer {} to {}'.format(note_id, target))
        ver = self.ver
        self.ver += 1
m = hashlib.sha256()
m.update(bytes(note_id, 'utf-8'))
m.update(bytes(ver, 'utf-8'))
sha256_id = m.hexdigest()
        self.session.execute(self.newDraft, [sha256_id, note_id, target, ver, quantity, refer, 'transfer'])
class proposal(CassandraAware, KazooAware):
    def __init__(self):
        CassandraAware.__init__(self)
        KazooAware.__init__(self)
        logging.info('initialize proposal')
def runLoop(self):
while True:
self.process()
def process(self):
        result = self.session.execute('''
select sha256_id, note_id, target, symbol, quantity, refer from draft where processed=false
''')
for row in result:
[sha256_id, note_id, target, symbol, quantity, refer, type] = row
if type == 'issue':
self.processIssue()
if type == 'transfer':
self.processTransfer()
            self.session.execute('''update draft set processed=true where sha256_id = '{0}'
            '''.format(sha256_id))
def processIssue(self):
# insert into candidate
pass
def processTransfer(self):
# insert into candidate
pass
if __name__ == '__main__':
b = blocks_view()
player = player3()
d = draft()
prop = proposal()
Process(target=b.runLoop).start()
    Process(target=player.new, args=('ABCDEFG',)).start()
    Process(target=d.issue, args=('ABCDEFG', '1')).start()
Process(target=prop.runLoop).start()
| 36.353191 | 126 | 0.594288 | [
"BSD-3-Clause"
] | rsacpp/repre | clique3.py | 8,543 | Python |
from matplotlib import pyplot as plt
from matplotlib_venn import venn2
import glob
import compare_two_zlists as cv
import math
from scipy.stats import hypergeom
from decimal import Decimal
from math import log
def make_venn_matrix(filename_list):
fig1 = plt.figure(1)
fig1.suptitle('Differentially Expressed Genes Overlap', fontsize=24)
subplot_counter = 1
print len(filename_list)*len(filename_list)
for zlist1 in filename_list:
for zlist2 in filename_list:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
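            # offset is the subplot's row and position its column within the matrix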
offset = math.ceil(float(subplot_counter)/float(len(filename_list)))
position = int(subplot_counter - 1) % len(filename_list) + 1
if zlist1 == zlist2:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
up_A, up_B, all_changing = cv.get_changing(zlist1)
plt.text(0, 0, '{}'.format(zlist1))
plt.text(0, .4, 'Higher in A: {}'.format(str(len(up_A))))
plt.text(0, .2, 'Higher in B: {}'.format(str(len(up_B))))
plt.axis('off')
plt.plot()
print 'working {}'.format(subplot_counter)
subplot_counter+=1
else:
plt.subplot(len(filename_list), len(filename_list), subplot_counter)
(venn_values, all_union) = cv.compare_two(zlist1, zlist2)
color1 = ''
color2 = ''
if position > offset:
color1 = 'MediumVioletRed'
color2 = 'OrangeRed'
union = venn_values['up_A']['union']
in_common = venn_values['up_A']['common']
unique_1 = venn_values['up_A']['up_1']
unique_2 = venn_values['up_A']['up_2']
if position < offset:
color1 = 'LimeGreen'
color2 = 'DodgerBlue'
union = venn_values['up_B']['union']
in_common = venn_values['up_B']['common']
unique_1 = venn_values['up_B']['up_1']
unique_2 = venn_values['up_B']['up_2']
total_genes = len(all_union)
total_1 = unique_1 + in_common
total_2 = unique_2 + in_common
try:
log_prob = Decimal(log(hypergeom.sf(in_common, total_genes, total_1, total_2)))
except:
log_prob = '-inf'
plt.plot(cv.draw_venn(union, in_common, unique_1, unique_2, color1, color2))
if log_prob != '-inf':
plt.annotate('log p-value: %2.3f'%log_prob, xy=(0,0), xycoords='axes fraction')
else:
plt.annotate('log p-value: -inf', xy=(0,0), xycoords='axes fraction')
print 'working {}'.format(subplot_counter)
print str(total_genes)
print str(log_prob)
subplot_counter+=1
plt.show()
return
if __name__ == '__main__':
venn_filelist = glob.glob('*zlist')
venn_filelist.sort()
make_venn_matrix(venn_filelist)
| 26.835052 | 84 | 0.676143 | [
"MIT"
] | YeoLab/gscripts | gscripts/general/venn_matrix.py | 2,603 | Python |
from ctypes import cdll, c_int, c_ulonglong, c_char_p
import time
import json
import thread
def runList(ll, interpID, inputList):
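    # Helper: encode a Python list as JSON, hand it to the given interpreter, and return the result handle.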
strList = json.dumps(inputList)
listID = ll.elconn_list_from_json(strList.encode())
resultID = ll.elconn_call(interpID, listID)
return resultID
# === load library
ll = cdll.LoadLibrary("../sharedlib/elconn.so")
# === set return types
ll.elconn_get_type.restype = c_int
ll.elconn_init.restype = c_ulonglong
ll.elconn_list_from_json.restype = c_ulonglong
ll.elconn_make_interpreter.restype = c_ulonglong
ll.elconn_call.restype = c_ulonglong
ll.elconn_connect_remote.restype = c_ulonglong
ll.elconn_list_strfirst.restype = c_char_p
ll.elconn_list_to_json.restype = c_char_p
# === set argument types
ll.elconn_list_from_json.argtypes = [c_char_p]
ll.elconn_serve_remote.argtypes = [c_char_p, c_ulonglong]
# == Manual Test 1 == Using the interpreter
initMsg = ll.elconn_init(0)
ll.elconn_display_info(initMsg)
testList = json.dumps(["format", "Hello, %s!", "World"])
listID = ll.elconn_list_from_json(testList.encode())
ll.elconn_list_print(listID)
interpID = ll.elconn_make_interpreter()
resultID = ll.elconn_call(interpID, listID)
ll.elconn_list_print(resultID)
# == Manual Test 2 == Connecting to remote interpreter
ll.elconn_serve_remote(b":3003", interpID)
time.sleep(1)
remoteID = ll.elconn_connect_remote(b"http://localhost:3003")
rResultID = ll.elconn_call(remoteID, listID)
ll.elconn_list_print(rResultID)
# == Manual Test 3 == Value on server
someList = json.dumps([":", "test-var", ["store", "hello", 1]])
listID = ll.elconn_list_from_json(someList.encode())
resultID = ll.elconn_call(interpID, listID)
someList = json.dumps(["format", "%s there %f", ["test-var"]])
listID = ll.elconn_list_from_json(someList.encode())
resultID = ll.elconn_call(remoteID, listID)
rResultID = ll.elconn_call(remoteID, listID)
ll.elconn_list_print(rResultID)
# == Manual Test 4 == Directory with value on server
runList(ll, interpID, [":", "test-map", ["@", "directory"]])
runList(ll, interpID, ["test-map", ":", "a", ["store", "test value 2"]])
resID = runList(ll, remoteID, ["test-map", "a"])
ll.elconn_list_print(resID)
# == Manual Test 5 == Request queue
runList(ll, interpID, ["test-map", ":", "b", ["@", "requests"]])
runList(ll, interpID, ["test-map", "b", "enque", "[\"some_json\"]"])
resID = runList(ll, remoteID, ["test-map", "b", "block"])
ll.elconn_list_print(resID)
# -- schedule something to be enqueued later
def do_the_thing(ll, interpID, item, delay):
time.sleep(delay)
runList(ll, interpID, ["test-map", "b", "enque", item])
thread.start_new_thread(do_the_thing, (ll, interpID, "test-thread", 4))
print("Wait 4 seconds...")
resID = runList(ll, remoteID, ["test-map", "b", "block"])
ll.elconn_list_print(resID)
# == Manual Test 6 == Getting values
someList = json.dumps(["format", "%s there %f", ["test-var"]])
listID = ll.elconn_list_from_json(someList.encode())
firstStr = ll.elconn_list_strfirst(listID)
print("firstStr = %s" % firstStr)
asJSON = ll.elconn_list_to_json(listID)
print("asJSON = %s" % asJSON)
| 33.225806 | 72 | 0.724272 | [
"MIT"
] | epicframework/EPIC-Core | connective/connective/python/elconn_trial_script.py | 3,090 | Python |
from lxml import html
from Proxy import Proxy
import urllib.request
import urllib.parse
import urllib.error
import data
import sys
import bcolors
class Search :
#Initializes variables
def __init__(self, useproxy, retries = None, verbose = False, sleep = 5):
self.urls = [] # contains scraped urls
self.blacklist = [] # contains blacklisted proxies
self.useproxy = useproxy # dictates use of proxy
self.retries = retries # sets the number of search retries, if None => unlimited
self.verbose = verbose # sets verbosity level
self.sleep = sleep # dictates sleep while searching for urls
self.proxyhandler = None
if (self.useproxy) :
self.proxyhandler = Proxy(self.verbose)
self.proxyhandler.proxify()
if (self.verbose) :
bcolors.printGreen("[+]Search object created!")
def print_state(self) :
bcolors.printBold("****Printing object state****")
bcolors.printBold("URLs:\n")
print(str(self.urls))
bcolors.printBold("Blacklist:\n")
print(str(self.blacklist))
bcolors.printBold("Settings:\n")
print("Retries: " + str(self.retries) + ", Verbose: " + str(self.verbose) + ", Sleep: " + str(self.sleep))
def print_urls(self) :
bcolors.printBold("****PRINTING URLS****\n")
for url in self.urls :
print(str(url))
# Returns the HTML page of a website.
# It incorporates error checking and retries
    # If a non-503 HTTP error is raised, None is returned; 503 errors trigger proxy blacklisting and a retry.
def get_html(self, url) :
if (self.useproxy) :
self.proxyhandler.validate_proxy()
req = urllib.request.Request(url, None, data.headers)
tries = 0
while (self.retries == None or tries < self.retries):
try :
res = urllib.request.urlopen(req)
src = res.read()
break
except urllib.error.HTTPError as e:
if (self.useproxy) :
self.update_proxy()
if (e.code != 503) :
bcolors.printFail("[-]HTTP Error " + str(e) + " was raised!")
return None
# If we have to retry, append current proxy to blacklist
if (self.useproxy) :
# Blacklists both proxies if error occured!
self.proxyhandler.blacklist_current_proxy(True)
tries += 1
        return html.fromstring(src)
def update_proxy(self, https=False) :
self.proxyhandler.proxify(https, True)
self.proxyhandler.validate_proxy()
def fatal_exception(self,e = None, function_name = None) :
bcolors.printFail("A fatal exception has occured!")
if (not e == None) :
print(str(e))
elif (not function_name == None) :
print(str(function_name))
bcolors.printBold("****PROGRAM STATE****")
self.print_state()
sys.exit(0)
| 37.256098 | 115 | 0.572504 | [
"MIT"
] | Spectrum256/webvulns | dev/Search.py | 3,055 | Python |
# !pip install transformers
import torch
from transformers.file_utils import is_tf_available, is_torch_available, is_torch_tpu_available
from transformers import BertTokenizerFast, BertForSequenceClassification
from transformers import Trainer, TrainingArguments
import numpy as np
import random
from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# safe to call this function even if cuda is not available
if is_tf_available():
import tensorflow as tf
tf.random.set_seed(seed)
set_seed(1)
# the model we gonna train, base uncased BERT
# check text classification models here: https://huggingface.co/models?filter=text-classification
model_name = "bert-base-uncased"
# max sequence length for each document/sentence sample
max_length = 512
# load the tokenizer
tokenizer = BertTokenizerFast.from_pretrained(model_name, do_lower_case=True)
def read_20newsgroups(test_size=0.2):
# download & load 20newsgroups dataset from sklearn's repos
dataset = fetch_20newsgroups(subset="all", shuffle=True, remove=("headers", "footers", "quotes"))
documents = dataset.data
labels = dataset.target
# split into training & testing a return data as well as label names
return train_test_split(documents, labels, test_size=test_size), dataset.target_names
# call the function
(train_texts, valid_texts, train_labels, valid_labels), target_names = read_20newsgroups()
# tokenize the dataset, truncate when passed `max_length`,
# and pad with 0's when less than `max_length`
train_encodings = tokenizer(train_texts, truncation=True, padding=True, max_length=max_length)
valid_encodings = tokenizer(valid_texts, truncation=True, padding=True, max_length=max_length)
class NewsGroupsDataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {k: torch.tensor(v[idx]) for k, v in self.encodings.items()}
item["labels"] = torch.tensor([self.labels[idx]])
return item
def __len__(self):
return len(self.labels)
# convert our tokenized data into a torch Dataset
train_dataset = NewsGroupsDataset(train_encodings, train_labels)
valid_dataset = NewsGroupsDataset(valid_encodings, valid_labels)
# load the model and pass to CUDA
model = BertForSequenceClassification.from_pretrained(model_name, num_labels=len(target_names)).to("cuda")
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
# calculate accuracy using sklearn's function
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
}
training_args = TrainingArguments(
output_dir='./results', # output directory
num_train_epochs=3, # total number of training epochs
per_device_train_batch_size=8, # batch size per device during training
per_device_eval_batch_size=20, # batch size for evaluation
warmup_steps=500, # number of warmup steps for learning rate scheduler
weight_decay=0.01, # strength of weight decay
logging_dir='./logs', # directory for storing logs
load_best_model_at_end=True, # load the best model when finished training (default metric is loss)
# but you can specify `metric_for_best_model` argument to change to accuracy or other metric
logging_steps=400, # log & save weights each logging_steps
save_steps=400,
evaluation_strategy="steps", # evaluate each `logging_steps`
)
trainer = Trainer(
model=model, # the instantiated Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=valid_dataset, # evaluation dataset
compute_metrics=compute_metrics, # the callback that computes metrics of interest
)
# train the model
trainer.train()
# evaluate the current model after training
trainer.evaluate()
# saving the fine tuned model & tokenizer
model_path = "20newsgroups-bert-base-uncased"
model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)
| 41.078947 | 115 | 0.730728 | [
"MIT"
] | AJuneSlop/pythoncode-tutorials | machine-learning/nlp/bert-text-classification/train.py | 4,683 | Python |
# DADSA - Assignment 1
# Reece Benson
import json
from classes import Player as Player
from classes import Season as Season
from classes import Tournament as Tournament
from classes import Round as Round
from classes import Match as Match
class Handler():
# Define the variables we will be using
app = None
prize_money = None
seasons = { }
def __init__(self, _app):
if(_app.debug):
print("[LOAD]: Loaded Handler!")
# Define our Application within this Handler class
self.app = _app
# Used to load all data into memory
def load(self):
# This function will create our seasons and implement the genders & players
self.load_prize_money()
self.load_players()
#TODO: Implement load_seasons()
# Used to load prize money
def load_prize_money(self):
with open('./data/rankingPoints.json') as tData:
data = json.load(tData)
# Make our prize_money a dictionary
if(self.prize_money == None):
self.prize_money = { }
            # Flatten the points -> ranks mapping into a list of (rank, points) pairs
self.prize_money = [ (rank,pts) for pts in data for rank in data[pts] ]
print(self.prize_money)
# Used to load players from all seasons into memory
def load_players(self):
with open('./data/players.json') as tData:
data = json.load(tData)
# Players are classed within Seasons
for season in data:
# If the season does not yet exist, create it
if(not season in self.seasons):
self.seasons[season] = { "players": { } }
# Players are then stored within Gender classifications
for gender in data[season]:
if(not gender in self.seasons[season]["players"]):
self.seasons[season]["players"][gender] = [ ]
# Append our player in the season, within the gender
for player in data[season][gender]:
#TODO: Change to using Player class
self.seasons[season]["players"][gender].append(player)
def get_players(self, season):
# Check our Season exists
if(not season in self.seasons):
return None
else:
# Check we have players within our Season
if("players" in self.seasons[season]):
return self.seasons[season]["players"]
else:
            return None
| 33.8 | 83 | 0.582249 | [
"MIT"
] | reecebenson/DADSA-Tennis-PartA | .history/classes/Handler_20171106200011.py | 2,535 | Python |
'''
Test script for GrFNN, plotting the entrainment for a sin wave of changing frequency.
@author T. Kaplan
'''
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import time
from gfnn import FrequencyType, FrequencyDist, ZParams, GrFNN
from plot import spectro_plot
# Construct our model by instantiating the class defined above
dim_in = 300
freq_dist = FrequencyDist(0.25, 6.0, dim_in, FrequencyType.LINEAR)
zparams = ZParams()
model = GrFNN(freq_dist, zparams, fs=160)
# Stimulus - 50 seconds of FHz sin, at a changing frequency (4->2)
F = 4
t1 = np.arange(0, 25, model.dt)
x1 = np.sin(2 * np.pi * F * t1) * 0.25
t2 = np.arange(25, 50, model.dt)
x2 = np.sin(2 * np.pi * F/2 * t2) * 0.25
# Prepare an initial plot
t = np.concatenate([t1, t2])
x = np.concatenate([x1, x2])
px = freq_dist.dist
py = np.zeros(px.shape)
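# Live plot of oscillator amplitude against natural frequency; the y-data is updated inside the loop below.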
plt.plot(px, py)
zs = np.empty((len(t), dim_in), dtype=np.complex64)
t0 = time.time()
for i in range(len(t)):
out = model(x[i])
zs[i] = out
# Update plot:
if i % 10 == 0:
py = np.abs(out)
plt.gca().lines[0].set_ydata(py)
plt.gca().relim()
plt.gca().autoscale_view()
plt.pause(0.01)
t1 = time.time()
print('Took', round(t1-t0, 2))
plt.show()
| 24.326923 | 85 | 0.658498 | [
"MIT"
] | Kappers/oscirhythm | model/sin_example_gfnn.py | 1,265 | Python |
from maestro.core.provider import BaseSyncProvider
class NoSQLSyncProvider(BaseSyncProvider):
    pass
| 20.8 | 50 | 0.836538 | [
"BSD-3-Clause"
] | estudio89/maestro-python | maestro/backends/base_nosql/provider.py | 104 | Python |
'''
Adapted from https://github.com/IntelligentQuadruped, with permission
Description: Module to connect to camera and retrieve RGB and depth data. Currently supports the Intel RealSense R200 Camera.
'''
import numpy as np
import logging
import time
import cv2
import matplotlib.pyplot as plt
from skimage.transform import rescale
from file_support import ensureDir
from os import path, makedirs
try:
import pyrealsense as pyrs
except ImportError as error:
logging.warning("cam.py: " + str(error))
class Camera:
"""
Object to get data from R200
"""
def __init__(self, max_depth = 4.0, save_images = False, \
t_buffer = 5, output_dir = './Trials/'):
"""
Intitalizes Camera object
"""
self.max_depth = max_depth
self.save_images = save_images
self.clock = time.time()
self.t_buffer = t_buffer
self.output_dir = output_dir
self.data_dir = path.join(self.output_dir,"{}".format(time.strftime("%d_%b_%Y_%H:%M", time.localtime())))
if self.save_images:
ensureDir(self.data_dir)
pass
np.warnings.filterwarnings('ignore')
def connect(self):
"""
Establishes connection to R200 camera
"""
logging.info("Cam.py: connecting components")
self.serv = pyrs.Service()
self.dev = self.serv.Device(device_id=0,
streams=[\
pyrs.stream.DepthStream(fps=60), pyrs.stream.ColorStream(fps=60)])
def disconnect(self):
"""
Disconnects from R200 camera
"""
self.dev.stop()
self.serv.stop()
logging.info("Cam.py: camera disconnected")
def getFrames(self, frames = 5, rgb = False):
"""
Retrieves depth frames (and RGB if true) from R200 input, cleans and averages depth images
"""
self.dev.wait_for_frames()
# Convert depth to meters
depth = self.dev.depth * self.dev.depth_scale
col = self.dev.color
if self.save_images and (time.time() - self.clock > self.t_buffer):
np.save(path.join(self.data_dir,str(time.time())+"_d"),depth)
np.save(path.join(self.data_dir,str(time.time())+"_c"),col)
self.clock = time.time()
for _ in range(frames-1):
self.dev.wait_for_frames()
# Convert depth to meters
curr = self.dev.depth * self.dev.depth_scale
depth = np.dstack((depth, curr))
if frames != 1:
depth = np.nanmean(depth, 2)
depth[depth <= 0] = np.nan
depth[depth > self.max_depth] = np.nan
if rgb:
return depth, col
return depth
def reduceFrame(self, depth, height_ratio = 0.5, sub_sample = 0.3, reduce_to = 'lower'):
"""
Takes in a depth image and rescales it
Args:
height_ratio: Determines fraction of rows to keep
sub_sample: Scaling factor for image
"""
if (height_ratio > 1.0) or (height_ratio < 0.0)\
or (sub_sample > 1.0) or (sub_sample < 0.0):
print('height_ratio and sub_sample must be between 0 and 1')
exit(1)
depth_copy = depth.copy()
height = depth_copy.shape[0]
h = int(height_ratio*(height))
cols_to_cut = 0
# catches the case when all rows are kept
if height_ratio == 1:
d_short = depth_copy
elif reduce_to == 'lower':
d_short = depth_copy[(height - h):,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle_lower':
upper_brdr = int(3*(height/4.0) - h/2)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle':
upper_brdr = int((height - h)/2.0)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'middle_upper':
upper_brdr = int((height/4.0) - h/2)
lower_brdr = upper_brdr + h
d_short = depth_copy[upper_brdr:lower_brdr,\
cols_to_cut:-(cols_to_cut+1)]
elif reduce_to == 'upper':
d_short = depth_copy[:h, cols_to_cut:-(cols_to_cut+1)]
d_short[d_short <= 0] = np.nan
d_short[d_short > self.max_depth] = np.nan
rescaled = rescale(d_short, sub_sample, mode='reflect', multichannel=False, anti_aliasing=True)
return rescaled
def main():
"""
Unit tests
"""
max_depth = 4.0
numFrames = 10
# height_ratio of 0 crops 0 rows away
height_ratio = 0.5
sub_sample = 1
# reduce_to argument can be: 'lower', 'middle_lower', 'middle', 'middle_upper', and 'upper'
reduce_to = 'middle_lower'
print('Program settings:')
print('\tmax_depth: ' + str(max_depth))
print('\tnumFrames: ' + str(numFrames))
print('\theight_ratio: ' + str(height_ratio))
print('\tsub_sample: ' + str(sub_sample))
print('\treduce_to: ' + reduce_to)
cam = Camera(max_depth = max_depth)
cam.connect()
time.sleep(2.5)
t1 = time.time()
d = cam.getFrames(numFrames)
t2 = time.time()
printStmt = 'Time to get {0} frames: ' + str(t2 - t1)
print(printStmt.format(numFrames))
d_small = cam.reduceFrame(d, height_ratio = height_ratio, sub_sample = sub_sample, reduce_to = reduce_to)
# colormap:
# https://matplotlib.org/tutorials/colors/colormaps.html
# scaled depth
plt.figure(figsize = (6, 7)) # figsize = width, height
ax2 = plt.subplot(2, 1, 2)
plt.imshow(d_small, cmap='gist_rainbow')
plt.colorbar()
plt.title('Scaled (height_ratio = {0}, sub_sample = {1})'.format(height_ratio, sub_sample))
plt.grid()
# original depth
# plt.subplot(2, 1, 1, sharex=ax2, sharey=ax2)
plt.subplot(2, 1, 1)
plt.imshow(d, cmap='gist_rainbow')
plt.colorbar()
plt.title('Original')
plt.grid()
plt.subplots_adjust(hspace = 0.3)
plt.show()
cam.disconnect()
if __name__ == "__main__":
    main()
| 31.134328 | 125 | 0.591243 | [
"MIT"
] | marioliu/AutonomousQuadblade | Camera/camera.py | 6,258 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-08 21:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20171108_2343'),
]
operations = [
migrations.AddField(
model_name='onlinecheckout',
name='transaction_date',
field=models.DateTimeField(blank=True, null=True),
),
]
| 22.666667 | 62 | 0.630252 | [
"MIT"
] | Eleazar-Harold/mpesa_api | mpesa_api/core/migrations/0012_onlinecheckout_transaction_date.py | 476 | Python |
#!/usr/bin/env python
# Copyright 2012 La Honda Research Center, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""clean_file_locks.py - Cleans stale interprocess locks
This routine can be used to find and delete stale lock files from
nova's interprocess synchronization. It can be used safely while services
are running.
"""
import logging
import optparse
from nova import flags
from nova import log
from nova import utils
LOG = log.getLogger('nova.utils')
FLAGS = flags.FLAGS
def parse_options():
"""process command line options."""
parser = optparse.OptionParser('usage: %prog [options]')
parser.add_option('--verbose', action='store_true',
help='List lock files found and deleted')
options, args = parser.parse_args()
return options, args
def main():
"""Main loop."""
options, args = parse_options()
verbose = options.verbose
if verbose:
LOG.logger.setLevel(logging.DEBUG)
else:
LOG.logger.setLevel(logging.INFO)
LOG.info('Cleaning stale locks from %s' % FLAGS.lock_path)
utils.cleanup_file_locks()
LOG.info('Finished')
if __name__ == '__main__':
main()
| 26.09375 | 74 | 0.713772 | [
"Apache-2.0"
] | bopopescu/extra-specs-1 | tools/clean_file_locks.py | 1,670 | Python |
from flask import Flask
from flask_cors import CORS
from . import configs
def create_app():
app = Flask(__name__)
app.config.from_object(configs.Config)
CORS(app, resources={r"/api/*": {"origins": "*"}})
from .controllers import api_blueprint
app.register_blueprint(api_blueprint)
return app
| 18.941176 | 54 | 0.704969 | [
"MIT"
] | BiznetGIO/whois-api | app/__init__.py | 322 | Python |
# Copyright 2018 Yegor Bitensky
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_parameters(func):
def wrap(self, values):
return func(self, **values)
return wrap
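# Illustrative usage sketch (an assumption, not part of the library): the decorator
# lets a method take a single dict and receive its entries as keyword arguments, e.g.
#
#     class Hand:
#         @get_parameters
#         def deal(self, cards=None, table=None):
#             return cards, table
#
#     Hand().deal({"cards": "As Kd", "table": "2h 7c Td"})  # -> ("As Kd", "2h 7c Td")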
| 34.15 | 74 | 0.746706 | [
"Apache-2.0"
] | YegorDB/THPoker | tests/utils.py | 683 | Python |
import redis
import json
from itertools import zip_longest
from common.config import REDIS_ADDR, REDIS_PORT, REDIS_DB
def batcher(iterable, n):
args = [iter(iterable)] * n
return zip_longest(*args)
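# batcher() is the usual zip_longest "grouper" idiom: it yields fixed-size tuples,
# padding the final one with None, e.g.
#   list(batcher('ABCDE', 2)) -> [('A', 'B'), ('C', 'D'), ('E', None)]
# hence the `if i:` guard on each element in clean_with_video_id() below.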
def insert2redis(ids, dataList):
# dataList [{"videoId": id, "feat": feat, "name": name}]
r = redis.Redis(host=REDIS_ADDR, port=REDIS_PORT, db=REDIS_DB)
if len(ids) != len(dataList):
# TODO return error
return ""
for k, v in enumerate(ids):
r_key = v
r_value = dataList[k]
r.set(r_key, json.dumps(r_value))
def clean_with_video_id(id):
r = redis.StrictRedis(host=REDIS_ADDR, port=REDIS_PORT, db=REDIS_DB)
for keybatch in batcher(r.scan_iter('*'), 500):
for i in keybatch:
if i:
if json.loads(r.get(i)).get("videoId") == id:
r.delete(i)
def total_images():
r = redis.StrictRedis(host=REDIS_ADDR, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
total = r.get("IMG_TOTAL")
if not total:
r.set("IMG_TOTAL", 0)
return 0
return int(total)
def total_images_add():
r = redis.StrictRedis(host=REDIS_ADDR, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
total = r.get("IMG_TOTAL")
if not total:
r.set("IMG_TOTAL", 1)
return 1
r.set("IMG_TOTAL", int(total)+1)
return int(total)+1
def total_images_reduce():
r = redis.StrictRedis(host=REDIS_ADDR, port=REDIS_PORT, db=REDIS_DB, decode_responses=True)
total = r.get("IMG_TOTAL")
if not total:
r.set("IMG_TOTAL", 0)
return 0
r.set("IMG_TOTAL", int(total)-1)
return int(total)-1
def search(ids):
res = []
r = redis.Redis(host=REDIS_ADDR, port=REDIS_PORT, db=REDIS_DB)
for i in ids:
v = r.get(i.id)
if v and json.loads(v).get('videoId') not in res:
res.append([json.loads(v).get('videoId'), i.distance])
return res
| 29.424242 | 95 | 0.625644 | [
"Apache-2.0"
] | JackLCL/bootcamp | solutions/video_similarity_search/search-video-demo/search/controller/database.py | 1,942 | Python |
from django.contrib import admin
from menus.models import Dish, Menu
@admin.register(Menu)
class MenuAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'description', 'created', 'updated')
search_fields = ('id', 'name', 'description')
list_filter = ('created', 'updated')
raw_id_fields = ('dishes',)
@admin.register(Dish)
class DishAdmin(admin.ModelAdmin):
list_display = (
'id',
'name',
'description',
'price',
'time_to_prepare',
'is_vegetarian',
'created',
'updated',
)
search_fields = ('id', 'name', 'description')
list_filter = ('is_vegetarian', 'created', 'updated')
| 25.037037 | 70 | 0.606509 | [
"MIT"
] | dzbrozek/emenu-api | emenuapi/menus/admin.py | 676 | Python |
"""
show_interface.py
IOSXE parsers for the following show commands:
* show interfaces
* show ip interfaces <interface>
* show ip interface brief
* show ip interface brief | include Vlan
* show interfaces switchport
* show ip interface
* show interfaces <interface>
* show ipv6 interface
* show interfaces accounting
* show interfaces status
* show interface {interface} transceiver detail
"""
import os
import logging
import pprint
import re
import unittest
from genie import parsergen
from collections import defaultdict
from pyats.log.utils import banner
import xmltodict
try:
import iptools
from cnetconf import testmodel
except (ImportError, OSError):
pass
try:
from pyats import tcl
except Exception:
pass
from genie.metaparser import MetaParser
from genie.metaparser.util import merge_dict, keynames_convert
from genie.metaparser.util.schemaengine import Schema, \
Any, \
Optional, \
Or, \
And, \
Default, \
Use
# import parser utils
from genie.libs.parser.utils.common import Common
logger = logging.getLogger(__name__)
class ShowInterfacesSchema(MetaParser):
"""schema for show interfaces
show interfaces <interface>"""
schema = {
Any(): {
Optional('oper_status'): str,
Optional('line_protocol'): str,
Optional('enabled'): bool,
Optional('connected'): bool,
Optional('description'): str,
Optional('type'): str,
Optional('link_state'): str,
Optional('port_speed'): str,
Optional('duplex_mode'): str,
Optional('link_type'): str,
Optional('media_type'): str,
Optional('mtu'): int,
Optional('maximum_active_vcs'): str,
Optional('vcs_per_vp'): str,
Optional('vc_idle_disconnect_time'): str,
Optional('vc_auto_creation'): str,
Optional('current_vccs'): str,
Optional('aal5_crc_errors'): int,
Optional('aal5_oversized_sdus'): int,
Optional('aal5_sar_timeouts'): int,
Optional('vaccess_status'): str,
Optional('vaccess_loopback'): str,
Optional('base_pppoatm'): str,
Optional('dtr_pulsed'): str,
Optional('sub_mtu'): int,
Optional('medium'): str,
Optional('reliability'): str,
Optional('txload'): str,
Optional('rxload'): str,
Optional('mac_address'): str,
Optional('phys_address'): str,
Optional('delay'): int,
Optional('carrier_delay'): int,
Optional('carrier_delay_up'): int,
Optional('carrier_delay_down'): int,
Optional('keepalive'): int,
Optional('auto_negotiate'): bool,
Optional('arp_type'): str,
Optional('arp_timeout'): str,
Optional('last_input'): str,
Optional('last_output'): str,
Optional('output_hang'): str,
Optional('autostate'): bool,
Optional('queues'): {
Optional('input_queue_size'): int,
Optional('input_queue_max'): int,
Optional('input_queue_drops'): int,
Optional('input_queue_flushes'): int,
Optional('total_output_drop'): int,
Optional('queue_strategy'): str,
Optional('output_queue_size'): int,
Optional('output_queue_max'): int,
Optional('threshold'): int,
Optional('drops'): int,
},
Optional('flow_control'):
{Optional('receive'): bool,
Optional('send'): bool,
},
Optional('port_channel'):
{Optional('port_channel_member'): bool,
Optional('port_channel_int'): str,
Optional('port_channel_member_intfs'): list,
Optional('active_members'): int,
Optional('num_of_pf_jumbo_supported_members'): int,
},
Optional('bandwidth'): int,
Optional('counters'):
{Optional('rate'):
{Optional('load_interval'): int,
Optional('in_rate'): int,
Optional('in_rate_pkts'): int,
Optional('out_rate'): int,
Optional('out_rate_pkts'): int,
Optional('in_rate_bps'): int,
Optional('in_rate_pps'): int,
Optional('out_rate_bps'): int,
Optional('out_rate_pps'): int,
},
Optional('in_multicast_pkts'): int,
Optional('in_broadcast_pkts'): int,
Optional('in_crc_errors'): int,
Optional('in_giants'): int,
Optional('in_pkts'): int,
Optional('in_frame'): int,
Optional('in_runts'): int,
Optional('in_overrun'): int,
Optional('in_ignored'): int,
Optional('in_watchdog'): int,
Optional('in_with_dribble'): int,
Optional('in_octets'): int,
Optional('in_errors'): int,
Optional('in_abort'): int,
Optional('in_no_buffer'): int,
Optional('in_throttles'): int,
Optional('in_mac_pause_frames'): int,
Optional('out_pkts'): int,
Optional('out_octets'): int,
Optional('out_multicast_pkts'): int,
Optional('out_broadcast_pkts'): int,
Optional('out_errors'): int,
Optional('out_collision'): int,
Optional('out_interface_resets'): int,
Optional('out_unknown_protocl_drops'): int,
Optional('out_babbles'): int,
Optional('out_deferred'): int,
Optional('out_underruns'): int,
Optional('out_late_collision'): int,
Optional('out_lost_carrier'): int,
Optional('out_no_carrier'): int,
Optional('out_babble'): int,
Optional('out_mac_pause_frames'): int,
Optional('out_buffer_failure'): int,
Optional('out_buffers_swapped'): int,
Optional('last_clear'): str,
},
Optional('encapsulations'):
{Optional('encapsulation'): str,
Optional('first_dot1q'): str,
Optional('second_dot1q'): str,
Optional('native_vlan'): int,
},
Optional('ipv4'):
{Any():
{Optional('ip'): str,
Optional('prefix_length'): str,
Optional('secondary'): bool
},
Optional('unnumbered'): {
'interface_ref': str,
},
},
},
}
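# For orientation, a parsed entry shaped by the schema above looks roughly like
# the following abbreviated sketch (values are illustrative):
#   {'GigabitEthernet1': {'enabled': True, 'oper_status': 'up',
#                         'port_channel': {'port_channel_member': False},
#                         'counters': {'in_pkts': 0, 'out_pkts': 0}}}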
class ShowInterfaces(ShowInterfacesSchema):
"""parser for show interfaces
show interfaces <interface>"""
cli_command = ['show interfaces','show interfaces {interface}']
exclude = ['in_octets', 'in_pkts', 'out_octets', 'out_pkts',
'in_rate', 'in_rate_pkts', 'out_rate', 'out_rate_pkts',
'input_queue_size', 'in_broadcast_pkts', 'in_multicast_pkts',
'last_output', 'out_unknown_protocl_drops', 'last_input',
'input_queue_drops', 'out_interface_resets', 'rxload',
'txload', 'last_clear', 'in_crc_errors', 'in_errors',
'in_giants', 'unnumbered', 'mac_address', 'phys_address',
'out_lost_carrier', '(Tunnel.*)', 'input_queue_flushes',
'reliability']
def cli(self,interface="",output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# GigabitEthernet1 is up, line protocol is up
# Port-channel12 is up, line protocol is up (connected)
# Vlan1 is administratively down, line protocol is down , Autostate Enabled
# Dialer1 is up (spoofing), line protocol is up (spoofing)
p1 = re.compile(r'^(?P<interface>[\w\/\.\-]+) +is +(?P<enabled>[\w\s]+)(?: '
r'+\S+)?, +line +protocol +is +(?P<line_protocol>\w+)(?: '
r'*\((?P<attribute>\S+)\)|( +\, +Autostate +(?P<autostate>\S+)))?.*$')
p1_1 = re.compile(r'^(?P<interface>[\w\/\.\-]+) +is'
r' +(?P<enabled>[\w\s]+),'
r' +line +protocol +is +(?P<line_protocol>\w+)'
r'( *, *(?P<attribute>[\w\s]+))?$')
# Hardware is Gigabit Ethernet, address is 0057.d2ff.428c (bia 0057.d2ff.428c)
# Hardware is Loopback
p2 = re.compile(r'^Hardware +is +(?P<type>[a-zA-Z0-9\-\/\s\+]+)'
r'(, *address +is +(?P<mac_address>[a-z0-9\.]+)'
r' *\(bia *(?P<phys_address>[a-z0-9\.]+)\))?$')
# Hardware is LTE Adv CAT6 - Multimode LTE/DC-HSPA+/HSPA+/HSPA/UMTS/EDGE/GPRS
p2_2 = re.compile(r'Hardware +is +(?P<type>[a-zA-Z0-9\-\/\+ ]+)'
r'(?P<mac_address>.*)(?P<phys_address>.*)')
# Description: desc
# Description: Pim Register Tunnel (Encap) for RP 10.186.1.1
p3 = re.compile(r'^Description: *(?P<description>.*)$')
# Secondary address 10.2.2.2/24
p4 = re.compile(r'^Secondary +Address +is +(?P<ipv4>(?P<ip>[0-9\.]+)'
r'\/(?P<prefix_length>[0-9]+))$')
# Internet address is 10.4.4.4/24
p5 = re.compile(r'^Internet +[A|a]ddress +is +(?P<ipv4>(?P<ip>[0-9\.x]+)'
r'\/(?P<prefix_length>[0-9]+))$')
# MTU 1500 bytes, BW 768 Kbit/sec, DLY 3330 usec,
# MTU 1500 bytes, BW 10000 Kbit, DLY 1000 usec,
# MTU 1600 bytes, sub MTU 1600, BW 3584 Kbit/sec, DLY 410 usec,
# MTU 1500 bytes, BW 5200 Kbit/sec, RxBW 25000 Kbit/sec, DLY 100 usec,
p6 = re.compile(r'^MTU +(?P<mtu>\d+) +bytes(, +sub +MTU +'
r'(?P<sub_mtu>\d+))?, +BW +(?P<bandwidth>[0-9]+) +Kbit(\/sec)?'
r'(, +RxBW +[0-9]+ +Kbit(\/sec)?)?, +'
r'DLY +(?P<delay>[0-9]+) +usec,$')
# reliability 255/255, txload 1/255, rxload 1/255
p7 = re.compile(r'^reliability +(?P<reliability>[\d\/]+),'
r' +txload +(?P<txload>[\d\/]+), +rxload'
r' +(?P<rxload>[\d\/]+)$')
# Encapsulation LOOPBACK, loopback not set
# Encapsulation 802.1Q Virtual LAN, Vlan ID 20, medium is p2p
# Encapsulation ARPA, medium is broadcast
# Encapsulation QinQ Virtual LAN, outer ID 10, inner ID 20
# Encapsulation 802.1Q Virtual LAN, Vlan ID 1., loopback not set
# Encapsulation 802.1Q Virtual LAN, Vlan ID 105.
# Encapsulation(s): AAL5
p8 = re.compile(r'^Encapsulation(\(s\):)? +(?P<encapsulation>[\w\s\.]+)'
r'(, +(?P<rest>.*))?$')
# Keepalive set (10 sec)
p10 = re.compile(r'^Keepalive +set +\((?P<keepalive>[0-9]+)'
r' +sec\)$')
# Auto-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
# Full-duplex, 1000Mb/s, link type is auto, media type is
# Full Duplex, 1000Mbps, link type is auto, media type is RJ45
# Full Duplex, Auto Speed, link type is auto, media type is RJ45
# Full Duplex, 10000Mbps, link type is force-up, media type is unknown media type
# full-duplex, 1000 Mb/s
# auto-duplex, auto-speed
# auto-duplex, 10 Gb/s, media type is 10G
# Full Duplex, 10000Mbps, link type is force-up, media type is SFP-LR
# Full-duplex, 100Gb/s, link type is force-up, media type is QSFP 100G SR4
p11 = re.compile(r'^(?P<duplex_mode>\w+)[\-\s]+[d|D]uplex\, '
r'+(?P<port_speed>[\w\s\/]+|[a|A]uto-[S|s]peed|Auto '
r'(S|s)peed)(?:(?:\, +link +type +is '
r'+(?P<link_type>\S+))?(?:\, *(media +type +is| )'
r'*(?P<media_type>[\w\/\- ]+)?)(?: +media +type)?)?$')
# input flow-control is off, output flow-control is unsupported
p12 = re.compile(r'^(input|output) +flow-control +is +(?P<receive>\w+), +'
'(output|input) +flow-control +is +(?P<send>\w+)$')
# ARP type: ARPA, ARP Timeout 04:00:00
p13 = re.compile(r'^ARP +type: +(?P<arp_type>\w+), +'
'ARP +Timeout +(?P<arp_timeout>[\w\:\.]+)$')
# Last input never, output 00:01:05, output hang never
p14 = re.compile(r'^Last +input +(?P<last_input>[\w\.\:]+), +'
'output +(?P<last_output>[\w\.\:]+), '
'output +hang +(?P<output_hang>[\w\.\:]+)$')
# Members in this channel: Gi1/0/2
# Members in this channel: Fo1/0/2 Fo1/0/4
p15 = re.compile(r'^Members +in +this +channel: +'
'(?P<port_channel_member_intfs>[\w\/\.\s\,]+)$')
# No. of active members in this channel: 12
p15_1 = re.compile(r'^No\. +of +active +members +in +this +'
'channel: +(?P<active_members>\d+)$')
# Member 2 : GigabitEthernet0/0/10 , Full-duplex, 900Mb/s
p15_2 = re.compile(r'^Member +\d+ +: +(?P<interface>\S+) +,'
' +\S+, +\S+$')
# No. of PF_JUMBO supported members in this channel : 0
p15_3 = re.compile(r'^No\. +of +PF_JUMBO +supported +members +'
'in +this +channel +: +(?P<number>\d+)$')
# Last clearing of "show interface" counters 1d02h
p16 = re.compile(r'^Last +clearing +of +\"show +interface\" +counters +'
'(?P<last_clear>[\w\:\.]+)$')
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
p17 = re.compile(r'^Input +queue: +(?P<size>\d+)\/(?P<max>\d+)\/'
'(?P<drops>\d+)\/(?P<flushes>\d+) +'
'\(size\/max\/drops\/flushes\); +'
'Total +output +drops: +(?P<output_drop>\d+)$')
# Queueing strategy: fifo
# Queueing strategy: Class-based queueing
p18 = re.compile(r'^Queueing +strategy: +(?P<queue_strategy>\S+).*$')
# Output queue: 0/0 (size/max)
# Output queue: 0/1000/64/0 (size/max total/threshold/drops)
p19 = re.compile(r'^Output +queue: +(?P<size>\d+)\/(?P<max>\d+)'
'(?:\/(?P<threshold>\d+)\/(?P<drops>\d+))? '
'+\(size\/max(?: +total\/threshold\/drops\))?.*$')
# 5 minute input rate 0 bits/sec, 0 packets/sec
p20 = re.compile(r'^(?P<load_interval>[0-9\#]+)'
' *(?P<unit>(minute|second|minutes|seconds)) *input *rate'
' *(?P<in_rate>[0-9]+) *bits/sec,'
' *(?P<in_rate_pkts>[0-9]+) *packets/sec$')
# 5 minute output rate 0 bits/sec, 0 packets/sec
p21 = re.compile(r'^(?P<load_interval>[0-9\#]+)'
' *(minute|second|minutes|seconds) *output *rate'
' *(?P<out_rate>[0-9]+) *bits/sec,'
' *(?P<out_rate_pkts>[0-9]+) *packets/sec$')
# 0 packets input, 0 bytes, 0 no buffer
# 13350 packets input, 2513375 bytes
p22 = re.compile(r'^(?P<in_pkts>[0-9]+) +packets +input, +(?P<in_octets>[0-9]+) '
'+bytes(?:, +(?P<in_no_buffer>[0-9]+) +no +buffer)?$')
# Received 4173 broadcasts (0 IP multicasts)
# Received 535996 broadcasts (535961 multicasts)
p23 = re.compile(r'^Received +(?P<in_broadcast_pkts>\d+) +broadcasts +'
'\((?P<in_multicast_pkts>\d+) *(IP)? *multicasts\)$')
# 0 runts, 0 giants, 0 throttles
p24 = re.compile(r'^(?P<in_runts>[0-9]+) *runts,'
' *(?P<in_giants>[0-9]+) *giants,'
' *(?P<in_throttles>[0-9]+) *throttles$')
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
p25 = re.compile(r'^(?P<in_errors>[0-9]+) +input +errors, +'
'(?P<in_crc_errors>[0-9]+) +CRC, +'
'(?P<in_frame>[0-9]+) +frame, +'
'(?P<in_overrun>[0-9]+) +overrun, +'
'(?P<in_ignored>[0-9]+) +ignored'
'(, *(?P<in_abort>[0-9]+) +abort)?$')
# 0 watchdog, 535961 multicast, 0 pause input
p26 = re.compile(r'^(?P<in_watchdog>[0-9]+) +watchdog, +'
'(?P<in_multicast_pkts>[0-9]+) +multicast, +'
'(?P<in_pause_input>[0-9]+) +pause +input$')
# 0 input packets with dribble condition detected
p27 = re.compile(r'^(?P<in_with_dribble>[0-9]+) +input +packets +with +'
'dribble +condition +detected$')
# 23376 packets output, 3642296 bytes, 0 underruns
# 13781 packets output, 2169851 bytes
p28 = re.compile(r'^(?P<out_pkts>[0-9]+) +packets +output, +(?P<out_octets>[0-9]+) '
'+bytes(?:\, +(?P<out_underruns>[0-9]+) +underruns)?$')
# Received 4173 broadcasts (0 IP multicasts)
# Received 535996 broadcasts (535961 multicasts)
p29 = re.compile(r'^Received +(?P<out_broadcast_pkts>\d+) +broadcasts +'
'\((?P<out_multicast_pkts>\d+) *(IP)? *multicasts\)$')
# 0 output errors, 0 collisions, 2 interface resets
# 0 output errors, 0 interface resets
p30 = re.compile(r'^(?P<out_errors>[0-9]+) +output +errors,'
'( *(?P<out_collision>[0-9]+) +collisions,)? +'
'(?P<out_interface_resets>[0-9]+) +interface +resets$')
# 0 unknown protocol drops
p31 = re.compile(r'^(?P<out_unknown_protocl_drops>[0-9]+) +'
'unknown +protocol +drops$')
# 0 babbles, 0 late collision, 0 deferred
p32 = re.compile(r'^(?P<out_babble>[0-9]+) +babbles, +'
'(?P<out_late_collision>[0-9]+) +late +collision, +'
'(?P<out_deferred>[0-9]+) +deferred$')
# 0 lost carrier, 0 no carrier, 0 pause output
# 0 lost carrier, 0 no carrier
p33 = re.compile(r'^(?P<out_lost_carrier>\d+) +lost +carrier, +'
r'(?P<out_no_carrier>\d+) +no +carrier(, +(?P<out_pause_output>\d+) +'
r'pause +output)?$')
# 0 output buffer failures, 0 output buffers swapped out
p34 = re.compile(r'^(?P<out_buffer_failure>[0-9]+) +output +buffer +failures, +'
'(?P<out_buffers_swapped>[0-9]+) +output +buffers +swapped +out$')
# Interface is unnumbered. Using address of Loopback0 (10.4.1.1)
# Interface is unnumbered. Using address of GigabitEthernet0/2.1 (192.168.154.1)
p35 = re.compile(r'^Interface +is +unnumbered. +Using +address +of +'
'(?P<unnumbered_intf>[\w\/\.]+) +'
'\((?P<unnumbered_ip>[\w\.\:]+)\)$')
# 8 maximum active VCs, 1024 VCs per VP, 1 current VCCs
p36 = re.compile(r'^(?P<maximum_active_vcs>\d+) +maximum +active +VCs, +'
r'(?P<vcs_per_vp>\d+) +VCs +per +VP, +(?P<current_vccs>\d+) +current +VCCs$')
# VC Auto Creation Disabled.
p37 = re.compile(r'^VC +Auto +Creation +(?P<vc_auto_creation>\S+)\.$')
# VC idle disconnect time: 300 seconds
p38 = re.compile(r'^VC +idle +disconnect +time: +(?P<vc_idle_disconnect_time>\d+) +'
r'seconds$')
# AAL5 CRC errors : 0
p39 = re.compile(r'^(?P<key>\S+ +CRC +errors) +: +(?P<val>\d+)$')
# AAL5 SAR Timeouts : 0
p40 = re.compile(r'^(?P<key>\S+ +SAR +Timeouts) +: +(?P<val>\d+)$')
# AAL5 Oversized SDUs : 0
p41 = re.compile(r'^(?P<key>\S+ +Oversized +SDUs) +: +(?P<val>\d+)$')
# LCP Closed
# LCP Closed, loopback not set
p42 = re.compile(r'^LCP\s+(?P<state>\S+)(,\s+loopback\s+(?P<loopback>[\S\s]+))?$')
# Base PPPoATM vaccess
p43 = re.compile(r'^Base PPPoATM +(?P<base_pppoatm>\S+)$')
# Vaccess status 0x44, loopback not set
p44 = re.compile(r'^Vaccess\s+status\s+(?P<status>\S+),\s+'
r'loopback\s+(?P<loopback>[\S\s]+)$')
# DTR is pulsed for 5 seconds on reset
p45 = re.compile(r'^DTR +is +pulsed +for +(?P<dtr_pulsed>\d+) +'
r'seconds +on +reset$')
interface_dict = {}
unnumbered_dict = {}
for line in out.splitlines():
line = line.strip()
# GigabitEthernet1 is up, line protocol is up
# Port-channel12 is up, line protocol is up (connected)
# Vlan1 is administratively down, line protocol is down , Autostate Enabled
# Dialer1 is up (spoofing), line protocol is up (spoofing)
m = p1.match(line)
m1 = p1_1.match(line)
m = m if m else m1
if m:
interface = m.groupdict()['interface']
enabled = m.groupdict()['enabled']
line_protocol = m.groupdict()['line_protocol']
connected = m.groupdict()['attribute']
if m.groupdict()['autostate']:
autostate = m.groupdict()['autostate'].lower()
else:
autostate = None
if interface not in interface_dict:
interface_dict[interface] = {}
interface_dict[interface]['port_channel'] = {}
interface_dict[interface]['port_channel']\
['port_channel_member'] = False
if 'administratively down' in enabled or 'delete' in enabled:
interface_dict[interface]['enabled'] = False
else:
interface_dict[interface]['enabled'] = True
if line_protocol:
interface_dict[interface]\
['line_protocol'] = line_protocol
interface_dict[interface]\
['oper_status'] = line_protocol
if connected:
interface_dict[interface]['connected'] = True if connected == 'connected' else False
if autostate:
interface_dict[interface]['autostate'] = True if autostate == 'enabled' else False
continue
# Hardware is Gigabit Ethernet, address is 0057.d2ff.428c (bia 0057.d2ff.428c)
# Hardware is Loopback
m = p2.match(line)
# Hardware is LTE Adv CAT6 - Multimode LTE/DC-HSPA+/HSPA+/HSPA/UMTS/EDGE/GPRS
m1 = p2_2.match(line)
m = m if m else m1
if m:
types = m.groupdict()['type']
mac_address = m.groupdict()['mac_address']
phys_address = m.groupdict()['phys_address']
interface_dict[interface]['type'] = types
if mac_address:
interface_dict[interface]['mac_address'] = mac_address
if phys_address:
interface_dict[interface]['phys_address'] = phys_address
continue
# Description: desc
# Description: Pim Register Tunnel (Encap) for RP 10.186.1.1
m = p3.match(line)
if m:
description = m.groupdict()['description']
interface_dict[interface]['description'] = description
continue
# Secondary address 10.2.2.2/24
m = p4.match(line)
if m:
ip_sec = m.groupdict()['ip']
prefix_length_sec = m.groupdict()['prefix_length']
address_sec = m.groupdict()['ipv4']
if 'ipv4' not in interface_dict[interface]:
interface_dict[interface]['ipv4'] = {}
if address_sec not in interface_dict[interface]['ipv4']:
interface_dict[interface]['ipv4'][address_sec] = {}
interface_dict[interface]['ipv4'][address_sec]\
['ip'] = ip_sec
interface_dict[interface]['ipv4'][address_sec]\
['prefix_length'] = prefix_length_sec
interface_dict[interface]['ipv4'][address_sec]\
['secondary'] = True
continue
# Internet Address is 10.4.4.4/24
m = p5.match(line)
if m:
ip = m.groupdict()['ip']
prefix_length = m.groupdict()['prefix_length']
address = m.groupdict()['ipv4']
if 'ipv4' not in interface_dict[interface]:
interface_dict[interface]['ipv4'] = {}
if address not in interface_dict[interface]['ipv4']:
interface_dict[interface]['ipv4'][address] = {}
interface_dict[interface]['ipv4'][address]\
['ip'] = ip
interface_dict[interface]['ipv4'][address]\
['prefix_length'] = prefix_length
continue
# MTU 1500 bytes, BW 768 Kbit/sec, DLY 3330 usec,
# MTU 1500 bytes, BW 10000 Kbit, DLY 1000 usec,
m = p6.match(line)
if m:
mtu = m.groupdict()['mtu']
sub_mtu = m.groupdict().get('sub_mtu', None)
bandwidth = m.groupdict()['bandwidth']
if m.groupdict()['delay']:
interface_dict[interface]['delay'] = int(m.groupdict()['delay'])
if mtu:
interface_dict[interface]['mtu'] = int(mtu)
if sub_mtu:
interface_dict[interface]['sub_mtu'] = int(sub_mtu)
if bandwidth:
interface_dict[interface]['bandwidth'] = int(bandwidth)
continue
# reliability 255/255, txload 1/255, rxload 1/255
m = p7.match(line)
if m:
reliability = m.groupdict()['reliability']
txload = m.groupdict()['txload']
rxload = m.groupdict()['rxload']
interface_dict[interface]['reliability'] = reliability
interface_dict[interface]['txload'] = txload
interface_dict[interface]['rxload'] = rxload
continue
# Encapsulation LOOPBACK, loopback not set
# Encapsulation 802.1Q Virtual LAN, Vlan ID 20, medium is p2p
# Encapsulation ARPA, medium is broadcast
# Encapsulation QinQ Virtual LAN, outer ID 10, inner ID 20
# Encapsulation 802.1Q Virtual LAN, Vlan ID 1., loopback not set
# Encapsulation 802.1Q Virtual LAN, Vlan ID 105.
m = p8.match(line)
if m:
encapsulation = m.groupdict()['encapsulation']
encapsulation = m.groupdict()['encapsulation'].lower()
encapsulation = encapsulation.replace("802.1q virtual lan","dot1q")
if 'encapsulations' not in interface_dict[interface]:
interface_dict[interface]['encapsulations'] = {}
interface_dict[interface]['encapsulations']\
['encapsulation'] = encapsulation
rest = m.groupdict()['rest']
if not rest:
continue
# Vlan ID 20, medium is p2p
m1 = re.compile(r'(Vlan +ID +(?P<first_dot1q>[0-9]+),)?'
' *medium +is +(?P<medium>[a-z0-9]+)$').match(rest)
# will update key when output is valid
m2 = re.compile(r'loopback +(?P<loopback>[\w\s]+)$').match(rest)
# outer ID 10, inner ID 20
m3 = re.compile(r'outer +ID +(?P<first>[0-9]+), +'
'inner +ID (?P<second>[0-9]+)$').match(rest)
# Vlan ID 1., loopback not set
# Vlan ID 105.
m4 = re.compile(r'Vlan +ID +(?P<first_dot1q>\d+).'
'|(?:,(?P<rest>[\s\w]+))$').match(rest)
if m1:
first_dot1q = m1.groupdict()['first_dot1q']
if first_dot1q:
interface_dict[interface]['encapsulations']\
['first_dot1q'] = first_dot1q
                    interface_dict[interface]['medium'] = m1.groupdict()['medium']
elif m3:
first_dot1q = m3.groupdict()['first']
second_dot1q = m3.groupdict()['second']
interface_dict[interface]['encapsulations']\
['first_dot1q'] = first_dot1q
interface_dict[interface]['encapsulations']\
['second_dot1q'] = second_dot1q
elif m4:
first_dot1q = m4.groupdict()['first_dot1q']
if first_dot1q:
interface_dict[interface]['encapsulations']\
['first_dot1q'] = first_dot1q
continue
# Keepalive set (10 sec)
m = p10.match(line)
if m:
keepalive = m.groupdict()['keepalive']
if keepalive:
interface_dict[interface]['keepalive'] = int(keepalive)
continue
# Auto-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
# Full-duplex, 1000Mb/s, link type is auto, media type is
# Full Duplex, 1000Mbps, link type is auto, media type is RJ45
# Full Duplex, Auto Speed, link type is auto, media type is RJ45
# Full Duplex, 10000Mbps, link type is force-up, media type is unknown media type
# full-duplex, 1000 Mb/s
# auto-duplex, auto-speed
# auto-duplex, 10 Gb/s, media type is 10G
# Full Duplex, 10000Mbps, link type is force-up, media type is SFP-LR
# Full-duplex, 100Gb/s, link type is force-up, media type is QSFP 100G SR4
m = p11.match(line)
if m:
duplex_mode = m.groupdict()['duplex_mode'].lower()
port_speed = m.groupdict()['port_speed'].lower().replace('-speed', '')
link_type = m.groupdict()['link_type']
media_type = m.groupdict()['media_type']
interface_dict[interface]['duplex_mode'] = duplex_mode
interface_dict[interface]['port_speed'] = port_speed
if link_type:
interface_dict[interface]['link_type'] = link_type
if 'auto' in link_type:
interface_dict[interface]['auto_negotiate'] = True
else:
interface_dict[interface]['auto_negotiate'] = False
if media_type:
unknown = re.search(r'[U|u]nknown',media_type)
if unknown:
interface_dict[interface]['media_type'] = 'unknown'
else:
interface_dict[interface]['media_type'] = media_type
continue
# input flow-control is off, output flow-control is unsupported
m = p12.match(line)
if m:
receive = m.groupdict()['receive'].lower()
send = m.groupdict()['send'].lower()
if 'flow_control' not in interface_dict[interface]:
interface_dict[interface]['flow_control'] = {}
if 'on' in receive:
interface_dict[interface]['flow_control']['receive'] = True
elif 'off' in receive or 'unsupported' in receive:
interface_dict[interface]['flow_control']['receive'] = False
if 'on' in send:
interface_dict[interface]['flow_control']['send'] = True
elif 'off' in send or 'unsupported' in send:
interface_dict[interface]['flow_control']['send'] = False
continue
# Carrier delay is 10 sec
p_cd = re.compile(r'^Carrier +delay +is +(?P<carrier_delay>\d+).*$')
m = p_cd.match(line)
if m:
group = m.groupdict()
sub_dict = interface_dict.setdefault(interface, {})
sub_dict['carrier_delay'] = int(group['carrier_delay'])
# Asymmetric Carrier-Delay Up Timer is 2 sec
# Asymmetric Carrier-Delay Down Timer is 10 sec
p_cd_2 = re.compile(r'^Asymmetric +Carrier-Delay +(?P<type>Down|Up)'
' +Timer +is +(?P<carrier_delay>\d+).*$')
m = p_cd_2.match(line)
if m:
group = m.groupdict()
tp = group['type'].lower()
sub_dict = interface_dict.setdefault(interface, {})
if tp == 'up':
sub_dict['carrier_delay_up'] = int(group['carrier_delay'])
else:
sub_dict['carrier_delay_down'] = int(group['carrier_delay'])
# ARP type: ARPA, ARP Timeout 04:00:00
m = p13.match(line)
if m:
arp_type = m.groupdict()['arp_type'].lower()
arp_timeout = m.groupdict()['arp_timeout']
interface_dict[interface]['arp_type'] = arp_type
interface_dict[interface]['arp_timeout'] = arp_timeout
continue
# Last input never, output 00:01:05, output hang never
m = p14.match(line)
if m:
last_input = m.groupdict()['last_input']
last_output = m.groupdict()['last_output']
output_hang = m.groupdict()['output_hang']
interface_dict[interface]['last_input'] = last_input
interface_dict[interface]['last_output'] = last_output
interface_dict[interface]['output_hang'] = output_hang
continue
# Members in this channel: Gi1/0/2
# Members in this channel: Fo1/0/2 Fo1/0/4
m = p15.match(line)
if m:
interface_dict[interface]['port_channel']\
['port_channel_member'] = True
intfs = m.groupdict()['port_channel_member_intfs'].split(' ')
intfs = [Common.convert_intf_name(i.strip()) for i in intfs]
interface_dict[interface]['port_channel']\
['port_channel_member_intfs'] = intfs
# build connected interface port_channel
for intf in intfs:
if intf not in interface_dict:
interface_dict[intf] = {}
if 'port_channel' not in interface_dict[intf]:
interface_dict[intf]['port_channel'] = {}
interface_dict[intf]['port_channel']['port_channel_member'] = True
interface_dict[intf]['port_channel']['port_channel_int'] = interface
continue
# No. of active members in this channel: 12
m = p15_1.match(line)
if m:
group = m.groupdict()
active_members = int(group['active_members'])
interface_dict[interface]['port_channel']\
['port_channel_member'] = True
interface_dict[interface]['port_channel']\
['active_members'] = active_members
continue
# Member 2 : GigabitEthernet0/0/10 , Full-duplex, 900Mb/s
m = p15_2.match(line)
if m:
group = m.groupdict()
intf = group['interface']
if 'port_channel_member_intfs' not in interface_dict[interface]['port_channel']:
interface_dict[interface]['port_channel']\
['port_channel_member_intfs'] = []
interface_dict[interface]['port_channel']\
['port_channel_member_intfs'].append(intf)
continue
# No. of PF_JUMBO supported members in this channel : 0
m = p15_3.match(line)
if m:
group = m.groupdict()
number = int(group['number'])
interface_dict[interface]['port_channel']\
['num_of_pf_jumbo_supported_members'] = number
continue
# Last clearing of "show interface" counters 1d02h
m = p16.match(line)
if m:
last_clear = m.groupdict()['last_clear']
continue
# Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
m = p17.match(line)
if m:
if 'queues' not in interface_dict[interface]:
interface_dict[interface]['queues'] = {}
interface_dict[interface]['queues']['input_queue_size'] = \
int(m.groupdict()['size'])
interface_dict[interface]['queues']['input_queue_max'] = \
int(m.groupdict()['max'])
interface_dict[interface]['queues']['input_queue_drops'] = \
int(m.groupdict()['drops'])
interface_dict[interface]['queues']['input_queue_flushes'] = \
int(m.groupdict()['flushes'])
interface_dict[interface]['queues']['total_output_drop'] = \
int(m.groupdict()['output_drop'])
continue
# Queueing strategy: fifo
# Queueing strategy: Class-based queueing
m = p18.match(line)
if m:
if 'queues' not in interface_dict[interface]:
interface_dict[interface]['queues'] = {}
interface_dict[interface]['queues']['queue_strategy'] = \
m.groupdict()['queue_strategy']
continue
# Output queue: 0/0 (size/max)
# Output queue: 0/1000/64/0 (size/max total/threshold/drops)
m = p19.match(line)
if m:
if 'queues' not in interface_dict[interface]:
interface_dict[interface]['queues'] = {}
interface_dict[interface]['queues']['output_queue_size'] = \
int(m.groupdict()['size'])
interface_dict[interface]['queues']['output_queue_max'] = \
int(m.groupdict()['max'])
if m.groupdict()['threshold'] and m.groupdict()['drops']:
interface_dict[interface]['queues']['threshold'] = \
int(m.groupdict()['threshold'])
interface_dict[interface]['queues']['drops'] = \
int(m.groupdict()['drops'])
continue
# 5 minute input rate 0 bits/sec, 0 packets/sec
m = p20.match(line)
if m:
load_interval = int(m.groupdict()['load_interval'])
in_rate = int(m.groupdict()['in_rate'])
in_rate_pkts = int(m.groupdict()['in_rate_pkts'])
unit = m.groupdict()['unit']
# covert minutes to seconds
if 'minute' in unit:
load_interval = load_interval * 60
if 'counters' not in interface_dict[interface]:
interface_dict[interface]['counters'] = {}
if 'rate' not in interface_dict[interface]['counters']:
interface_dict[interface]['counters']['rate'] = {}
interface_dict[interface]['counters']['rate']\
['load_interval'] = load_interval
interface_dict[interface]['counters']['rate']\
['in_rate'] = in_rate
interface_dict[interface]['counters']['rate']\
['in_rate_pkts'] = in_rate_pkts
if 'last_clear' not in interface_dict[interface]['counters']:
try:
last_clear
except Exception:
pass
else:
interface_dict[interface]['counters']\
['last_clear'] = last_clear
continue
# 5 minute output rate 0 bits/sec, 0 packets/sec
m = p21.match(line)
if m:
out_rate = int(m.groupdict()['out_rate'])
out_rate_pkts = int(m.groupdict()['out_rate_pkts'])
interface_dict[interface]['counters']['rate']\
['out_rate'] = out_rate
interface_dict[interface]['counters']['rate']\
['out_rate_pkts'] = out_rate_pkts
continue
# 0 packets input, 0 bytes, 0 no buffer
m = p22.match(line)
if m:
if 'counters' not in interface_dict[interface]:
interface_dict[interface]['counters'] = {}
interface_dict[interface]['counters']['in_pkts'] = \
int(m.groupdict()['in_pkts'])
interface_dict[interface]['counters']['in_octets'] = \
int(m.groupdict()['in_octets'])
if m.groupdict()['in_no_buffer']:
interface_dict[interface]['counters']['in_no_buffer'] = \
int(m.groupdict()['in_no_buffer'])
continue
# Received 4173 broadcasts (0 IP multicasts)
# Received 535996 broadcasts (535961 multicasts)
m = p23.match(line)
if m:
                interface_dict[interface]['counters']['in_broadcast_pkts'] = \
                    int(m.groupdict()['in_broadcast_pkts'])
                interface_dict[interface]['counters']['in_multicast_pkts'] = \
                    int(m.groupdict()['in_multicast_pkts'])
continue
# 0 runts, 0 giants, 0 throttles
m = p24.match(line)
if m:
interface_dict[interface]['counters']['in_runts'] = \
int(m.groupdict()['in_runts'])
interface_dict[interface]['counters']['in_giants'] = \
int(m.groupdict()['in_giants'])
interface_dict[interface]['counters']['in_throttles'] = \
int(m.groupdict()['in_throttles'])
continue
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
# 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
m = p25.match(line)
if m:
interface_dict[interface]['counters']['in_errors'] = \
int(m.groupdict()['in_errors'])
interface_dict[interface]['counters']['in_crc_errors'] = \
int(m.groupdict()['in_crc_errors'])
interface_dict[interface]['counters']['in_frame'] = \
int(m.groupdict()['in_frame'])
interface_dict[interface]['counters']['in_overrun'] = \
int(m.groupdict()['in_overrun'])
interface_dict[interface]['counters']['in_ignored'] = \
int(m.groupdict()['in_ignored'])
if m.groupdict()['in_abort']:
interface_dict[interface]['counters']['in_abort'] = \
int(m.groupdict()['in_abort'])
continue
# 0 watchdog, 535961 multicast, 0 pause input
m = p26.match(line)
if m:
interface_dict[interface]['counters']['in_watchdog'] = \
int(m.groupdict()['in_watchdog'])
interface_dict[interface]['counters']['in_multicast_pkts'] = \
int(m.groupdict()['in_multicast_pkts'])
interface_dict[interface]['counters']['in_mac_pause_frames'] = \
int(m.groupdict()['in_pause_input'])
continue
# 0 input packets with dribble condition detected
m = p27.match(line)
if m:
interface_dict[interface]['counters']['in_with_dribble'] = \
int(m.groupdict()['in_with_dribble'])
continue
# 23376 packets output, 3642296 bytes, 0 underruns
m = p28.match(line)
if m:
interface_dict[interface]['counters']['out_pkts'] = \
int(m.groupdict()['out_pkts'])
interface_dict[interface]['counters']['out_octets'] = \
int(m.groupdict()['out_octets'])
if m.groupdict()['out_underruns']:
interface_dict[interface]['counters']['out_underruns'] = \
int(m.groupdict()['out_underruns'])
continue
# Received 4173 broadcasts (0 IP multicasts)
# Received 535996 broadcasts (535961 multicasts)
m = p29.match(line)
if m:
interface_dict[interface]['counters']['out_broadcast_pkts'] = \
int(m.groupdict()['out_broadcast_pkts'])
interface_dict[interface]['counters']['out_multicast_pkts'] = \
int(m.groupdict()['out_multicast_pkts'])
continue
# 0 output errors, 0 collisions, 2 interface resets
# 0 output errors, 0 interface resets
m = p30.match(line)
if m:
interface_dict[interface]['counters']['out_errors'] = \
int(m.groupdict()['out_errors'])
interface_dict[interface]['counters']['out_interface_resets'] = \
int(m.groupdict()['out_interface_resets'])
if m.groupdict()['out_collision']:
interface_dict[interface]['counters']['out_collision'] = \
int(m.groupdict()['out_collision'])
continue
# 0 unknown protocol drops
m = p31.match(line)
if m:
interface_dict[interface]['counters']['out_unknown_protocl_drops'] = \
int(m.groupdict()['out_unknown_protocl_drops'])
continue
# 0 babbles, 0 late collision, 0 deferred
m = p32.match(line)
if m:
interface_dict[interface]['counters']['out_babble'] = \
int(m.groupdict()['out_babble'])
interface_dict[interface]['counters']['out_late_collision'] = \
int(m.groupdict()['out_late_collision'])
interface_dict[interface]['counters']['out_deferred'] = \
int(m.groupdict()['out_deferred'])
continue
# 0 lost carrier, 0 no carrier, 0 pause output
m = p33.match(line)
if m:
interface_dict[interface]['counters']['out_lost_carrier'] = \
int(m.groupdict()['out_lost_carrier'])
interface_dict[interface]['counters']['out_no_carrier'] = \
int(m.groupdict()['out_no_carrier'])
out_pause_output = m.groupdict().get('out_pause_output', None)
if out_pause_output:
interface_dict[interface]['counters']['out_mac_pause_frames'] = \
int(m.groupdict()['out_pause_output'])
continue
# 0 output buffer failures, 0 output buffers swapped out
m = p34.match(line)
if m:
interface_dict[interface]['counters']['out_buffer_failure'] = \
int(m.groupdict()['out_buffer_failure'])
interface_dict[interface]['counters']['out_buffers_swapped'] = \
int(m.groupdict()['out_buffers_swapped'])
continue
# Interface is unnumbered. Using address of Loopback0 (10.4.1.1)
# Interface is unnumbered. Using address of GigabitEthernet0/2.1 (192.168.154.1)
m = p35.match(line)
if m:
unnumbered_dict[interface] = {}
unnumbered_dict[interface]['unnumbered_intf'] = m.groupdict()['unnumbered_intf']
unnumbered_dict[interface]['unnumbered_ip'] = m.groupdict()['unnumbered_ip']
continue
# 8 maximum active VCs, 1024 VCs per VP, 1 current VCCs
m = p36.match(line)
if m:
group = m.groupdict()
maximum_active_vcs = group['maximum_active_vcs']
vcs_per_vp = group['vcs_per_vp']
current_vccs = group['current_vccs']
interface_dict[interface].update({'maximum_active_vcs': maximum_active_vcs})
interface_dict[interface].update({'vcs_per_vp': vcs_per_vp})
interface_dict[interface].update({'current_vccs': current_vccs})
continue
# VC Auto Creation Disabled.
m = p37.match(line)
if m:
group = m.groupdict()
vc_auto_creation = group['vc_auto_creation']
interface_dict[interface].update({'vc_auto_creation': vc_auto_creation})
continue
# VC idle disconnect time: 300 seconds
m = p38.match(line)
if m:
group = m.groupdict()
vc_idle_disconnect_time = group['vc_idle_disconnect_time']
interface_dict[interface].update({'vc_idle_disconnect_time': vc_idle_disconnect_time})
continue
# AAL5 CRC errors : 0
m = p39.match(line)
if m:
group = m.groupdict()
interface_dict[interface].update({'aal5_crc_errors': int(group['val'])})
continue
# AAL5 SAR Timeouts : 0
m = p40.match(line)
if m:
group = m.groupdict()
                interface_dict[interface].update({'aal5_sar_timeouts': int(group['val'])})
continue
# AAL5 Oversized SDUs : 0
m = p41.match(line)
if m:
group = m.groupdict()
                interface_dict[interface].update({'aal5_oversized_sdus': int(group['val'])})
continue
# LCP Closed
m = p42.match(line)
if m:
group = m.groupdict()
interface_dict[interface].update({'lcp_state': group['state']})
loopback = group.get('loopback', None)
if loopback:
interface_dict[interface].update({'lcp_loopack': loopback})
continue
# Base PPPoATM vaccess
m = p43.match(line)
if m:
group = m.groupdict()
interface_dict[interface].update({'base_pppoatm': group['base_pppoatm']})
continue
# Vaccess status 0x44, loopback not set
m = p44.match(line)
if m:
group = m.groupdict()
interface_dict[interface].update({'vaccess_status': group['status']})
interface_dict[interface].update({'vaccess_loopback': group['loopback']})
continue
# DTR is pulsed for 5 seconds on reset
m = p45.match(line)
if m:
group = m.groupdict()
interface_dict[interface].update({'dtr_pulsed': group['dtr_pulsed']})
continue
# create strucutre for unnumbered interface
if not unnumbered_dict:
return(interface_dict)
for intf in unnumbered_dict:
unnumbered_intf = unnumbered_dict[intf]['unnumbered_intf']
unnumbered_ip = unnumbered_dict[intf]['unnumbered_ip']
if unnumbered_intf in interface_dict:
if 'ipv4' in interface_dict[unnumbered_intf]:
for ip in interface_dict[unnumbered_intf]['ipv4']:
if unnumbered_ip in ip:
if 'ipv4' not in interface_dict[intf]:
interface_dict[intf]['ipv4'] = {}
if ip not in interface_dict[intf]['ipv4']:
interface_dict[intf]['ipv4'][ip] = {}
m = re.search('([\w\.\:]+)\/(\d+)', ip)
interface_dict[intf]['ipv4'][ip]['ip'] = m.groups()[0]
interface_dict[intf]['ipv4'][ip]['prefix_length'] = m.groups()[1]
interface_dict[intf]['ipv4']['unnumbered'] = {}
interface_dict[intf]['ipv4']['unnumbered']\
['interface_ref'] = unnumbered_intf
return(interface_dict)
# parser using parsergen
# ----------------------
class ShowIpInterfaceBriefSchema(MetaParser):
"""Parser for show ip interface brief"""
schema = {'interface':
{Any():
{Optional('vlan_id'):
{Optional(Any()):
{'ip_address': str,
Optional('interface_is_ok'): str,
Optional('method'): str,
Optional('status'): str,
Optional('protocol'): str}
},
Optional('ip_address'): str,
Optional('interface_is_ok'): str,
Optional('method'): str,
Optional('status'): str,
Optional('protocol'): str}
},
}
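# The parser below consumes the standard tabular output of the command, e.g.
# (illustrative rows; the header fields match the parsergen label_fields used later):
#   Interface              IP-Address      OK? Method Status                Protocol
#   GigabitEthernet0/0     10.1.18.80      YES manual up                    up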
class ShowIpInterfaceBrief(ShowIpInterfaceBriefSchema):
"""Parser for:
show ip interface brief
parser class implements detail parsing mechanisms for cli and yang output.
"""
exclude = ['method', '(Tunnel.*)']
#*************************
# schema - class variable
#
# Purpose is to make sure the parser always return the output
# (nested dict) that has the same data structure across all supported
# parsing mechanisms (cli(), yang(), xml()).
def __init__ (self, *args, **kwargs):
super().__init__(*args, **kwargs)
cli_command = ['show ip interface brief {interface}','show ip interface brief']
def cli(self, interface='',output=None):
"""parsing mechanism: cli
Function cli() defines the cli type output parsing mechanism which
typically contains 3 steps: exe
cuting, transforming, returning
"""
parsed_dict = {}
if output is None:
if interface:
cmd = self.cli_command[0].format(interface=interface)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
if out:
res = parsergen.oper_fill_tabular(device_output=out,
device_os='iosxe',
table_terminal_pattern=r"^\n",
header_fields=
[ "Interface",
"IP-Address",
"OK\?",
"Method",
"Status",
"Protocol" ],
label_fields=
[ "Interface",
"ip_address",
"interface_is_ok",
"method",
"status",
"protocol" ],
index=[0])
# Building the schema out of the parsergen output
if res.entries:
for intf, intf_dict in res.entries.items():
intf = Common.convert_intf_name(intf)
del intf_dict['Interface']
parsed_dict.setdefault('interface', {}).update({intf: intf_dict})
return (parsed_dict)
def yang(self):
""" parsing mechanism: yang
Function yang() defines the yang type output parsing mechanism which
typically contains 3 steps: executing, transforming, returning
"""
pass
def yang_cli(self):
cli_output = self.cli()
yang_output = self.yang()
merged_output = merge_dict(yang_output,cli_output)
return merged_output
class ShowIpInterfaceBriefPipeVlan(ShowIpInterfaceBrief):
"""Parser for:
show ip interface brief | include Vlan
parser class implements detail parsing mechanisms for cli and yang output.
"""
#*************************
# schema - class variable
#
# Purpose is to make sure the parser always return the output
# (nested dict) that has the same data structure across all supported
# parsing mechanisms (cli(), yang(), xml()).
cli_command = "show ip interface brief | include Vlan"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cmd = self.cli_command
def cli(self):
super(ShowIpInterfaceBriefPipeVlan, self).cli()
def yang(self):
"""parsing mechanism: yang
Function yang() defines the yang type output parsing mechanism which
typically contains 3 steps: executing, transforming, returning
"""
ret = {}
cmd = '''<native><interface><Vlan/></interface></native>'''
output = self.device.get(('subtree', cmd))
for data in output.data:
for native in data:
for interface in native:
vlan_id = None
interface_name = None
ip_address = None
ip_mask = None
for vlan in interface:
# Remove the namespace
text = vlan.tag[vlan.tag.find('}')+1:]
#ydk.models.ned_edison.ned.Native.Interface.Vlan.name
#ydk.models.xe_recent_edison.Cisco_IOS_XE_native.Native.Interface.Vlan.name
if text == 'name':
vlan_id = vlan.text
interface_name = 'Vlan' + str(vlan_id)
continue
if text == 'ip':
for ip in vlan:
text = ip.tag[ip.tag.find('}')+1:]
#ydk.models.ned_edison.ned.Native.Interface.Vlan.ip.address
#ydk.models.xe_recent_edison.Cisco_IOS_XE_native.Native.Interface.Vlan.ip.address
if text == 'address':
for address in ip:
text = address.tag[address.tag.find('}')+1:]
#ydk.models.ned_edison.ned.Native.Interface.Vlan.ip.address.primary
#ydk.models.xe_recent_edison.Cisco_IOS_XE_native.Native.Interface.Vlan.ip.address.primary
if text == 'primary':
for primary in address:
# Remove the namespace
text = primary.tag[primary.tag.find('}')+1:]
#ydk.models.ned_edison.ned.Native.Interface.Vlan.ip.address.primary.address
#ydk.models.xe_recent_edison.Cisco_IOS_XE_native.Native.Interface.Vlan.ip.address.primary.address
if text == 'address':
ip_address = primary.text
continue
#ydk.models.ned_edison.ned.Native.Interface.Vlan.ip.address.primary.mask
#ydk.models.xe_recent_edison.Cisco_IOS_XE_native.Native.Interface.Vlan.ip.address.primary.mask
if text == 'mask':
ip_mask = primary.text
continue
# Let's build it now
if 'interface' not in ret:
ret['interface'] = {}
if interface_name is not None:
ret['interface'][interface_name] = {}
if vlan_id is not None:
ret['interface'][interface_name]['vlan_id'] = {}
ret['interface'][interface_name]['vlan_id'][vlan_id] = {}
if ip_address is not None:
ret['interface'][interface_name]['vlan_id'][vlan_id]['ip_address'] = ip_address
else:
ret['interface'][interface_name]['vlan_id'][vlan_id]['ip_address'] = 'unassigned'
return ret
def yang_cli(self):
super(ShowIpInterfaceBriefPipeVlan, self).yang_cli()
class ShowIpInterfaceBriefPipeIpSchema(MetaParser):
"""Schema for show ip interface brief | include <WORD>"""
schema = {'interface':
{Any():
{Optional('ip_address'): str,
Optional('interface_ok'): str,
Optional('method'): str,
Optional('interface_status'): str,
Optional('protocol_status'): str}
},
}
class ShowIpInterfaceBriefPipeIp(ShowIpInterfaceBriefPipeIpSchema):
"""Parser for:
show ip interface brief | include <WORD>
parser class implements detail parsing mechanisms for cli and yang output.
"""
#*************************
# schema - class variable
#
# Purpose is to make sure the parser always return the output
# (nested dict) that has the same data structure across all supported
# parsing mechanisms (cli(), yang(), xml()).
cli_command = 'show ip interface brief | include {ip}'
def cli(self, ip,output=None):
if output is None:
out = self.device.execute(self.cli_command.format(ip=ip))
else:
out = output
interface_dict = {}
# GigabitEthernet0/0 10.1.18.80 YES manual up up
p = re.compile(r'^\s*(?P<interface>[a-zA-Z0-9\/\.\-]+) '
'+(?P<ip_address>[a-z0-9\.]+) +(?P<interface_ok>[A-Z]+) '
'+(?P<method>[a-zA-Z]+) +(?P<interface_status>[a-z\s]+) '
'+(?P<protocol_status>[a-z]+)$')
for line in out.splitlines():
line = line.strip()
m = p.match(line)
if m:
interface = m.groupdict()['interface']
if 'interface' not in interface_dict:
interface_dict['interface'] = {}
if interface not in interface_dict['interface']:
interface_dict['interface'][interface] = {}
interface_dict['interface'][interface]['ip_address'] = \
m.groupdict()['ip_address']
interface_dict['interface'][interface]['interface_ok'] = \
m.groupdict()['interface_ok']
interface_dict['interface'][interface]['method'] = \
m.groupdict()['method']
interface_dict['interface'][interface]['interface_status'] = \
m.groupdict()['interface_status'].strip()
interface_dict['interface'][interface]['protocol_status'] = \
m.groupdict()['protocol_status']
continue
return interface_dict
class ShowInterfacesSwitchportSchema(MetaParser):
"""Schema for show interfaces switchport"""
schema = {
Any(): {
'switchport_enable': bool,
'switchport_mode': str,
Optional('operational_mode'): str,
Optional('port_channel'): {
Optional('port_channel_int'): str,
Optional('port_channel_member_intfs'): list,
Optional('port_channel_member'): bool,
},
Optional('encapsulation'): {
Optional('administrative_encapsulation'): str,
Optional('operational_encapsulation'): str,
Optional('native_vlan'): str,
Optional('native_vlan_name'): str,
},
Optional('negotiation_of_trunk'): bool,
Optional('access_vlan'): str,
Optional('access_vlan_name'): str,
Optional('voice_vlan'): str,
Optional('voice_vlan_name'): str,
Optional('native_vlan_tagging'): bool,
Optional('private_vlan'): {
Optional('host_association'): str,
Optional('mapping'): str,
Optional('native_vlan'): str,
Optional('native_vlan_tagging'): bool,
Optional('encapsulation'): str,
Optional('normal_vlans'): str,
Optional('associations'): str,
Optional('trunk_mappings'): str,
Optional('operational'): str,
},
Optional('trunk_vlans'): str,
Optional('pruning_vlans'): str,
Optional('capture_mode'): bool,
Optional('capture_vlans'): str,
Optional('protected'): bool,
Optional('unknown_unicast_blocked'): bool,
Optional('unknown_multicast_blocked'): bool,
Optional('appliance_trust'): str,
},
}
class ShowInterfacesSwitchport(ShowInterfacesSwitchportSchema):
"""parser for show interfaces switchport"""
cli_command = ['show interfaces switchport','show interfaces {interface} switchport']
def cli(self, interface='', output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
ret_dict = {}
private_trunk_mappings = None
private_operational = None
for line in out.splitlines():
line = line.strip()
# Name: Gi1/0/2
p1 = re.compile(r'^Name: +(?P<intf>[\w\/\.\-]+)$')
m = p1.match(line)
if m:
intf = Common.convert_intf_name(m.groupdict()['intf'])
if intf not in ret_dict:
ret_dict[intf] = {}
continue
# Switchport: Enabled
p2 = re.compile(r'^Switchport: +(?P<switchport_enable>\w+)$')
m = p2.match(line)
if m:
if m.groupdict()['switchport_enable'].lower() == 'enabled':
ret_dict[intf]['switchport_enable'] = True
else:
ret_dict[intf]['switchport_enable'] = False
continue
# Administrative Mode: trunk
p3 = re.compile(r'^Administrative +Mode: +(?P<switchport_mode>[\w\s]+)$')
m = p3.match(line)
if m:
ret_dict[intf]['switchport_mode'] = m.groupdict()['switchport_mode']
continue
# Operational Mode: trunk (member of bundle Po12)
# Operational Mode: down (suspended member of bundle Po12)
p4 = re.compile(r'^Operational +Mode: +(?P<operational_mode>[\w\s]+)'
r'( +\((?P<dummy>[\w\s]+)? *member +of +bundle '
r'+(?P<port_channel_int>[\w\/\.\-]+)\))?$')
m = p4.match(line)
if m:
ret_dict[intf]['operational_mode'] = m.groupdict()['operational_mode']
bundle_intf = m.groupdict()['port_channel_int']
if bundle_intf:
if 'port_channel' not in ret_dict[intf]:
ret_dict[intf]['port_channel'] = {}
bundle_intf = Common.convert_intf_name(bundle_intf)
ret_dict[intf]['port_channel']['port_channel_int'] = bundle_intf
ret_dict[intf]['port_channel']['port_channel_member'] = True
# bundle interface is port_channel interface as well
if bundle_intf not in ret_dict:
ret_dict[bundle_intf] = {}
if 'port_channel' not in ret_dict[bundle_intf]:
ret_dict[bundle_intf]['port_channel'] = {}
ret_dict[bundle_intf]['port_channel']['port_channel_member'] = True
# append the list
if 'port_channel_member_intfs' in ret_dict[bundle_intf]['port_channel']:
port_list = ret_dict[bundle_intf]['port_channel']['port_channel_member_intfs']
port_list.append(intf)
ret_dict[bundle_intf]['port_channel']['port_channel_member_intfs'] = port_list
else:
ret_dict[bundle_intf]['port_channel']['port_channel_member_intfs'] = [intf]
continue
# Administrative Trunking Encapsulation: dot1q
p5 = re.compile(r'^Administrative +Trunking +Encapsulation: +'
'(?P<encapsulation>\w+)$')
m = p5.match(line)
if m:
if 'encapsulation' not in ret_dict[intf]:
ret_dict[intf]['encapsulation'] = {}
ret_dict[intf]['encapsulation']['administrative_encapsulation'] = \
m.groupdict()['encapsulation'].lower()
continue
# Operational Trunking Encapsulation: dot1q
p6 = re.compile(r'^Operational +Trunking +Encapsulation: +'
'(?P<encapsulation>\w+)$')
m = p6.match(line)
if m:
if 'encapsulation' not in ret_dict[intf]:
ret_dict[intf]['encapsulation'] = {}
ret_dict[intf]['encapsulation']['operational_encapsulation'] = \
m.groupdict()['encapsulation'].lower()
continue
# Negotiation of Trunking: On
p7 = re.compile(r'^Negotiation +of +Trunking: +(?P<negotiation_of_trunk>\w+)$')
m = p7.match(line)
if m:
negotiation_of_trunk = m.groupdict()['negotiation_of_trunk'].lower()
if 'on' in negotiation_of_trunk:
ret_dict[intf]['negotiation_of_trunk'] = True
elif 'off' in negotiation_of_trunk:
ret_dict[intf]['negotiation_of_trunk'] = False
continue
# Access Mode VLAN: 1 (default)
# Access Mode VLAN: 100 (Falback-Data)
p8 = re.compile(r'^Access +Mode +VLAN: +(?P<access_vlan>[\d\-]+)'
'( *\((?P<access_vlan_name>.+)\))?$')
m = p8.match(line)
if m:
ret_dict[intf]['access_vlan'] = m.groupdict()['access_vlan']
if m.groupdict()['access_vlan_name']:
ret_dict[intf]['access_vlan_name'] = m.groupdict()['access_vlan_name']
continue
# Trunking Native Mode VLAN: 1 (default)
p9 = re.compile(r'^Trunking +Native +Mode +VLAN: +(?P<native_vlan>[\d\-]+)'
'( *\((?P<native_vlan_name>.+)\))?$')
m = p9.match(line)
if m:
if 'encapsulation' not in ret_dict[intf]:
ret_dict[intf]['encapsulation'] = {}
ret_dict[intf]['encapsulation']['native_vlan'] = m.groupdict()['native_vlan']
if m.groupdict()['native_vlan_name']:
ret_dict[intf]['encapsulation']['native_vlan_name'] = m.groupdict()['native_vlan_name']
continue
# Administrative Native VLAN tagging: enabled
p10 = re.compile(r'^Administrative +Native +VLAN +tagging: +'
'(?P<tagging>\w+)$')
m = p10.match(line)
if m:
if 'enable' in m.groupdict()['tagging'].lower():
ret_dict[intf]['native_vlan_tagging'] = True
else:
ret_dict[intf]['native_vlan_tagging'] = False
continue
# Voice VLAN: none
# Voice VLAN: 100 (Fallback-Voice)
p11 = re.compile(r'^Voice +VLAN: +(?P<vlan>[\d\-]+)'
'( *\((?P<voice_vlan_name>.+)\))?$')
m = p11.match(line)
if m:
ret_dict[intf]['voice_vlan'] = m.groupdict()['vlan']
if m.groupdict()['voice_vlan_name']:
ret_dict[intf]['voice_vlan_name'] = m.groupdict()['voice_vlan_name']
continue
# Administrative private-vlan host-association: none
p12 = re.compile(r'^Administrative +private-vlan +'
'host-association: +(?P<ret>[\w\-]+)$')
m = p12.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
ret = m.groupdict()['ret'].lower()
if ret != 'none':
ret_dict[intf]['private_vlan']['host_association'] = m.groupdict()['ret']
continue
# Administrative private-vlan mapping: none
p13 = re.compile(r'^Administrative +private-vlan +'
'mapping: +(?P<ret>[\w\-]+)$')
m = p13.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
ret = m.groupdict()['ret'].lower()
if ret != 'none':
ret_dict[intf]['private_vlan']['mapping'] = m.groupdict()['ret']
continue
# Administrative private-vlan trunk native VLAN: none
p14 = re.compile(r'^Administrative +private-vlan +'
'trunk +native +VLAN: +(?P<ret>[\w\-]+)$')
m = p14.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
ret = m.groupdict()['ret'].lower()
if ret != 'none':
ret_dict[intf]['private_vlan']['native_vlan'] = m.groupdict()['ret']
continue
# Administrative private-vlan trunk Native VLAN tagging: enabled
p15 = re.compile(r'^Administrative +private-vlan +'
'trunk +Native +VLAN +tagging: +(?P<ret>[\w\-]+)$')
m = p15.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
ret = m.groupdict()['ret'].lower()
if 'enable' in ret:
ret_dict[intf]['private_vlan']['native_vlan_tagging'] = True
else:
ret_dict[intf]['private_vlan']['native_vlan_tagging'] = False
continue
# Administrative private-vlan trunk encapsulation: dot1q
p16 = re.compile(r'^Administrative +private-vlan +'
'trunk +encapsulation: +(?P<ret>[\w\-]+)$')
m = p16.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
ret = m.groupdict()['ret'].lower()
if ret != 'none':
ret_dict[intf]['private_vlan']['encapsulation'] = m.groupdict()['ret']
continue
# Administrative private-vlan trunk normal VLANs: none
p17 = re.compile(r'^Administrative +private-vlan +'
'trunk +normal +VLANs: +(?P<ret>[\w\-]+)$')
m = p17.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
ret = m.groupdict()['ret'].lower()
if ret != 'none':
ret_dict[intf]['private_vlan']['normal_vlans'] = m.groupdict()['ret']
continue
# Administrative private-vlan trunk associations: none
p18 = re.compile(r'^Administrative +private-vlan +'
'trunk +associations: +(?P<ret>[\w\-]+)$')
m = p18.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
ret = m.groupdict()['ret'].lower()
if ret != 'none':
ret_dict[intf]['private_vlan']['associations'] = m.groupdict()['ret']
continue
# Administrative private-vlan trunk mappings: none
# Administrative private-vlan trunk mappings:
p19 = re.compile(r'^Administrative +private-vlan +'
'trunk +mappings:( *(?P<ret>[\w\-]+))?$')
m = p19.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
private_trunk_mappings = m.groupdict()['ret']
if private_trunk_mappings and private_trunk_mappings.lower() != 'none':
ret_dict[intf]['private_vlan']['trunk_mappings'] = private_trunk_mappings
private_trunk_mappings = ''
continue
# 10 (VLAN0010) 100 (VLAN0100)
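# (continuation line of the administrative private-vlan trunk mappings captured by p19 above)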
if isinstance(private_trunk_mappings, str):
p19_1 = re.compile(r'^(?P<mappings>[\w\(\)\s]+)$')
m = p19_1.match(line)
if m:
ret = m.groupdict()['mappings']
private_trunk_mappings += ' {}'.format(ret)
ret_dict[intf]['private_vlan']['trunk_mappings'] = private_trunk_mappings.strip()
# reset private_trunk_mappings
private_trunk_mappings = None
continue
# Operational private-vlan: none
# Operational private-vlan:
p20 = re.compile(r'^Operational +private-vlan:'
'( *(?P<private_operational>[\w\-]+))?$')
m = p20.match(line)
if m:
if 'private_vlan' not in ret_dict[intf]:
ret_dict[intf]['private_vlan'] = {}
private_operational = m.groupdict()['private_operational']
if private_operational and private_operational.lower() != 'none':
ret_dict[intf]['private_vlan']['operational'] = private_operational
private_operational = ''
continue
# Trunking VLANs Enabled: 200-211
# Trunking VLANs Enabled: 100,101,110-120,121,130,170,180,
p21 = re.compile(r'^Trunking +VLANs +Enabled: +(?P<trunk_vlans>[\w\-\,\s]+)$')
m = p21.match(line)
if m:
ret_dict[intf]['trunk_vlans'] = m.groupdict()['trunk_vlans'].lower()
continue
# 10 (VLAN0010) 100 (VLAN0100)
if isinstance(private_operational, str):
p20_1 = re.compile(r'^(?P<private_operational>[\w\(\)\s]+)$')
m = p20_1.match(line)
if m:
ret = m.groupdict()['private_operational']
private_operational += ' {}'.format(ret)
ret_dict[intf]['private_vlan']['operational'] = private_operational.strip()
# reset private_operational
private_operational = None
continue
# 1111,2222,3333, 500-55,
p21_1 = re.compile(r'^(?P<trunk_vlans>[\d\,\-]+)$')
m = p21_1.match(line)
if m:
ret_dict[intf]['trunk_vlans'] += m.groupdict()['trunk_vlans'].lower()
continue
# Pruning VLANs Enabled: 2-1001
p22 = re.compile(r'^Pruning +VLANs +Enabled: +(?P<pruning_vlans>[\w\-]+)$')
m = p22.match(line)
if m:
ret_dict[intf]['pruning_vlans'] = m.groupdict()['pruning_vlans'].lower()
continue
# Capture Mode Disabled
p23 = re.compile(r'^Capture +Mode +(?P<mode>\w+)$')
m = p23.match(line)
if m:
mode = m.groupdict()['mode'].lower()
if 'disabled' in mode:
ret_dict[intf]['capture_mode'] = False
else:
ret_dict[intf]['capture_mode'] = True
continue
# Capture VLANs Allowed: ALL
p24 = re.compile(r'^Capture +VLANs +Allowed: +(?P<capture_vlans>[\w\-]+)$')
m = p24.match(line)
if m:
ret_dict[intf]['capture_vlans'] = m.groupdict()['capture_vlans'].lower()
continue
# Protected: false
p25 = re.compile(r'^Protected: +(?P<protected>\w+)$')
m = p25.match(line)
if m:
if 'false' in m.groupdict()['protected'].lower():
ret_dict[intf]['protected'] = False
else:
ret_dict[intf]['protected'] = True
continue
# Unknown unicast blocked: disabled
p26 = re.compile(r'^Unknown +unicast +blocked: +(?P<block>\w+)$')
m = p26.match(line)
if m:
if 'disabled' in m.groupdict()['block'].lower():
ret_dict[intf]['unknown_unicast_blocked'] = False
else:
ret_dict[intf]['unknown_unicast_blocked'] = True
continue
# Unknown multicast blocked: disabled
p27 = re.compile(r'^Unknown +multicast +blocked: +(?P<block>\w+)$')
m = p27.match(line)
if m:
if 'disabled' in m.groupdict()['block'].lower():
ret_dict[intf]['unknown_multicast_blocked'] = False
else:
ret_dict[intf]['unknown_multicast_blocked'] = True
continue
# Appliance trust: none
p28 = re.compile(r'^Appliance +trust: +(?P<trust>[\w\-]+)$')
m = p28.match(line)
if m:
if m.groupdict()['trust'] != 'none':
ret_dict[intf]['appliance_trust'] = m.groupdict()['trust']
continue
return ret_dict
class ShowIpInterfaceSchema(MetaParser):
"""Schema for show ip interface
show ip interface <interface>"""
schema = {
Any(): {
'enabled': bool,
'oper_status': str,
Optional('ipv4'): {
Any(): {
'ip': str,
Optional('prefix_length'): str,
Optional('secondary'): bool,
Optional('broadcast_address'): str,
},
},
Optional('mtu'): int,
Optional('address_determined_by'): str,
Optional('helper_address'): Or(str, list),
Optional('directed_broadcast_forwarding'): bool,
Optional('outbound_common_access_list'): str,
Optional('outbound_access_list'): str,
Optional('inbound_common_access_list'): str,
Optional('inbound_access_list'): str,
Optional('proxy_arp'): bool,
Optional('local_proxy_arp'): bool,
Optional('security_level'): str,
Optional('split_horizon'): bool,
Optional('icmp'): {
Optional('redirects'): str,
Optional('unreachables'): str,
Optional('mask_replies'): str,
},
Optional('wccp'): {
Optional('redirect_outbound'): bool,
Optional('redirect_inbound'): bool,
Optional('redirect_exclude'): bool,
},
Optional('ip_fast_switching'): bool,
Optional('ip_flow_switching'): bool,
Optional('ip_cef_switching'): bool,
Optional('ip_cef_switching_turbo_vector'): bool,
Optional('ip_null_turbo_vector'): bool,
Optional('vrf'): str,
Optional('unicast_routing_topologies'): {
'topology': {
Any(): {
'status': str,
}
},
},
Optional('ip_multicast_fast_switching'): bool,
Optional('ip_multicast_distributed_fast_switching'): bool,
Optional('ip_route_cache_flags'): list,
Optional('router_discovery'): bool,
Optional('ip_output_packet_accounting'): bool,
Optional('ip_access_violation_accounting'): bool,
Optional('tcp_ip_header_compression'): bool,
Optional('rtp_ip_header_compression'): bool,
Optional('probe_proxy_name_replies'): bool,
Optional('policy_routing'): bool,
Optional('network_address_translation'): bool,
Optional('bgp_policy_mapping'): bool,
Optional('input_features'): list,
Optional('multicast_groups'): list,
},
}
class ShowIpInterface(ShowIpInterfaceSchema):
"""Parser for show ip interface
show ip interface <interface>"""
cli_command = ['show ip interface','show ip interface {interface}']
exclude = ['unnumbered', 'address_determined_by', '(Tunnel.*)', 'joins', 'leaves']
def cli(self,interface="",output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
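# initial variables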
interface_dict = {}
unnumbered_dict = {}
for line in out.splitlines():
line = line.strip()
# Vlan211 is up, line protocol is up
# GigabitEthernet2 is administratively down, line protocol is down
p1 = re.compile(r'^(?P<interface>[\w\/\.\-]+) +is'
r' +(?P<enabled>[\w\s]+),'
r' +line +protocol +is +(?P<oper_status>\w+)$')
m = p1.match(line)
if m:
interface = m.groupdict()['interface']
enabled = m.groupdict()['enabled'].lower()
if interface not in interface_dict:
interface_dict[interface] = {}
if 'administratively down' in enabled or 'delete' in enabled:
interface_dict[interface]['enabled'] = False
else:
interface_dict[interface]['enabled'] = True
interface_dict[interface]['oper_status'] = \
m.groupdict()['oper_status'].lower()
# initial variables
multicast_groups = []
continue
# Internet address is 192.168.76.1/24
p2 = re.compile(r'^Internet +[A|a]ddress +is +(?P<ipv4>(?P<ip>[0-9\.]+)'
r'\/(?P<prefix_length>[0-9]+))$')
m = p2.match(line)
if m:
ip = m.groupdict()['ip']
prefix_length = m.groupdict()['prefix_length']
address = m.groupdict()['ipv4']
if 'ipv4' not in interface_dict[interface]:
interface_dict[interface]['ipv4'] = {}
if address not in interface_dict[interface]['ipv4']:
interface_dict[interface]['ipv4'][address] = {}
interface_dict[interface]['ipv4'][address]\
['ip'] = ip
interface_dict[interface]['ipv4'][address]\
['prefix_length'] = prefix_length
interface_dict[interface]['ipv4'][address]\
['secondary'] = False
continue
# Interface is unnumbered. Using address of GigabitEthernet0/0.101 (10.1.98.10)
p2_0 = re.compile(r'^Interface +is +unnumbered. +Using +address +of +(\S+)'
r' +\((?P<ipv4>(?P<ip>[0-9\.]+))\)$')
m = p2_0.match(line)
if m:
ip = m.groupdict()['ip']
address = m.groupdict()['ipv4']
if 'ipv4' not in interface_dict[interface]:
interface_dict[interface]['ipv4'] = {}
if address not in interface_dict[interface]['ipv4']:
interface_dict[interface]['ipv4'][address] = {}
interface_dict[interface]['ipv4'][address]\
['ip'] = ip
interface_dict[interface]['ipv4'][address]\
['secondary'] = False
continue
# Secondary address 10.2.2.2/24
p2_1 = re.compile(r'^Secondary +address +(?P<ipv4>(?P<ip>[0-9\.]+)'
r'\/(?P<prefix_length>[0-9]+))$')
m = p2_1.match(line)
if m:
ip = m.groupdict()['ip']
prefix_length = m.groupdict()['prefix_length']
address = m.groupdict()['ipv4']
if 'ipv4' not in interface_dict[interface]:
interface_dict[interface]['ipv4'] = {}
if address not in interface_dict[interface]['ipv4']:
interface_dict[interface]['ipv4'][address] = {}
interface_dict[interface]['ipv4'][address]\
['ip'] = ip
interface_dict[interface]['ipv4'][address]\
['prefix_length'] = prefix_length
interface_dict[interface]['ipv4'][address]\
['secondary'] = True
continue
# Internet address will be negotiated using DHCP
# Internet address will be negotiated using IPCP
p2_2 = re.compile(r'^Internet +[A|a]ddress +will +be +negotiated '
r'+using +(?P<negotiated>DHCP|IPCP)$')
m = p2_2.match(line)
if m:
negotiated_holder = m.groupdict()
if 'DHCP' in negotiated_holder.get('negotiated'):
address='dhcp_negotiated'
if 'IPCP' in negotiated_holder.get('negotiated'):
address='ipcp_negotiated'
ipv4_dict = interface_dict[interface].setdefault('ipv4',{})
ipv4_dict.setdefault(address, {})
ipv4_dict[address]['ip'] = address
continue
# Broadcast address is 255.255.255.255
p3 = re.compile(r'^Broadcast +address +is +(?P<address>[\w\.\:]+)$')
m = p3.match(line)
if m:
interface_dict[interface]['ipv4'][address]['broadcast_address'] = \
m.groupdict()['address']
continue
# Address determined by configuration file
# Address determined by non-volatile memory
p36 = re.compile(r'^Address +determined +by +(?P<file>[\w\s\-]+)$')
m = p36.match(line)
if m:
interface_dict[interface]['address_determined_by'] = \
m.groupdict()['file']
continue
# MTU is 1500 bytes
p4 = re.compile(r'^MTU +is +(?P<mtu>\d+) +bytes$')
m = p4.match(line)
if m:
interface_dict[interface]['mtu'] = \
int(m.groupdict()['mtu'])
continue
# Helper address is not set
p5 = re.compile(r'^Helper +address +is +not +set$')
m = p5.match(line)
if m:
continue
# Helper address is 10.1.1.1
p5_0 = re.compile(r'^Helper +address +is +(?P<address>[\d\.]+)$')
m = p5_0.match(line)
if m:
interface_dict[interface]['helper_address'] = \
[m.groupdict()['address']]
continue
# Helper addresses are 10.1.1.1
p5_1 = re.compile(r'^Helper +addresses +are +(?P<address>[\w\.\:\s]+)$')
m = p5_1.match(line)
if m:
helper_flag = True
if 'not set' not in m.groupdict()['address']:
helper_list = []
helper_list.append(m.groupdict()['address'])
interface_dict[interface]['helper_address'] = \
helper_list
continue
# 10.2.2.2
p5_2 = re.compile(r'^(?P<address>[\d\.]+)$')
m = p5_2.match(line)
if m:
if helper_flag:
helper_list.append(m.groupdict()['address'])
continue
else:
helper_flag = False
# Directed broadcast forwarding is disabled
p6 = re.compile(r'^Directed +broadcast +forwarding +is +(?P<status>\w+)$')
m = p6.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['directed_broadcast_forwarding'] = False
else:
interface_dict[interface]['directed_broadcast_forwarding'] = True
continue
# Multicast reserved groups joined: 224.0.0.1 224.0.0.2 224.0.0.22 224.0.0.13
p41 = re.compile(r'^Multicast +reserved +groups +joined: +(?P<multicast_groups>[\w\s\.]+)$')
m = p41.match(line)
if m:
multicast_groups_address = str(m.groupdict()['multicast_groups'])
# Split string of addresses into a list
multicast_groups = multicast_groups_address.split()
interface_dict[interface]['multicast_groups']\
= sorted(multicast_groups)
continue
# (continuation lines listing additional multicast group addresses)
p41_1 = re.compile(r'(?P<multicast_groups>\d+\.\d+\.\d+\.\d+)')
m = p41_1.findall(line)
if m and multicast_groups:
multicast_groups.extend(m)
interface_dict[interface]['multicast_groups']\
= sorted(multicast_groups)
continue
# Outgoing Common access list is not set
p7 = re.compile(r'^Outgoing +Common +access +list +is +'
r'(?P<access_list>.+)$')
m = p7.match(line)
if m:
if 'not set' not in m.groupdict()['access_list']:
interface_dict[interface]['outbound_common_access_list'] = \
m.groupdict()['access_list']
continue
# Outgoing access list is not set
p8 = re.compile(r'^Outgoing +access +list +is +'
r'(?P<access_list>.+)$')
m = p8.match(line)
if m:
if 'not set' not in m.groupdict()['access_list']:
interface_dict[interface]['outbound_access_list'] = \
m.groupdict()['access_list']
continue
# Inbound Common access list is not set
p9 = re.compile(r'^Inbound +Common +access +list +is +'
r'(?P<access_list>.+)$')
m = p9.match(line)
if m:
if 'not set' not in m.groupdict()['access_list']:
interface_dict[interface]['inbound_common_access_list'] = \
m.groupdict()['access_list']
continue
# Inbound access list is not set
p10 = re.compile(r'^Inbound +access +list +is +'
r'(?P<access_list>.+)$')
m = p10.match(line)
if m:
if 'not set' not in m.groupdict()['access_list']:
interface_dict[interface]['inbound_access_list'] = \
m.groupdict()['access_list']
continue
# Proxy ARP is enabled
p11 = re.compile(r'^Proxy +ARP +is +'
r'(?P<status>\w+)$')
m = p11.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['proxy_arp'] = False
else:
interface_dict[interface]['proxy_arp'] = True
continue
# Local Proxy ARP is disabled
p12 = re.compile(r'^Local +Proxy +ARP +is +'
r'(?P<status>\w+)$')
m = p12.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['local_proxy_arp'] = False
else:
interface_dict[interface]['local_proxy_arp'] = True
continue
# Security level is default
p13 = re.compile(r'^Security +level +is +'
r'(?P<level>\w+)$')
m = p13.match(line)
if m:
interface_dict[interface]['security_level'] = m.groupdict()['level']
continue
# Split horizon is enabled
p14 = re.compile(r'^Split +horizon +is +'
r'(?P<status>\w+)$')
m = p14.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['split_horizon'] = False
else:
interface_dict[interface]['split_horizon'] = True
continue
# ICMP redirects are always sent
p15 = re.compile(r'^ICMP +redirects +are +'
r'(?P<sent>[\w\s]+)$')
m = p15.match(line)
if m:
if 'icmp' not in interface_dict[interface]:
interface_dict[interface]['icmp'] = {}
if 'not set' not in m.groupdict()['sent']:
interface_dict[interface]['icmp']['redirects'] = \
m.groupdict()['sent']
continue
# ICMP unreachables are always sent
p16 = re.compile(r'^ICMP +unreachables +are +'
r'(?P<sent>[\w\s]+)$')
m = p16.match(line)
if m:
if 'icmp' not in interface_dict[interface]:
interface_dict[interface]['icmp'] = {}
if 'not set' not in m.groupdict()['sent']:
interface_dict[interface]['icmp']['unreachables'] = \
m.groupdict()['sent']
continue
# ICMP mask replies are never sent
p17 = re.compile(r'^ICMP +mask +replies +are +'
r'(?P<sent>[\w\s]+)$')
m = p17.match(line)
if m:
if 'icmp' not in interface_dict[interface]:
interface_dict[interface]['icmp'] = {}
if 'not set' not in m.groupdict()['sent']:
interface_dict[interface]['icmp']['mask_replies'] = \
m.groupdict()['sent']
continue
# IP fast switching is enabled
p18 = re.compile(r'^IP +fast +switching +is +'
r'(?P<status>\w+)$')
m = p18.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['ip_fast_switching'] = False
else:
interface_dict[interface]['ip_fast_switching'] = True
continue
# IP Flow switching is disabled
p19 = re.compile(r'^IP +Flow +switching +is +'
r'(?P<status>\w+)$')
m = p19.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['ip_flow_switching'] = False
else:
interface_dict[interface]['ip_flow_switching'] = True
continue
# IP CEF switching is enabled
p20 = re.compile(r'^IP +CEF +switching +is +'
r'(?P<status>\w+)$')
m = p20.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['ip_cef_switching'] = False
else:
interface_dict[interface]['ip_cef_switching'] = True
continue
# IP CEF switching turbo vector
p21 = re.compile(r'^IP +CEF +switching +turbo +vector$')
m = p21.match(line)
if m:
interface_dict[interface]['ip_cef_switching_turbo_vector'] = True
continue
# IP Null turbo vector
p22 = re.compile(r'^IP +Null +turbo +vector$')
m = p22.match(line)
if m:
interface_dict[interface]['ip_null_turbo_vector'] = True
continue
# VPN Routing/Forwarding "Mgmt-vrf"
p23 = re.compile(r'^VPN +Routing\/Forwarding +\"(?P<vrf>[\w\-]+)\"$')
m = p23.match(line)
if m:
interface_dict[interface]['vrf'] = m.groupdict()['vrf']
continue
# Associated unicast routing topologies:
# Topology "base", operation state is UP
p24 = re.compile(r'^Associated +unicast +routing +topologies:$')
m = p24.match(line)
if m:
if 'unicast_routing_topologies' not in interface_dict[interface]:
interface_dict[interface]['unicast_routing_topologies'] = {}
continue
p24_1 = re.compile(r'^Topology +\"(?P<topo>\w+)\", +'
r'operation +state +is +(?P<topo_status>\w+)$')
m = p24_1.match(line)
if m:
if 'unicast_routing_topologies' in interface_dict[interface]:
if 'topology' not in interface_dict[interface]\
['unicast_routing_topologies']:
interface_dict[interface]['unicast_routing_topologies']['topology'] = {}
topo = m.groupdict()['topo']
if topo not in interface_dict[interface]\
['unicast_routing_topologies']['topology']:
interface_dict[interface]['unicast_routing_topologies']\
['topology'][topo] = {}
interface_dict[interface]['unicast_routing_topologies']\
['topology'][topo]['status'] = m.groupdict()['topo_status'].lower()
continue
# IP multicast fast switching is disabled
p25 = re.compile(r'^IP +multicast +fast +switching +is +'
r'(?P<status>\w+)$')
m = p25.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['ip_multicast_fast_switching'] = False
else:
interface_dict[interface]['ip_multicast_fast_switching'] = True
continue
# IP multicast distributed fast switching is disabled
p25 = re.compile(r'^IP +multicast +distributed +fast +switching +is +'
r'(?P<status>\w+)$')
m = p25.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['ip_multicast_distributed_fast_switching'] = False
else:
interface_dict[interface]['ip_multicast_distributed_fast_switching'] = True
continue
# IP route-cache flags are Fast, CEF
p26 = re.compile(r'^IP +route\-cache +flags +are +(?P<flags>[\w\s\,]+)$')
m = p26.match(line)
if m:
ret = m.groupdict()['flags'].split(',')
ret = [i.strip() for i in ret]
interface_dict[interface]['ip_route_cache_flags'] = sorted(ret)
continue
# Router Discovery is disabled
p27 = re.compile(r'^Router +Discovery +is +'
r'(?P<status>\w+)$')
m = p27.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['router_discovery'] = False
else:
interface_dict[interface]['router_discovery'] = True
continue
# IP output packet accounting is disabled
p28 = re.compile(r'^IP +output +packet +accounting +is +'
r'(?P<status>\w+)$')
m = p28.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['ip_output_packet_accounting'] = False
else:
interface_dict[interface]['ip_output_packet_accounting'] = True
continue
# IP access violation accounting is disabled
p29 = re.compile(r'^IP +access +violation +accounting +is +'
r'(?P<status>\w+)$')
m = p29.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['ip_access_violation_accounting'] = False
else:
interface_dict[interface]['ip_access_violation_accounting'] = True
continue
# TCP/IP header compression is disabled
p30 = re.compile(r'^TCP\/IP +header +compression +is +'
r'(?P<status>\w+)$')
m = p30.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['tcp_ip_header_compression'] = False
else:
interface_dict[interface]['tcp_ip_header_compression'] = True
continue
# RTP/IP header compression is disabled
p31 = re.compile(r'^RTP\/IP +header +compression +is +'
r'(?P<status>\w+)$')
m = p31.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['rtp_ip_header_compression'] = False
else:
interface_dict[interface]['rtp_ip_header_compression'] = True
continue
# Probe proxy name replies are disabled
p32 = re.compile(r'^Probe +proxy +name +replies +are +'
r'(?P<status>\w+)$')
m = p32.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['probe_proxy_name_replies'] = False
else:
interface_dict[interface]['probe_proxy_name_replies'] = True
continue
# Policy routing is disabled
p33 = re.compile(r'^Policy +routing +is +'
r'(?P<status>\w+)$')
m = p33.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['policy_routing'] = False
else:
interface_dict[interface]['policy_routing'] = True
continue
# Network address translation is disabled
p34 = re.compile(r'^Network +address +translation +is +'
r'(?P<status>\w+)$')
m = p34.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['network_address_translation'] = False
else:
interface_dict[interface]['network_address_translation'] = True
continue
# BGP Policy Mapping is disabled
p35 = re.compile(r'^BGP +Policy +Mapping +is +'
r'(?P<status>\w+)$')
m = p35.match(line)
if m:
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['bgp_policy_mapping'] = False
else:
interface_dict[interface]['bgp_policy_mapping'] = True
continue
# Input features: MCI Check
# Input features: QoS Classification, QoS Marking, MCI Check
p36 = re.compile(r'^Input +features: +(?P<input_feature>[\w\s\,]+)$')
m = p36.match(line)
if m:
features = m.groupdict()['input_feature'].split(',')
features = [i.strip() for i in features]
interface_dict[interface]['input_features'] = sorted(features)
continue
# IPv4 WCCP Redirect outbound is disable
p37 = re.compile(r'^IPv4 +WCCP +Redirect +outbound +is +(?P<status>\w+)$')
m = p37.match(line)
if m:
if 'wccp' not in interface_dict[interface]:
interface_dict[interface]['wccp'] = {}
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['wccp']\
['redirect_outbound'] = False
else:
interface_dict[interface]['wccp']\
['redirect_outbound'] = True
continue
# IPv4 WCCP Redirect inbound is disabled
p38 = re.compile(r'^IPv4 +WCCP +Redirect +inbound +is +(?P<status>\w+)$')
m = p38.match(line)
if m:
if 'wccp' not in interface_dict[interface]:
interface_dict[interface]['wccp'] = {}
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['wccp']\
['redirect_inbound'] = False
else:
interface_dict[interface]['wccp']\
['redirect_inbound'] = True
# IPv4 WCCP Redirect exclude is disabled
p39 = re.compile(r'^IPv4 +WCCP +Redirect +exclude +is +(?P<status>\w+)$')
m = p39.match(line)
if m:
if 'wccp' not in interface_dict[interface]:
interface_dict[interface]['wccp'] = {}
if 'disabled' in m.groupdict()['status']:
interface_dict[interface]['wccp']\
['redirect_exclude'] = False
else:
interface_dict[interface]['wccp']\
['redirect_exclude'] = True
# Interface is unnumbered. Using address of Loopback11 (192.168.151.1)
p40 = re.compile(r'^Interface +is +unnumbered. +Using +address +of +'
r'(?P<unnumbered_intf>[\w\/\-\.]+) +'
r'\((?P<unnumbered_ip>[\w\.\:]+)\)$')
m = p40.match(line)
if m:
unnumbered_dict[interface] = {}
unnumbered_intf = m.groupdict()['unnumbered_intf']
unnumbered_ip = m.groupdict()['unnumbered_ip']
unnumbered_dict[interface]['unnumbered_intf'] = unnumbered_intf
unnumbered_dict[interface]['unnumbered_ip'] = unnumbered_ip
if unnumbered_intf in interface_dict:
if 'ipv4' in interface_dict[unnumbered_intf]:
for address in interface_dict[unnumbered_intf]['ipv4']:
if unnumbered_ip in address:
ip_dict = interface_dict[interface].\
setdefault('ipv4', {}).setdefault(address, {})
m = re.search('([\w\.\:]+)\/(\d+)', address)
ip_dict['ip'] = m.groups()[0]
ip_dict['prefix_length'] = m.groups()[1]
ip_dict['secondary'] = False
break
else:
address = unnumbered_ip
if 'ipv4' not in interface_dict[interface]:
interface_dict[interface]['ipv4'] = {}
if address not in interface_dict[interface]['ipv4']:
interface_dict[interface]['ipv4'][address] = {}
interface_dict[interface]['ipv4'][address]['ip'] = address
continue
return interface_dict
class ShowIpv6InterfaceSchema(MetaParser):
"""Schema for show ipv6 interface"""
schema = {
Any(): {
'oper_status': str,
'enabled': bool,
Optional('autoconf'): bool,
'ipv6': {
Any(): {
'ip': str,
Optional('prefix_length'): str,
Optional('status'): str,
Optional('origin'): str,
Optional('anycast'): bool,
Optional('eui_64'): bool,
Optional('virtual'): bool,
Optional('autoconf'): {
'valid_lifetime': int,
'preferred_lifetime': int,
},
},
'enabled': bool,
Optional('icmp'): {
Optional('error_messages_limited'): int,
Optional('redirects'): bool,
Optional('unreachables'): str,
},
Optional('nd'): {
Optional('suppress'): bool,
Optional('dad_enabled'): bool,
Optional('dad_attempts'): int,
Optional('reachable_time'): int,
Optional('using_time'): int,
Optional('ns_retransmit_interval'): int,
Optional('advertised_reachable_time'): int,
Optional('advertised_retransmit_interval'): int,
Optional('router_advertisements_interval'): int,
Optional('router_advertisements_live'): int,
Optional('advertised_default_router_preference'): str,
Optional('advertised_retransmit_interval_unspecified'): bool,
Optional('advertised_reachable_time_unspecified'): bool,
},
Optional('unnumbered'): {
'interface_ref': str,
},
},
Optional('mtu'): int,
Optional('vrf'): str,
Optional('addresses_config_method'): str,
Optional('joined_group_addresses'): list,
},
}
class ShowIpv6Interface(ShowIpv6InterfaceSchema):
"""Parser for show ipv6 interface"""
cli_command = ['show ipv6 interface {interface}','show ipv6 interface']
def cli(self, interface='',output=None):
if output is None:
if not interface:
cmd = self.cli_command[1]
else:
cmd = self.cli_command[0].format(interface=interface)
out = self.device.execute(cmd)
else:
out = output
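# initial variables; 'ipv6' tracks whether we are inside an address block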
ret_dict = {}
ipv6 = False
joined_group = []
# status code dict:
status_code = {'ten': 'tentative',
'dep': 'duplicate',
'pre': 'preferred'}
for line in out.splitlines():
line = line.strip()
# Vlan211 is up, line protocol is up
# GigabitEthernet1/0/1 is administratively down, line protocol is down
p1 = re.compile(r'^(?P<interface>[\w\/\.\-]+) +is'
r' +(?P<enabled>[\w\s]+),'
r' +line +protocol +is +(?P<oper_status>\w+)$')
m = p1.match(line)
if m:
intf = m.groupdict()['interface']
enabled = m.groupdict()['enabled'].lower()
if intf not in ret_dict:
ret_dict[intf] = {}
if 'administratively down' in enabled:
ret_dict[intf]['enabled'] = False
else:
ret_dict[intf]['enabled'] = True
ret_dict[intf]['oper_status'] = \
m.groupdict()['oper_status'].lower()
# re-initialize the list variable for the new interface
joined_group = []
continue
# IPv6 is enabled, link-local address is FE80::257:D2FF:FE28:
# IPv6 is tentative, link-local address is FE80::257:D2FF:FEFF:428C [TEN]
# IPv6 is tentative, link-local address is FE80::257:D2FF:FEFF:428C [UNA/TEN]
p2 = re.compile(r'^IPv6 +is +(?P<status>\w+), +'
'link-local +address +is +(?P<link_local>[\w\:]+)'
'( *\[(?P<type>[\w\/]+)\])?$')
m = p2.match(line)
if m:
status = m.groupdict()['status']
link_addr = m.groupdict()['link_local']
if 'ipv6' not in ret_dict[intf]:
ret_dict[intf]['ipv6'] = {}
if link_addr not in ret_dict[intf]['ipv6']:
ret_dict[intf]['ipv6'][link_addr] = {}
ret_dict[intf]['ipv6'][link_addr]['ip'] = link_addr
ret_dict[intf]['ipv6'][link_addr]['origin'] = 'link_layer'
if status.lower() in ['preferred', 'deprecated', 'invalid',
'inaccessible', 'unknown', 'tentative',
'duplicate', 'optimistic']:
ret_dict[intf]['ipv6'][link_addr]['status'] = status.lower()
else:
ret_dict[intf]['ipv6'][link_addr]['status'] = 'valid'
continue
# No Virtual link-local address(es):
# Virtual link-local address(es):
# FE80::5:73FF:FEA0:16 [UNA/OOD]
p21 = re.compile(r'^Virtual +link\-local +address\(es\)\:$')
m = p21.match(line)
if m:
ipv6 = True
continue
p21_1 = re.compile(r'^(?P<ipv6>[\w\:]+)'
'( *\[(?P<type>[\w\/]+)\])?$')
m = p21_1.match(line)
if m and ipv6:
if 'ipv6' not in ret_dict[intf]:
ret_dict[intf]['ipv6'] = {}
address = '{ip}'.format(ip=m.groupdict()['ipv6'])
if address not in ret_dict[intf]['ipv6']:
ret_dict[intf]['ipv6'][address] = {}
ret_dict[intf]['ipv6'][address]['ip'] = m.groupdict()['ipv6']
ret_dict[intf]['ipv6'][address]['virtual'] = True
ip_type = m.groupdict()['type']
if ip_type and 'any' in ip_type.lower():
ret_dict[intf]['ipv6'][address]['anycast'] = True
elif ip_type and 'eui' in ip_type.lower():
ret_dict[intf]['ipv6'][address]['eui_64'] = True
elif ip_type:
for code in ip_type.lower().split('/'):
if code in status_code:
ret_dict[intf]['ipv6'][address]['status'] = status_code[code]
else:
ret_dict[intf]['ipv6'][address]['status'] = 'valid'
continue
# Stateless address autoconfig enabled
p3 = re.compile(r'^Stateless +address +autoconfig +enabled$')
m = p3.match(line)
if m:
ret_dict[intf]['autoconf'] = True
continue
# Global unicast address(es):
# 2001:10::14:1, subnet is 2001:10::14:0/112
# 2001:DB8:3:3::3, subnet is 2001:DB8:3:3::/64 [ANY/TEN]
p4 = re.compile(r'^Global +unicast +address\(es\):$')
m = p4.match(line)
if m:
ipv6 = True
continue
p4_1 = re.compile(r'^(?P<ipv6>[\w\:]+), +subnet +is +(?P<dum1>(?P<dum2>[\w\:]+)'
'\/(?P<prefix_length>[0-9]+))'
'( *\[(?P<type>[\w\/]+)\])?$')
m = p4_1.match(line)
if m and ipv6:
if 'ipv6' not in ret_dict[intf]:
ret_dict[intf]['ipv6'] = {}
address = '{ip}/{mask}'.format(ip=m.groupdict()['ipv6'],
mask=m.groupdict()['prefix_length'])
if address not in ret_dict[intf]['ipv6']:
ret_dict[intf]['ipv6'][address] = {}
ret_dict[intf]['ipv6'][address]['ip'] = m.groupdict()['ipv6']
ret_dict[intf]['ipv6'][address]['prefix_length'] = \
m.groupdict()['prefix_length']
try:
status
except Exception:
pass
else:
if status.lower() in ['preferred', 'deprecated', 'invalid',
'inaccessible', 'unknown', 'tentative',
'duplicate', 'optimistic']:
ret_dict[intf]['ipv6'][address]['status'] = status.lower()
else:
ret_dict[intf]['ipv6'][address]['status'] = 'valid'
ret_dict[intf]['ipv6']['enabled'] = True
ip_type = m.groupdict()['type']
if ip_type and 'any' in ip_type.lower():
ret_dict[intf]['ipv6'][address]['anycast'] = True
elif ip_type and 'eui' in ip_type.lower():
ret_dict[intf]['ipv6'][address]['eui_64'] = True
elif ip_type:
for code in ip_type.lower().split('/'):
if code in status_code:
ret_dict[intf]['ipv6'][address]['status'] = status_code[code]
else:
ret_dict[intf]['ipv6'][address]['status'] = 'valid'
continue
# valid lifetime 2591911 preferred lifetime 604711
p4_2 = re.compile(r'^valid +lifetime +(?P<valid>\d+) +'
'preferred +lifetime +(?P<preferred>\d+)$')
m = p4_2.match(line)
if m and ipv6:
try:
address
except Exception:
pass
else:
if 'autoconf' not in ret_dict[intf]['ipv6'][address]:
ret_dict[intf]['ipv6'][address]['autoconf'] = {}
ret_dict[intf]['ipv6'][address]['autoconf']\
['valid_lifetime'] = int(m.groupdict()['valid'])
ret_dict[intf]['ipv6'][address]['autoconf']\
['preferred_lifetime'] = int(m.groupdict()['preferred'])
continue
# Joined group address(es):
# FF02::1
# FF02::1:FF14:1
# FF02::1:FF28:1A71
p5 = re.compile(r'^Joined +group +address\(es\):$')
m = p5.match(line)
if m:
ipv6 = False
continue
p5_1 = re.compile(r'^(?P<address>[\w\:]+)$')
m = p5_1.match(line)
if m and not ipv6:
joined_group.append(m.groupdict()['address'])
ret_dict[intf]['joined_group_addresses'] = sorted(joined_group)
continue
# MTU is 1500 bytes
p6 = re.compile(r'^MTU +is +(?P<mtu>\d+) +bytes$')
m = p6.match(line)
if m:
ret_dict[intf]['mtu'] = int(m.groupdict()['mtu'])
continue
# VPN Routing/Forwarding "VRF1"
p6 = re.compile(r'^VPN +Routing\/Forwarding +\"(?P<vrf>[\w\-]+)\"$')
m = p6.match(line)
if m:
ret_dict[intf]['vrf'] = m.groupdict()['vrf']
continue
# ICMP error messages limited to one every 100 milliseconds
p7 = re.compile(r'^ICMP +error +messages +limited +to +one +'
'every +(?P<limited>\d+) +milliseconds$')
m = p7.match(line)
if m:
if 'ipv6' not in ret_dict[intf]:
ret_dict[intf]['ipv6'] = {}
if 'icmp' not in ret_dict[intf]['ipv6']:
ret_dict[intf]['ipv6']['icmp'] = {}
ret_dict[intf]['ipv6']['icmp']['error_messages_limited'] = \
int(m.groupdict()['limited'])
continue
# ICMP redirects are enabled
p8 = re.compile(r'^ICMP +redirects +are +(?P<status>\w+)$')
m = p8.match(line)
if m:
if 'ipv6' not in ret_dict[intf]:
ret_dict[intf]['ipv6'] = {}
if 'icmp' not in ret_dict[intf]['ipv6']:
ret_dict[intf]['ipv6']['icmp'] = {}
if 'enabled' in m.groupdict()['status']:
ret_dict[intf]['ipv6']['icmp']['redirects'] = True
else:
ret_dict[intf]['ipv6']['icmp']['redirects'] = False
continue
# ICMP unreachables are sent
p9 = re.compile(r'^ICMP +unreachables +are +(?P<status>[\w\s]+)$')
m = p9.match(line)
if m:
if 'ipv6' not in ret_dict[intf]:
ret_dict[intf]['ipv6'] = {}
if 'icmp' not in ret_dict[intf]['ipv6']:
ret_dict[intf]['ipv6']['icmp'] = {}
if 'not sent' not in m.groupdict()['status']:
ret_dict[intf]['ipv6']['icmp']['unreachables'] = m.groupdict()['status']
continue
# ND DAD is enabled, number of DAD attempts: 1
p10 = re.compile(r'^ND +DAD +is +(?P<status>\w+), +'
'number +of +DAD +attempts: +(?P<attempts>\d+)$')
m = p10.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.setdefault('suppress', False)
if 'enabled' in m.groupdict()['status']:
nd_dict['dad_enabled'] = True
else:
nd_dict['dad_enabled'] = False
nd_dict['dad_attempts'] = int(m.groupdict()['attempts'])
continue
# ND reachable time is 30000 milliseconds (using 30000)
p11 = re.compile(r'^ND +reachable +time +is (?P<time>\d+) +milliseconds'
' +\(using +(?P<use>\d+)\)$')
m = p11.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.setdefault('suppress', False)
nd_dict['reachable_time'] = int(m.groupdict()['time'])
nd_dict['using_time'] = int(m.groupdict()['use'])
continue
# ND NS retransmit interval is 1000 milliseconds
p12 = re.compile(r'^ND +NS +retransmit +interval +is'
' +(?P<interval>\d+) +milliseconds$')
m = p12.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.setdefault('suppress', False)
nd_dict['ns_retransmit_interval'] = int(m.groupdict()['interval'])
continue
# ND advertised reachable time is 0 (unspecified)
p13 = re.compile(r'^ND +advertised +reachable +time +is +(?P<time>\d+)'
' +\((?P<dummy>\S+)\)$')
m = p13.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.setdefault('suppress', False)
nd_dict['advertised_reachable_time'] = int(m.groupdict()['time'])
if m.groupdict()['dummy'] == 'unspecified':
nd_dict['advertised_reachable_time_unspecified'] = True
else:
nd_dict['advertised_reachable_time_unspecified'] = False
continue
# ND advertised retransmit interval is 0 (unspecified)
p14 = re.compile(r'^ND +advertised +retransmit +interval +is +(?P<time>\d+)'
' +\((?P<dummy>\S+)\)$')
m = p14.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.setdefault('suppress', False)
nd_dict['advertised_retransmit_interval'] = int(m.groupdict()['time'])
if m.groupdict()['dummy'] == 'unspecified':
nd_dict['advertised_retransmit_interval_unspecified'] = True
else:
nd_dict['advertised_retransmit_interval_unspecified'] = False
continue
# ND router advertisements are sent every 200 seconds
p15 = re.compile(r'^ND +router +advertisements +are +sent +'
'every +(?P<time>\d+) +seconds$')
m = p15.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.setdefault('suppress', False)
nd_dict['router_advertisements_interval'] = int(m.groupdict()['time'])
continue
# ND router advertisements live for 1800 seconds
p16 = re.compile(r'^ND +router +advertisements +live +for +'
'(?P<time>\d+) +seconds$')
m = p16.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.setdefault('suppress', False)
nd_dict['router_advertisements_live'] = int(m.groupdict()['time'])
continue
# ND advertised default router preference is Medium
p17 = re.compile(r'^ND +advertised +default +router +preference +'
'is +(?P<prefer>\w+)$')
m = p17.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.setdefault('suppress', False)
nd_dict['advertised_default_router_preference'] = m.groupdict()['prefer']
continue
# ND RAs are suppressed (periodic)
p17_1 = re.compile(r'^ND +RAs +are +suppressed.*$')
m = p17_1.match(line)
if m:
nd_dict = ret_dict.setdefault(intf, {}).setdefault('ipv6', {}).setdefault('nd', {})
nd_dict.update({'suppress': True})
continue
# Hosts use stateless autoconfig for addresses.
p18 = re.compile(r'^Hosts +use +(?P<addr_conf_method>[\w\s]+) +for +addresses.$')
m = p18.match(line)
if m:
ret_dict[intf]['addresses_config_method'] = \
m.groupdict()['addr_conf_method']
continue
# Interface is unnumbered. Using address of Loopback0
p19 = re.compile(r'^Interface +is +unnumbered. +Using +address +of'
' +(?P<unnumbered_intf>[\w\/\.]+)$')
m = p19.match(line)
if m:
if 'ipv6' not in ret_dict[intf]:
ret_dict[intf]['ipv6'] = {}
if 'unnumbered' not in ret_dict[intf]['ipv6']:
ret_dict[intf]['ipv6']['unnumbered'] = {}
ret_dict[intf]['ipv6']['unnumbered']['interface_ref'] = \
Common.convert_intf_name(m.groupdict()['unnumbered_intf'])
continue
# No global unicast address is configured
p20 = re.compile(r'^No +global +unicast +address +is +configured$')
m = p20.match(line)
if m:
if 'ipv6' not in ret_dict[intf]:
ret_dict[intf]['ipv6'] = {}
ret_dict[intf]['ipv6']['enabled'] = False
continue
return ret_dict
class ShowInterfacesTrunkSchema(MetaParser):
"""Schema for show interfaces trunk"""
schema = {
'interface': {
Any(): {
'name': str,
'mode': str,
'encapsulation': str,
'status': str,
'native_vlan': str,
'vlans_allowed_on_trunk': str,
'vlans_allowed_active_in_mgmt_domain': str,
'vlans_in_stp_forwarding_not_pruned': str
}
}
}
class ShowInterfacesTrunk(ShowInterfacesTrunkSchema):
"""parser for show interfaces trunk"""
cli_command = 'show interfaces trunk'
def cli(self,output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial regexp pattern
p1 = re.compile(r'^(?P<name>[\w\-\/\.]+) +(?P<mode>\w+) +(?P<encapsulation>\S+) +'
'(?P<status>\w+) +(?P<native_vlan>\d+)$')
p2 = re.compile('^Port +Vlans +allowed +on +trunk$')
p3 = re.compile('^Port +Vlans +allowed +and +active +in +management +domain$')
p4 = re.compile('^Port +Vlans +in +spanning +tree +forwarding +state +and +not +pruned$')
p5 = re.compile('^(?P<name>[\w\-\/\.]+) +(?P<vlans>none\s*|[\d\-\,\s]+)$')
# initial variables
ret_dict = {}
vlan_list_type = None
for line in out.splitlines():
line = line.strip()
# Gi1/0/4 on 802.1q trunking 1
# Gi1/0/4 auto n-isl trunking 1
# Gi1/0/23 on isl trunking 1
# Gi1/0/24 on 802.1q trunking 1
# Po12 auto n-802.1q trunking 1
m = p1.match(line)
if m:
group = m.groupdict()
intf = Common.convert_intf_name(group.pop('name'))
intf_dict = ret_dict.setdefault('interface', {}).setdefault(intf, {})
intf_dict['name'] = intf
intf_dict.update({k:v for k,v in group.items()})
continue
# Port Vlans allowed on trunk
if p2.match(line):
vlan_list_type = 'vlans_allowed_on_trunk'
continue
# Port Vlans allowed and active in management domain
if p3.match(line):
vlan_list_type = 'vlans_allowed_active_in_mgmt_domain'
continue
# Port Vlans in spanning tree forwarding state and not pruned
if p4.match(line):
vlan_list_type = 'vlans_in_stp_forwarding_not_pruned'
continue
# Gi1/0/4 200-211
m = p5.match(line)
if m:
group = m.groupdict()
intf = Common.convert_intf_name(group['name'])
intf_dict = ret_dict.setdefault('interface', {}).setdefault(intf, {})
intf_dict.setdefault(vlan_list_type, group['vlans']) if group['vlans'] else None
continue
return ret_dict
class ShowInterfacesCountersSchema(MetaParser):
"""Schema for show interfaces <WORD> counters"""
schema = {
'interface': {
Any(): {
Any(): { # in or out
'octets': int,
'ucast_pkts': int,
'mcast_pkts': int,
'bcast_pkts': int,
'name': str
},
},
}
}
class ShowInterfacesCounters(ShowInterfacesCountersSchema):
"""parser for show interfaces <WORD> counters"""
cli_command = 'show interfaces {interface} counters'
def cli(self, interface,output=None):
if output is None:
out = self.device.execute(self.cli_command.format(interface=interface))
else:
out = output
# initial regexp pattern
p1 = re.compile(r'^(?P<name>[\w\-\/\.]+) +(?P<octets>\d+) +(?P<ucast_pkts>\d+) +'
'(?P<mcast_pkts>\d+) +(?P<bcast_pkts>\d+)$')
p2 = re.compile(r'Port +InOctets +InUcastPkts +InMcastPkts +InBcastPkts')
p2_1 = re.compile(r'Port +OutOctets +OutUcastPkts +OutMcastPkts +OutBcastPkts')
# initial variables
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# decide the in or out packets
if p2.match(line):
in_out = 'in'
continue
if p2_1.match(line):
in_out = 'out'
continue
# Gi1/0/4   <octets>   <ucast_pkts>   <mcast_pkts>   <bcast_pkts>
m = p1.match(line)
if m:
group = m.groupdict()
intf = Common.convert_intf_name(group.pop('name'))
intf_dict = ret_dict.setdefault('interface', {}).setdefault(intf, {}).setdefault(in_out, {})
intf_dict['name'] = intf
intf_dict.update({k:int(v) for k,v in group.items()})
continue
return ret_dict
class ShowInterfacesAccountingSchema(MetaParser):
"""Schema for show interfaces accounting"""
schema = {
Any(): {
Optional('description'): str,
'accounting': {
Any(): {
'pkts_in': int,
'pkts_out': int,
'chars_in': int,
'chars_out': int,
}
}
}
}
class ShowInterfacesAccounting(ShowInterfacesAccountingSchema):
"""Parser for:
show interfaces accounting
show interfaces <interface> accounting
"""
cli_command = ['show interfaces {interface} accounting','show interfaces accounting']
exclude = ['pkts_in', 'pkts_out', 'chars_in', 'chars_out']
def cli(self, interface=None,output=None):
if output is None:
if not interface:
cmd = self.cli_command[1]
else:
cmd = self.cli_command[0].format(interface=interface)
out = self.device.execute(cmd)
else:
out = output
# initial return dictionary
ret_dict = {}
# initial regexp pattern
# GigabitEthernet0/0/0/0
# GigabitEthernet11 OOB Net
p1 = re.compile(r'^(?P<interface>[a-zA-Z\-\d\/\.]+)(?P<description>( (\S)+)*)$')
# Tunnel0 Pim Register Tunnel (Encap) for RP 10.186.1.1
p1_1 = re.compile(r'^(?P<interface>Tunnel\d+) +Pim +Register +'
'Tunnel +\(Encap\) +for +RP +(?P<rp>[\w\.]+)$')
# IPV4_UNICAST 9943 797492 50 3568
p2 = re.compile(r'^(?P<protocol>[\w\_\-\s]+)\s+(?P<pkts_in>\d+)\s+'
'(?P<chars_in>\d+)\s+(?P<pkts_out>\d+)\s+'
'(?P<chars_out>\d+)')
# No traffic sent or received on this interface.
p3 = re.compile(r'^No +traffic +sent +or +received +on +this +interface\.$')
for line in out.splitlines():
if line:
line = line.strip()
else:
continue
m = p3.match(line)
if m:
continue
# GigabitEthernet0/0/0/0
# GigabitEthernet11 OOB Net
m = p1.match(line)
if m:
intf = m.groupdict()['interface']
description = m.groupdict()['description']
continue
# IPV4_UNICAST 9943 797492 50 3568
m = p2.match(line)
if m:
protocol_dict = m.groupdict()
protocol = protocol_dict.pop('protocol').lower().strip()
ret_dict.setdefault(intf, {}).\
setdefault('accounting', {}).setdefault(protocol, {})
ret_dict[intf]['accounting'][protocol].update({k: int(v) \
for k, v in protocol_dict.items()})
if description:
ret_dict[intf].setdefault('description', description.strip())
continue
return ret_dict
# ====================================================
# schema for show interfaces stats
# ====================================================
class ShowInterfacesStatsSchema(MetaParser):
"""Schema for:
show interfaces <interface> stats
show interfaces stats"""
schema = {
Any(): {
'switching_path': {
Any(): {
'pkts_in': int,
'pkts_out': int,
'chars_in': int,
'chars_out': int,
},
}
},
}
# ====================================================
# parser for show interfaces stats
# ====================================================
class ShowInterfacesStats(ShowInterfacesStatsSchema):
"""Parser for :
show interfaces <interface> stats
show interfaces stats"""
cli_command = ['show interfaces stats' ,'show interfaces {interface} stats']
exclude = ['chars_in' , 'chars_out', 'pkts_in', 'pkts_out']
def cli(self, interface="", output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# initialize result dict
result_dict = {}
# GigabitEthernet0/0/0
p1 = re.compile(r'^\s*(?P<interface>[\w./]+)$')
# Switching path Pkts In Chars In Pkts Out Chars Out
# Processor 33 2507 33 2490
p2 = re.compile(r'^\s*(?P<path>[\w\- ]*?) +(?P<pkts_in>[\d]+) +(?P<chars_in>[\d]+)'
' +(?P<pkts_out>[\d]+) +(?P<chars_out>[\d]+)$')
for line in out.splitlines():
line = line.rstrip()
m = p1.match(line)
if m:
interface = m.groupdict()['interface']
path_dict = result_dict.setdefault(interface, {}).setdefault('switching_path', {})
continue
m = p2.match(line)
if m:
group = m.groupdict()
path = group.pop('path').replace(" ", "_").replace("-", "_").lower()
tmp_dict = path_dict.setdefault(path, {})
tmp_dict.update({k: int(v) for k, v in group.items()})
continue
return result_dict
# ====================================================
# parser for show interfaces description
# ====================================================
class ShowInterfacesDescriptionSchema(MetaParser):
"""schema for show interfaces description
"""
schema = {
'interfaces': {
Any(): {
'status': str,
'protocol': str,
Optional('description'): str
}
}
}
class ShowInterfacesDescription(ShowInterfacesDescriptionSchema):
"""parser for show interfaces description
"""
cli_command = ['show interfaces description', 'show interfaces {interface} description']
def cli(self, interface="", output=None):
if output is None:
if interface:
cmd = self.cli_command[1].format(interface=interface)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
result_dict = {}
index = 1
#Interface Status Protocol Description
#Gi0/0 up up
#Gi0/1 admin down down to router2
p1 = re.compile(r'(?P<interface>(\S+)) +(?P<status>(\S+)([\s+](\S+))?) +(?P<protocol>(\S+))(?: +(?P<description>(.*)))?$')
for line in out.splitlines():
line = line.strip()
#Interface Status Protocol Description
#Gi0/0 up up
#Gi0/1 admin down down to router2
m = p1.match(line)
if m and m.groupdict()['protocol'] != 'Protocol':
group = m.groupdict()
interface = Common.convert_intf_name(group['interface'])
intf_dict = result_dict.setdefault('interfaces', {}).setdefault(interface, {})
intf_dict['status'] = group['status']
intf_dict['protocol'] = group['protocol']
if group['description'] is not None:
intf_dict['description'] = str(group['description'])
else:
intf_dict['description'] = ""
index += 1
continue
return result_dict
# ====================================================
# schema for show interfaces status
# ====================================================
class ShowInterfacesStatusSchema(MetaParser):
"""Schema for:
show interfaces status"""
schema = {
'interfaces': {
Any(): {
Optional('name'): str,
'status': str,
'vlan': str,
'duplex_code': str,
'port_speed': str,
Optional('type'): str,
}
}
}
# ====================================================
# parser for show interfaces status
# ====================================================
class ShowInterfacesStatus(ShowInterfacesStatusSchema):
"""parser for
* show interfaces status
"""
cli_command = 'show interfaces status'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
result_dict = {}
# Port Name Status Vlan Duplex Speed Type
# Gi1/2 TelenlqPOIU notconnect 125 full 100 10/100/1000-TX
# Gi1/3 SE connected 132 a-full a-1000 10/100/1000-TX
# Gi1/7 notconnect 99 auto auto 10/100/1000-TX
# Gi1/10 To cft123 connected trunk a-full a-1000 10/100/1000-TX
# Gi1/1/0/1 FAST-HELLO connected 4094 a-full a-1000 10/100/1000BaseTX
# Te1/1/2 VSL connected trunk full a-10G 10GBase-SR
# Te2/1/20 disabled 1 full auto No XCVR
# Te2/1/21 VSL LINK1 disabled 1 full auto No XCVR
# Po10 VSL LINK2 connected trunk a-full a-10G
p1 = re.compile(r'^(?P<interfaces>\S+)(?:\s+(?P<name>([\S\s]+)))?'
r'\s+(?P<status>(connected|notconnect|suspended|inactive|disabled|err-disabled|monitoring))'
r'\s+(?P<vlan>\S+)\s+(?P<duplex_code>[\S\-]+)\s+(?P<port_speed>[\S\-]+)(\s+(?P<type>.+))?$')
for line in out.splitlines():
line = line.strip()
m = p1.match(line)
if m:
group = m.groupdict()
intf_dict = result_dict.setdefault('interfaces', {}).\
setdefault(Common.convert_intf_name(group['interfaces']), {})
name_val = (group['name'] or '').strip()
if len(name_val) > 0:
intf_dict['name'] = name_val
keys = ['status',
'vlan', 'duplex_code', 'port_speed',
'type']
for k in keys:
if group[k]:
intf_dict[k] = group[k].strip()
continue
return result_dict
# ==========================================================
# Parser for show interface {interface} transceiver detail
# ==========================================================
class ShowInterfaceTransceiverDetailSchema(MetaParser):
"""Schema for:
show interface {interface} transceiver detail"""
schema = {
'interfaces': {
Any(): {# interface name
Optional('cisco_extended_id_number'): str,
Optional('cisco_id'): str,
Optional('cisco_part_number'): str,
Optional('cisco_product_id'): str,
Optional('cisco_vendor_id'): str,
Optional('name'): str,
Optional('nominal_bitrate'): str,
Optional('number_of_lanes'): str,
Optional('part_number'): str,
Optional('revision'): str,
Optional('serial_number'): str,
Optional('transceiver'): str,
Optional('type'): str,
Any(): str,
}
}
}
class ShowInterfaceTransceiverDetail(ShowInterfaceTransceiverDetailSchema):
"""parser for
* show interface {interface} transceiver detail
"""
cli_command = 'show interface {interface} transceiver detail'
def cli(self, interface, output=None):
if output is None:
out = self.device.execute(self.cli_command.format(interface=interface))
else:
out = output
result_dict = {}
# transceiver is present
# type is 10Gbase-LR
# name is CISCO-FINISAR
# part number is FTLX1474D3BCL-CS
p1 = re.compile(r'^(?P<key>[\S\s]+) +is +(?P<value>[\S\s]+)$')
# number of lanes 1
p2 = re.compile(r'^number +of +lanes +(?P<lanes>[\d]+)$')
for line in out.splitlines():
line = line.strip()
# transceiver is present
# type is 10Gbase-LR
m = p1.match(line)
if m:
group = m.groupdict()
key = group['key'].strip().replace(" ", '_').lower()
value = group['value'].strip()
intf_dict = result_dict.setdefault('interfaces', {}).setdefault(interface, {})
intf_dict.update({key: value})
continue
# number of lanes 1
m = p2.match(line)
if m:
intf_dict['number_of_lanes'] = m.groupdict()['lanes']
continue
return result_dict
| 44.005934 | 145 | 0.479359 | [
"Apache-2.0"
] | Tristou27/genieparser | src/genie/libs/parser/iosxe/show_interface.py | 155,737 | Python |
import subprocess
import tator
def test_activities(host, token, video_type, video):
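# Run the activities example script end-to-end; check=True fails the test on a non-zero exit code.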
cmd = [
'python3',
'examples/activities.py',
'--host', host,
'--token', token,
'--video_type_id', str(video_type),
'--video_id', str(video),
]
subprocess.run(cmd, check=True)
| 21.4 | 52 | 0.573209 | [
"MIT"
] | cvisionai/tator-py | test/examples/test_activities.py | 321 | Python |
import os
from setuptools import setup, find_packages
from guizero import __name__, __package__, __version__, __author__
## This is a Python 3 package only
from sys import version_info
if version_info.major != 3:
print("This package will only work with Python 3. \n"
"If you already have Python 3 installed try 'pip3 install guizero'.")
__desc__ = 'Python module to allow learners to easily create GUIs'
__author_email__ = '[email protected]'
__license__ = 'BSD'
__url__ = 'https://github.com/lawsie/guizero'
__requires__ = []
__extras_require__={
'images': ["pillow>=4.3.0"]
}
__python_requires__ = ">=3"
__keywords__ = [
"GUI",
"guizero",
"interface",
]
__classifiers__ = [
"Development Status :: 5 - Production/Stable",
"Topic :: Education",
"Topic :: Software Development :: User Interfaces",
"Topic :: Education",
"Intended Audience :: Education",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
__long_description__ = """# guizero
guizero is designed to allow new learners to quickly and easily create GUIs for their programs.
There is comprehensive documentation at [lawsie.github.io/guizero](https://lawsie.github.io/guizero/)
## Install
If you can download and unzip a file, you can [install guizero](https://lawsie.github.io/guizero/#easy-install) - **no special permissions or administrator rights are required**.
If you have administrator rights and are connected to the internet, you can use [pip to quickly install guizero](https://lawsie.github.io/guizero/#install-using-pip).
guizero only requires `tkinter` to be installed, which is included with a standard Python installation on all platforms except Linux.
A Python module to allow learners to easily create GUIs. guizero is designed to be used by new learners.
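For example, from a terminal: `pip3 install guizero`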
## Use
guizero is simple to use, taking away much of the complexity of creating simple GUIs.
```python
from guizero import App, Text
app = App(title="Hello world")
message = Text(app, text="Welcome to the Hello world app!")
app.display()
```
"""
setup(
name=__name__,
version=__version__,
author=__author__,
author_email=__author_email__,
description=__desc__,
long_description=__long_description__,
long_description_content_type='text/markdown',
license=__license__,
keywords=__keywords__,
url=__url__,
packages=find_packages(),
classifiers=__classifiers__,
install_requires=__requires__,
extras_require = __extras_require__,
python_requires=__python_requires__,
)
| 33.627907 | 178 | 0.720609 | [
"BSD-3-Clause"
] | hormigaAzul/guizero | setup.py | 2,892 | Python |
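# Logging setup for the bot: structured logs are shipped to Seq in production
# (PROD/SEQ_URL/SEQ_BOT_KEY env vars) and written to the console during development.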
import os
import logging
import seqlog
from seqlog import StructuredRootLogger, StructuredLogger, ConsoleStructuredLogHandler
if bool(os.environ.get('PROD')):
# Production logging setup
url = os.environ.get('SEQ_URL')
key = os.environ.get('SEQ_BOT_KEY')
if not key:
raise Exception('SEQ_BOT_KEY not found but SEQ_URL was specified')
seqlog.log_to_seq(
# Initialize the seq logging url before the secrets are loaded
# this is ok because seq logging only happens in prod
server_url=url,
api_key=key,
level=logging.INFO,
batch_size=5,
auto_flush_timeout=10, # seconds
override_root_logger=False,
)
else:
# Development logging setup
logging.setLoggerClass(StructuredLogger)
logging.root = StructuredRootLogger(logging.WARNING)
logging.Logger.root = logging.root
logging.Logger.manager = logging.Manager(logging.Logger.root)
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s',
handlers=[
ConsoleStructuredLogHandler()
],
level=logging.INFO,
)
| 27.585366 | 86 | 0.678161 | [
"MIT"
] | ClemBotProject/ClemBot | ClemBot.Bot/bot/__init__.py | 1,131 | Python |
import filecmp
import os
from os.path import abspath, dirname, join, exists
import pytest
from image_diet import storage
THIS_DIR = abspath(dirname(__file__))
@pytest.fixture
def dietstorage():
dietstorage = storage.DietStorage()
# Filesystem storage parameters
dietstorage.location = THIS_DIR
dietstorage.file_permissions_mode = 0o644
return dietstorage
def test_the_right_storage_has_been_imported():
from django.core import files
assert files.storage.FileSystemStorage == storage.STORAGE_CLASS
def test_get_configuration_returns_parsed_configuration():
config = storage.get_configuration()
assert config['commands']['fake'] == 'fakecmd'
assert config['commands']['advpng'] == 'advpng'
def test_mixin_reads_default_configuration():
mixin = storage.DietMixin()
assert hasattr(mixin, 'config')
assert mixin.config['commands']['optipng'] == 'optipng'
assert mixin.temp_dir == '/tmp'
def test_mixin_also_reads_custom_configuration():
mixin = storage.DietMixin()
assert mixin.config['commands']['optipng'] == 'optipng'
assert mixin.config['commands']['fake'] == 'fakecmd'
assert mixin.config['notreal'] == 'not a real value'
def test_save_to_temp_copies_content_to_same_named_file_in_temp_directory():
mixin = storage.DietMixin()
filename = 'stockholm.jpg'
path = join(THIS_DIR, 'test_files', 'stockholm.jpg')
with open(path, 'rb') as f:
content = f.read()
tmppath = join(mixin.temp_dir, filename)
try:
assert exists(tmppath) is False
assert mixin.save_to_temp(path, content) == tmppath
assert exists(tmppath)
assert filecmp.cmp(path, tmppath)
finally:
os.remove(tmppath)
def test_save_method_saves_text_file(dietstorage):
filename = 'tempfile.txt'
content = "This file is empty."
path = join(THIS_DIR, filename)
tmppath = dietstorage.save_to_temp(path, content)
new_path = dietstorage.save(path, open(tmppath, 'r'))
try:
assert exists(new_path)
assert open(new_path, 'r').read() == content
assert not exists(tmppath)
finally:
os.remove(new_path)
def prepare_test_files(filename):
copy_filename = 'copy_' + filename
tmppath = join('/tmp', filename)
copypath = join('/tmp', copy_filename)
path = join(THIS_DIR, 'test_files', filename)
with open(path, 'rb') as f:
content = f.read()
with open(copypath, 'wb') as f:
f.write(content)
# Original test_file , working copy (can be changed), internal copy if it
# exists and content of original file
return (path, copypath, tmppath, content)
def test_save_method_saves_binary_file(dietstorage):
path, copypath, tmppath, content = prepare_test_files('stockholm.jpg')
new_path = dietstorage.save(path, open(copypath, 'rb'))
try:
assert exists(new_path)
assert open(new_path, 'rb').read() == content
assert not exists(tmppath)
finally:
os.remove(new_path)
os.remove(copypath)
def test_save_method_compresses(dietstorage):
path, copypath, tmppath, content = prepare_test_files('png_test.png')
new_path = dietstorage.save(path, open(copypath, 'rb'))
try:
assert exists(new_path)
assert len(open(new_path, 'rb').read()) < len(content)
assert not exists(tmppath)
finally:
os.remove(new_path)
os.remove(copypath)
def test_logger_logs_errors(caplog, dietstorage):
# Delete configuration section so that DietException will be raised
del dietstorage.config['commands']
filename = 'stockholm.jpg'
path = join(THIS_DIR, 'test_files', 'stockholm.jpg')
tmppath = join(dietstorage.temp_dir, filename)
try:
dietstorage.save(path, open(path, 'rb'))
except storage.diet.DietException as e:
assert not exists(tmppath)
assert isinstance(e, storage.diet.ConfigurationErrorDietException)
records = list(caplog.records())
assert len(records) == 1
record = records[0]
assert record.levelname == 'ERROR'
assert record.message.startswith('Missing key(s) in configuration: ')
def test_storage_logs_file_access_errors(caplog, dietstorage):
dietstorage.temp_dir = '/doesnotexist'
filename = 'stockholm.jpg'
path = join(THIS_DIR, 'test_files', 'stockholm.jpg')
tmppath = join(dietstorage.temp_dir, filename)
try:
dietstorage.save(path, open(path, 'rb'))
except (OSError, IOError):
assert not exists(tmppath)
records = list(caplog.records())
assert len(records) == 1
record = records[0]
assert record.levelname == 'ERROR'
assert record.message.startswith('Cannot save to temp dir')
def test_save_method_cleans_temp_directory(dietstorage):
filename = 'stockholm.jpg'
path = join(THIS_DIR, 'test_files', 'stockholm.jpg')
tmppath = join(dietstorage.temp_dir, filename)
new_path = dietstorage.save(path, open(path, 'rb'))
try:
assert not exists(tmppath)
finally:
os.remove(new_path)
| 27.497297 | 77 | 0.686259 | [
"MIT"
] | samastur/image-diet2 | tests/test_storage.py | 5,087 | Python |
import pytest
from model_mommy import mommy
from rest_framework import status
import json
'''
This set of tests exists to exercise all example requests used in the API documentation.
If any of these tests break, but others do not, you should first check that the specified
request is still valid. If the request is no longer valid, the documentation requires an update
alongside its test.
Note that these tests don't check the accuracy of the responses, just that they return a
200 response code. Testing the accuracy of these requests should be done in the relevant apps'
testing suites.
'''
@pytest.fixture(scope="session")
def documentation_test_data():
mommy.make('awards.Award', _quantity=1, _fill_optional=True)
@pytest.mark.parametrize("url, req", [
("/api/v1/awards/", {"filters": []}),
("/api/v1/awards/", {"filters": [{"field": "date_signed", "operation": "greater_than_or_equal",
"value": "2016-06-01"}]}),
("/api/v1/awards/", {"filters": [{"field": "date_signed", "operation": "greater_than_or_equal",
"value": "2016-06-01"}, {"field": "date_signed", "operation": "less_than",
"value": "2017-06-01"}]}),
("/api/v1/awards/", {"filters": [{"combine_method": "OR",
"filters": [{"field": "type", "operation": "equals", "value": "A"},
{"field": "type", "operation": "equals", "value": "B"}]}]}),
("/api/v1/awards/", {"filters": [{"field": "date_signed", "operation": "greater_than_or_equal",
"value": "2016-06-01"},
{"combine_method": "OR",
"filters": [{"field": "type", "operation": "equals", "value": "A"},
{"combine_method": "AND",
"filters": [{"field": "type", "operation": "equals", "value": "B"},
{"field": "date_signed", "operation": "less_than",
"value": "2017-06-01"}]}]}]}),
("/api/v1/awards/", {"filters": [{"field": "recipient__recipient_name", "operation": "equals",
"value": "GENERAL ELECTRIC COMPANY"}]}),
("/api/v1/awards/", {"fields": ["description", "recipient"]}),
("/api/v1/awards/", {"exclude": ["type"]}),
("/api/v1/awards/", {"verbose": True}),
("/api/v1/awards/", {"fields": ["type"], "filters": [{"field": "date_signed", "operation": "greater_than",
"value": "2016-06-01"}]}),
("/api/v1/awards/", {"order": ["recipient__recipient_name"]}),
("/api/v1/awards/", {"order": ["-recipient__recipient_name"]}),
("/api/v1/awards/", {"page": 5, "limit": 10}),
("/api/v1/awards/total/", {"field": "total_obligation", "group": "date_signed__fy"}),
("/api/v1/awards/total/", {"field": "total_obligation", "group": "date_signed", "aggregate": "count",
"date_part": "month"}),
])
@pytest.mark.django_db
def test_intro_tutorial_post_requests(client, url, req, documentation_test_data):
assert client.post(
url,
data=json.dumps(req),
content_type='application/json').status_code == status.HTTP_200_OK
@pytest.mark.parametrize("url", [
"/api/v1/awards/",
"/api/v1/transactions/",
"/api/v1/awards/?awarding_agency=1788",
"/api/v1/awards/?type=A&piid=LB01",
"/api/v1/awards/?page=5&limit=10",
])
@pytest.mark.django_db
def test_intro_tutorial_get_requests(client, url, documentation_test_data):
assert client.get(
url).status_code == status.HTTP_200_OK
@pytest.mark.parametrize("url, req", [
("/api/v1/awards/", {"filters": [{"field": "awarding_agency__toptier_agency__cgac_code", "operation": "equals",
"value": "097"}]}),
("/api/v1/awards/", {"filters": [{"field": "type", "operation": "in", "value": ["A", "B", "C", "D"]}]}),
("/api/v1/awards/", {"filters": [{"field": "latest_transaction__contract_data", "operation": "is_null",
"value": False}]}),
("/api/v1/awards/", {"filters": [{"field": "place_of_performance__state_code", "operation": "not_equals",
"value": "NJ"}]})
])
@pytest.mark.django_db
def test_recipe_post_requests(client, url, req, documentation_test_data):
assert client.post(
url,
data=json.dumps(req),
content_type='application/json').status_code == status.HTTP_200_OK
@pytest.mark.parametrize("url", [
"/api/v1/awards/?awarding_agency__toptier_agency__cgac_code=097",
])
@pytest.mark.django_db
def test_recipe_get_requests(client, url, documentation_test_data):
assert client.get(
url).status_code == status.HTTP_200_OK
| 49.54902 | 118 | 0.550257 | [
"CC0-1.0"
] | COEJKnight/five | usaspending_api/api_docs/tests/test_documentation_examples.py | 5,054 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dj8bihu.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 33.6875 | 73 | 0.686456 | [
"MIT"
] | RileyGe/dj8bihu | manage.py | 539 | Python |
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views as example_views
app_name = 'example'
urlpatterns = [
path('', admin.site.urls),
# path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    path('users/<int:pk>/', example_views.UserDetailView.as_view(), name='user_detail'),
    path('users/', example_views.UserCreateListView.as_view(), name='user_list_create')
]
| 35.666667 | 90 | 0.71028 | [
"BSD-3-Clause"
] | sewi2/simple-django-template | project/example/urls.py | 1,070 | Python |
import uuid
import requests
import os
from PIL import Image
from io import BytesIO
__name__ = "download_images"
def download(link, counter, size, root_folder, class_name):
IMG_SIZE = size, size
response = requests.get(link, timeout=3.000)
file = BytesIO(response.content)
img = Image.open(file)
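    # Resize the image in place only when a positive target size was requested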
if size > 0:
IMG_SIZE = size, size
img.thumbnail(IMG_SIZE, Image.ANTIALIAS)
# Split last part of url to get image name
img_name = link.rsplit('/', 1)[1]
img_type = img_name.split('.')[1]
if img_type.lower() != "jpg":
raise Exception("Cannot download these type of file")
else:
        # Use a UUID in the filename to avoid clashing with files that already exist
id = uuid.uuid1()
img.save(f"./{root_folder}/{class_name}/{class_name}-{id.hex}.jpg", "JPEG") | 30.185185 | 83 | 0.653988 | [
"MIT"
] | Bergolfs/idt | idt/utils/download_images.py | 815 | Python |
import time
import cv2
import numpy as np
j = 1
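# Loop over the numbered images in Bearing/, detect edges, and fit ellipses to the larger contours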
while 1:
path = 'Bearing/' + str(j) + '.jpg'
img = cv2.imread(path)
img_copy = img.copy()
img = cv2.blur(img, (1, 1))
gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
# flag, img_copy = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    imgray = cv2.Canny(img_copy, 600, 100, 3)  # Canny edge detection; the thresholds can be tuned
# cv2.imshow("imgray",imgray)
ret, thresh = cv2.threshold(imgray, 127, 255, cv2.THRESH_BINARY)
cv2.imshow("thresh", thresh)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # contours is the set of contours; their length, area, etc. can be computed
ux = 0
uy = 0
for cnt in contours:
if len(cnt) > 50:
            # S1 = cv2.contourArea(cnt)  # actual area computed via Green's theorem
            ell = cv2.fitEllipse(cnt)  # fit an ellipse: ellipse = [center (x, y), axes (a, b), angle]
x = int(ell[0][0])
y = int(ell[0][1])
a = ell[1][0]
b = ell[1][1]
            # S2 = math.pi * ell[1][0] * ell[1][1]  # theoretical area
            if (b / a) < 1.2:  # and a > 0 and b > 0 and a < 0 and b < 0:  # axis ratio (keep only near-circular fits)
uy = y
ux = x
img = cv2.ellipse(img, ell, (0, 0, 200), 2)
cv2.circle(img, (x, y), 2, (255, 255, 255), 3)
cv2.putText(img, str((x, y)), (x + 20, y + 10), 0, 0.5,
[225, 255, 255], thickness=1, lineType=cv2.LINE_AA)
print("长轴: " + str(a) + " " + "短轴: " + str(b) + " " + str(ell[0][0]) + " " + str(ell[0][1]))
cv2.imshow("ell", img)
j+=1
if j==44:
j=1
time.sleep(0.5)
cv2.waitKey(20)
| 34.541667 | 120 | 0.496984 | [
"Apache-2.0"
] | Thinkin99/intelligent_visionforce_assemble | ellcircle_detect.py | 1,772 | Python |
# Generated by Django 2.0.8 on 2018-09-21 10:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_challenges', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='challenge',
name='challenge_code',
field=models.CharField(blank=True, max_length=5, null=True, unique=True, verbose_name='Challenge Code'),
),
]
| 24.157895 | 116 | 0.62963 | [
"MIT"
] | Audiotuete/backend_challenge_api | app_challenges/migrations/0002_auto_20180921_1031.py | 459 | Python |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import base64
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class btcmarkets (Exchange):
def describe(self):
return self.deep_extend(super(btcmarkets, self).describe(), {
'id': 'btcmarkets',
'name': 'BTC Markets',
'countries': ['AU'], # Australia
'rateLimit': 1000, # market data cached for 1 second(trades cached for 2 seconds)
'has': {
'CORS': False,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchClosedOrders': 'emulated',
'fetchOpenOrders': True,
'fetchMyTrades': True,
'cancelOrders': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/29142911-0e1acfc2-7d5c-11e7-98c4-07d9532b29d7.jpg',
'api': {
'public': 'https://api.btcmarkets.net',
'private': 'https://api.btcmarkets.net',
'web': 'https://btcmarkets.net/data',
},
'www': 'https://btcmarkets.net',
'doc': 'https://github.com/BTCMarkets/API',
},
'api': {
'public': {
'get': [
'market/{id}/tick',
'market/{id}/orderbook',
'market/{id}/trades',
'v2/market/{id}/tickByTime/{timeframe}',
'v2/market/{id}/trades',
'v2/market/active',
],
},
'private': {
'get': [
'account/balance',
'account/{id}/tradingfee',
'fundtransfer/history',
'v2/order/open',
'v2/order/open/{id}',
'v2/order/history/{instrument}/{currency}/',
'v2/order/trade/history/{id}',
'v2/transaction/history/{currency}',
],
'post': [
'fundtransfer/withdrawCrypto',
'fundtransfer/withdrawEFT',
'order/create',
'order/cancel',
'order/history',
'order/open',
'order/trade/history',
'order/createBatch', # they promise it's coming soon...
'order/detail',
],
},
'web': {
'get': [
'market/BTCMarkets/{id}/tickByTime',
],
},
},
'timeframes': {
'1m': 'minute',
'1h': 'hour',
'1d': 'day',
},
'exceptions': {
'3': InvalidOrder,
'6': DDoSProtection,
},
})
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if limit is not None:
request['limit'] = limit
if since is not None:
request['since'] = since
response = await self.privateGetFundtransferHistory(self.extend(request, params))
transactions = response['fundTransfers']
return self.parseTransactions(transactions, None, since, limit)
def parse_transaction_status(self, status):
# todo: find more statuses
statuses = {
'Complete': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, item, currency=None):
#
# {
# status: 'Complete',
# fundTransferId: 1904311906,
# description: 'ETH withdraw from [[email protected]] to Address: 0xF123aa44FadEa913a7da99cc2eE202Db684Ce0e3 amount: 8.28965701 fee: 0.00000000',
# creationTime: 1529418358525,
# currency: 'ETH',
# amount: 828965701,
# fee: 0,
# transferType: 'WITHDRAW',
# errorMessage: null,
# lastUpdate: 1529418376754,
# cryptoPaymentDetail: {
# address: '0xF123aa44FadEa913a7da99cc2eE202Db684Ce0e3',
# txId: '0x8fe483b6f9523559b9ebffb29624f98e86227d2660d4a1fd4785d45e51c662c2'
# }
# }
#
# {
# status: 'Complete',
# fundTransferId: 494077500,
# description: 'BITCOIN Deposit, B 0.1000',
# creationTime: 1501077601015,
# currency: 'BTC',
# amount: 10000000,
# fee: 0,
# transferType: 'DEPOSIT',
# errorMessage: null,
# lastUpdate: 1501077601133,
# cryptoPaymentDetail: null
# }
#
# {
# "fee": 0,
# "amount": 56,
# "status": "Complete",
# "currency": "BCHABC",
# "lastUpdate": 1542339164044,
# "description": "BitcoinCashABC Deposit, P 0.00000056",
# "creationTime": 1542339164003,
# "errorMessage": null,
# "transferType": "DEPOSIT",
# "fundTransferId": 2527326972,
# "cryptoPaymentDetail": null
# }
#
timestamp = self.safe_integer(item, 'creationTime')
lastUpdate = self.safe_integer(item, 'lastUpdate')
transferType = self.safe_string(item, 'transferType')
cryptoPaymentDetail = self.safe_value(item, 'cryptoPaymentDetail', {})
address = self.safe_string(cryptoPaymentDetail, 'address')
txid = self.safe_string(cryptoPaymentDetail, 'txId')
type = None
if transferType == 'DEPOSIT':
type = 'deposit'
elif transferType == 'WITHDRAW':
type = 'withdrawal'
else:
type = transferType
fee = self.safe_float(item, 'fee')
status = self.parse_transaction_status(self.safe_string(item, 'status'))
ccy = self.safe_string(item, 'currency')
code = self.common_currency_code(ccy)
# todo: self logic is duplicated below
amount = self.safe_float(item, 'amount')
if amount is not None:
amount = amount * 1e-8
return {
'id': self.safe_string(item, 'fundTransferId'),
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'tag': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': lastUpdate,
'fee': {
'currency': code,
'cost': fee,
},
'info': item,
}
async def fetch_markets(self, params={}):
response = await self.publicGetV2MarketActive(params)
result = []
markets = self.safe_value(response, 'markets')
for i in range(0, len(markets)):
market = markets[i]
baseId = self.safe_string(market, 'instrument')
quoteId = self.safe_string(market, 'currency')
id = baseId + '/' + quoteId
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
# todo: refactor self
fee = 0.0085 if (quote == 'AUD') else 0.0022
pricePrecision = 2
amountPrecision = 4
minAmount = 0.001 # where does it come from?
minPrice = None
if quote == 'AUD':
if (base == 'XRP') or (base == 'OMG'):
pricePrecision = 4
amountPrecision = -math.log10(minAmount)
minPrice = math.pow(10, -pricePrecision)
precision = {
'amount': amountPrecision,
'price': pricePrecision,
}
limits = {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': minPrice,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
result.append({
'info': market,
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': None,
'maker': fee,
'taker': fee,
'limits': limits,
'precision': precision,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
balances = await self.privateGetAccountBalance(params)
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.common_currency_code(currencyId)
multiplier = 100000000
total = self.safe_float(balance, 'balance') / multiplier
used = self.safe_float(balance, 'pendingFunds') / multiplier
free = total - used
account = {
'free': free,
'used': used,
'total': total,
}
result[code] = account
return self.parse_balance(result)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
multiplier = 100000000 # for price and volume
return [
ohlcv[0],
float(ohlcv[1]) / multiplier,
float(ohlcv[2]) / multiplier,
float(ohlcv[3]) / multiplier,
float(ohlcv[4]) / multiplier,
float(ohlcv[5]) / multiplier,
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
'timeWindow': self.timeframes[timeframe],
}
if since is not None:
request['since'] = since
response = await self.webGetMarketBTCMarketsIdTickByTime(self.extend(request, params))
return self.parse_ohlcvs(response['ticks'], market, timeframe, since, limit)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
response = await self.publicGetMarketIdOrderbook(self.extend(request, params))
timestamp = self.safe_integer(response, 'timestamp')
if timestamp is not None:
timestamp *= 1000
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_integer(ticker, 'timestamp')
if timestamp is not None:
timestamp *= 1000
symbol = None
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'lastPrice')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_float(ticker, 'bestBid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'bestAsk'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume24h'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
response = await self.publicGetMarketIdTick(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
timestamp = self.safe_integer(trade, 'timestamp')
if timestamp is not None:
timestamp *= 1000
symbol = None
if market is not None:
symbol = market['symbol']
id = self.safe_string(trade, 'tid')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
return {
'info': trade,
'id': id,
'order': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
# 'since': 59868345231,
'id': market['id'],
}
response = await self.publicGetMarketIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
multiplier = 100000000 # for price and volume
orderSide = 'Bid' if (side == 'buy') else 'Ask'
request = self.ordered({
'currency': market['quote'],
})
request['currency'] = market['quote']
request['instrument'] = market['base']
request['price'] = int(price * multiplier)
request['volume'] = int(amount * multiplier)
request['orderSide'] = orderSide
request['ordertype'] = self.capitalize(type)
request['clientRequestId'] = str(self.nonce())
response = await self.privatePostOrderCreate(self.extend(request, params))
id = self.safe_string(response, 'id')
return {
'info': response,
'id': id,
}
async def cancel_orders(self, ids, symbol=None, params={}):
await self.load_markets()
for i in range(0, len(ids)):
ids[i] = int(ids[i])
request = {
'orderIds': ids,
}
return await self.privatePostOrderCancel(self.extend(request, params))
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.cancel_orders([id])
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
currency = None
cost = None
if market['quote'] == 'AUD':
currency = market['quote']
cost = float(self.cost_to_precision(symbol, amount * price))
else:
currency = market['base']
cost = float(self.amount_to_precision(symbol, amount))
return {
'type': takerOrMaker,
'currency': currency,
'rate': rate,
'cost': float(self.fee_to_precision(symbol, rate * cost)),
}
def parse_my_trade(self, trade, market):
multiplier = 100000000
timestamp = self.safe_integer(trade, 'creationTime')
        side = self.safe_string(trade, 'side')
side = 'buy' if (side == 'Bid') else 'sell'
# BTCMarkets always charge in AUD for AUD-related transactions.
feeCurrencyCode = None
symbol = None
if market is not None:
feeCurrencyCode = market['quote'] if (market['quote'] == 'AUD') else market['base']
symbol = market['symbol']
id = self.safe_string(trade, 'id')
price = self.safe_float(trade, 'price')
if price is not None:
price /= multiplier
amount = self.safe_float(trade, 'volume')
if amount is not None:
amount /= multiplier
feeCost = self.safe_float(trade, 'fee')
if feeCost is not None:
feeCost /= multiplier
cost = None
if price is not None:
if amount is not None:
cost = price * amount
orderId = self.safe_string(trade, 'orderId')
return {
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': {
'currency': feeCurrencyCode,
'cost': feeCost,
},
}
def parse_my_trades(self, trades, market=None, since=None, limit=None):
result = []
for i in range(0, len(trades)):
trade = self.parse_my_trade(trades[i], market)
result.append(trade)
return result
def parse_order(self, order, market=None):
multiplier = 100000000
side = 'buy' if (order['orderSide'] == 'Bid') else 'sell'
type = 'limit' if (order['ordertype'] == 'Limit') else 'market'
timestamp = self.safe_integer(order, 'creationTime')
if market is None:
market = self.market(order['instrument'] + '/' + order['currency'])
status = 'open'
if order['status'] == 'Failed' or order['status'] == 'Cancelled' or order['status'] == 'Partially Cancelled' or order['status'] == 'Error':
status = 'canceled'
elif order['status'] == 'Fully Matched' or order['status'] == 'Partially Matched':
status = 'closed'
price = self.safe_float(order, 'price') / multiplier
amount = self.safe_float(order, 'volume') / multiplier
remaining = self.safe_float(order, 'openVolume', 0.0) / multiplier
filled = amount - remaining
trades = self.parse_my_trades(order['trades'], market)
numTrades = len(trades)
cost = filled * price
average = None
lastTradeTimestamp = None
if numTrades > 0:
cost = 0
for i in range(0, numTrades):
trade = trades[i]
                cost = self.sum(cost, trade['cost'])
if filled > 0:
average = cost / filled
lastTradeTimestamp = trades[numTrades - 1]['timestamp']
id = self.safe_string(order, 'id')
return {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': market['symbol'],
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'average': average,
'status': status,
'trades': trades,
'fee': None,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
ids = [int(id)]
request = {
'orderIds': ids,
}
response = await self.privatePostOrderDetail(self.extend(request, params))
numOrders = len(response['orders'])
if numOrders < 1:
raise OrderNotFound(self.id + ' No matching order found: ' + id)
order = response['orders'][0]
return self.parse_order(order)
def create_paginated_request(self, market, since=None, limit=None):
limit = 100 if (limit is None) else limit
since = 0 if (since is None) else since
request = self.ordered({
'currency': market['quoteId'],
'instrument': market['baseId'],
'limit': limit,
'since': since,
})
return request
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ': fetchOrders requires a `symbol` argument.')
await self.load_markets()
market = self.market(symbol)
request = self.create_paginated_request(market, since, limit)
response = await self.privatePostOrderHistory(self.extend(request, params))
return self.parse_orders(response['orders'], market)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ': fetchOpenOrders requires a `symbol` argument.')
await self.load_markets()
market = self.market(symbol)
request = self.create_paginated_request(market, since, limit)
response = await self.privatePostOrderOpen(self.extend(request, params))
return self.parse_orders(response['orders'], market)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ': fetchMyTrades requires a `symbol` argument.')
await self.load_markets()
market = self.market(symbol)
request = self.create_paginated_request(market, since, limit)
response = await self.privatePostOrderTradeHistory(self.extend(request, params))
return self.parse_my_trades(response['trades'], market)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
uri = '/' + self.implode_params(path, params)
url = self.urls['api'][api] + uri
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
auth = None
headers = {
'apikey': self.apiKey,
'timestamp': nonce,
}
if method == 'POST':
headers['Content-Type'] = 'application/json'
auth = uri + "\n" + nonce + "\n" # eslint-disable-line quotes
body = self.json(params)
auth += body
else:
query = self.keysort(self.omit(params, self.extract_params(path)))
queryString = ''
if query:
queryString = self.urlencode(query)
url += '?' + queryString
queryString += "\n" # eslint-disable-line quotes
auth = uri + "\n" + queryString + nonce + "\n" # eslint-disable-line quotes
secret = base64.b64decode(self.secret)
signature = self.hmac(self.encode(auth), secret, hashlib.sha512, 'base64')
headers['signature'] = self.decode(signature)
else:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response):
if response is None:
return # fallback to default error handler
if 'success' in response:
if not response['success']:
error = self.safe_string(response, 'errorCode')
message = self.id + ' ' + self.json(response)
if error in self.exceptions:
ExceptionClass = self.exceptions[error]
raise ExceptionClass(message)
else:
raise ExchangeError(message)
| 38.633282 | 156 | 0.517489 | [
"MIT"
] | QuoineFinancial/ccxt | python/ccxt/async_support/btcmarkets.py | 25,073 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This example publishes the /person_info topic, using the custom message type learning_communication::PersonMsg
import rospy
from learning_communication.msg import PersonMsg
def velocity_publisher():
    # Initialize the ROS node
rospy.init_node('person_publisher', anonymous=True)
    # Create a Publisher for the /person_info topic, with message type PersonMsg and a queue size of 10
person_info_pub = rospy.Publisher('/person_info', PersonMsg, queue_size=10)
    # Set the loop rate
rate = rospy.Rate(10)
while not rospy.is_shutdown():
        # Initialize a PersonMsg message
person_msg = PersonMsg()
person_msg.name = "Tom";
person_msg.age = 18;
person_msg.sex = PersonMsg.male;
        # Publish the message
person_info_pub.publish(person_msg)
rospy.loginfo("Publsh person message[%s, %d, %d]",
person_msg.name, person_msg.age, person_msg.sex)
        # Sleep to maintain the loop rate
rate.sleep()
if __name__ == '__main__':
try:
velocity_publisher()
except rospy.ROSInterruptException:
pass
| 25.026316 | 79 | 0.700315 | [
"Apache-2.0"
] | Colin1245/ROS-Theory-Application-Shenlan | 3.ROS_communication/learning_communication/scripts/person_publisher.py | 1,089 | Python |
#one.py
print('hello')
def func():
print("Func() in one.py")
print("TOP LEVEL IN one.py")
if __name__ == "__main__":
print("one.py is being run directly!")
else:
print("one.py has been imported!") | 16.833333 | 39 | 0.658416 | [
"MIT"
] | bitadept/codebook | Programming Languages & Libraries/Python/Python Complete Bootcamp/__name__ and __main__/one.py | 202 | Python |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow general top-level functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import einsum
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import norm
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import tensordot
__all__ = [
'concat',
'einsum',
'expand_dims',
'fill',
'gather',
'gather_nd',
'linspace',
'meshgrid',
'norm',
'one_hot',
'ones',
'ones_like',
'pad',
'range',
'rank',
'reshape',
'reverse',
'repeat',
'roll',
'searchsorted',
'shape',
'size',
'slice',
'split',
'squeeze',
'stack',
'tensordot',
'tile',
'transpose',
'unstack',
'where',
'zeros',
'zeros_like',
# 'boolean_mask',
# 'foldl',
# 'foldr',
]
JAX_MODE = False
if JAX_MODE:
import jax # pylint: disable=g-import-not-at-top
def _astuple(x):
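  # Best-effort conversion to a tuple; values that are not iterable are returned unchanged.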
try:
return tuple(x)
except TypeError:
return x
def _gather( # pylint: disable=unused-argument
params,
indices,
validate_indices=None,
axis=None,
batch_dims=0,
name=None):
"""gather."""
indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
if validate_indices is not None:
raise NotImplementedError(
'Argument `validate_indices != None` is currently unimplemented.')
if batch_dims < 0:
raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
if axis is None:
axis = batch_dims
if axis < 0:
axis = axis + len(params.shape)
# NOTE: For only the numpy backend, this function could create a single result
# ndarray and use in-place updates. For the Jax backend, this function
# vmaps `np.take`.
if JAX_MODE:
take = lambda params, indices: np.take(params, indices, # pylint: disable=g-long-lambda
axis=axis - batch_dims)
take = functools.reduce(
lambda g, f: f(g), [jax.vmap] * int(batch_dims),
take
)
return take(params, indices)
params = ops.convert_to_tensor(params)
res = np.array([
np.take(params[i], indices[i], axis=axis - batch_dims)
for i in np.ndindex(*params.shape[:batch_dims])
])
return np.reshape(
res,
params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis+1:])
def _args_to_matching_arrays(args_list, dtype_hint=None):
"""Converts a list to array using the first element for dtype.
This method is used to match the behavior of `tf.concat`.
Args:
args_list: A list or tuple of arguments.
dtype_hint: An optional hint used when converting the args to tensors.
Returns:
A list of tensors.
"""
dtype = None
for arg in args_list:
if ops.is_tensor(arg):
dtype = arg.dtype
break
if dtype is None:
ret = []
for arg in args_list:
ret.append(ops.convert_to_tensor(arg, dtype, dtype_hint=dtype_hint))
if dtype is None:
dtype = ret[-1].dtype
else:
ret = [ops.convert_to_tensor(arg, dtype) for arg in args_list]
return ret
def _concat(values, axis, name='concat'):
del name
if axis is None:
raise ValueError('None values for `axis` argument not supported.')
if not isinstance(values, (list, tuple)):
values = [values]
if len(values) == 1:
return values[0]
values = _args_to_matching_arrays(values)
return np.concatenate(values, axis=axis)
def _gather_nd_single(params, indices):
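  # Move the trailing index axis to the front so it can be used as a tuple of per-dimension indices.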
idx = tuple(np.moveaxis(indices, -1, 0))
return params[idx]
def _gather_nd( # pylint: disable=unused-argument
params,
indices,
batch_dims=0,
name=None):
"""gather_nd."""
indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
if batch_dims < 0:
raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
if not JAX_MODE and batch_dims > 0:
raise NotImplementedError(
'`batch_dims > 0` currently unsupported in NumPy backend.')
gather_nd_ = _gather_nd_single
if JAX_MODE:
gather_nd_ = functools.reduce(
lambda g, f: f(g), [jax.vmap] * int(batch_dims),
gather_nd_
)
return gather_nd_(params, indices)
def _linspace(start, stop, num, name=None, axis=0): # pylint: disable=unused-argument
"""Match TF behavior with np.linspace."""
start = ops.convert_to_tensor(start)
# Match TF weirdness arising from truediv(int32, int32) = float64
if np.issubdtype(start.dtype, np.integer):
start = start.astype(np.float64)
stop = ops.convert_to_tensor(stop, dtype=start.dtype)
num = ops.convert_to_tensor(num, dtype_hint=np.int32)
if not np.issubdtype(num.dtype, np.integer):
raise TypeError('`num` must be an integer but got {}'.format(num.dtype))
num = num.astype(np.int32)
return np.linspace(start, stop, num, axis=axis).astype(start.dtype)
def _one_hot( # pylint: disable=unused-argument
indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""One hot."""
if on_value is None:
on_value = 1
if off_value is None:
off_value = 0
if dtype is None:
dtype = utils.common_dtype([on_value, off_value], np.float32)
indices = np.array(indices)
depth = np.array(depth)
pred = abs(np.arange(depth, dtype=indices.dtype) -
indices[..., np.newaxis]) > 0
y_out = np.where(pred, np.array(off_value, dtype), np.array(on_value, dtype))
if axis is not None:
y_out = np.moveaxis(y_out, -1, axis)
return y_out
def _ones_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin,unused-argument
return np.ones_like(input, dtype=utils.numpy_dtype(dtype))
# TODO(b/136555907): Add unit-test.
def _pad( # pylint: disable=unused-argument
tensor,
paddings,
mode='CONSTANT',
constant_values=0,
name=None):
return np.pad(
tensor, paddings,
mode=mode.lower(),
constant_values=constant_values)
def _range(start, limit=None, delta=1, dtype=None, name='range'): # pylint: disable=unused-argument
"""Emulates tf.range."""
# Emulating dtype inference logic from tf.range
dtype = utils.numpy_dtype(dtype)
start = ops.convert_to_tensor(start, dtype=dtype)
limit = None if limit is None else ops.convert_to_tensor(limit, dtype=dtype)
delta = ops.convert_to_tensor(delta, dtype=dtype)
if dtype is None:
dtype_hierarchy = [np.int32, np.int64, np.float32, np.float64]
inferred_dtype = max([arg.dtype for arg in [start, limit, delta]
if arg is not None],
key=dtype_hierarchy.index)
else:
inferred_dtype = dtype
return np.arange(start, limit, delta).astype(inferred_dtype)
def _reverse(tensor, axis, name=None): # pylint: disable=unused-argument
if np.array(axis).ndim == 0:
return np.flip(tensor, axis)
for ax in axis:
tensor = np.flip(tensor, ax)
return tensor
if JAX_MODE:
_searchsorted_vmap_sides = {
side: jax.vmap(functools.partial(jax.numpy.searchsorted, side=side))
for side in ('left', 'right')
}
def _searchsorted( # pylint: disable=unused-argument
sorted_sequence,
values,
side='left',
out_type=np.int32,
name=None):
"""Find indices for insertion for list to remain sorted."""
if JAX_MODE:
try:
func = _searchsorted_vmap_sides[side]
except KeyError:
raise ValueError("'%s' is an invalid value for keyword 'side'" % side)
sorted_sequence_2d = np.reshape(sorted_sequence,
(-1, sorted_sequence.shape[-1]))
values_2d = np.reshape(values, (-1, values.shape[-1]))
if sorted_sequence_2d.shape[0] != values_2d.shape[0]:
raise ValueError('Leading dim_size of both tensors must match.')
return np.reshape(func(sorted_sequence_2d, values_2d).astype(out_type),
values.shape)
# We don't use np.searchsorted in the numpy backend because it doesn't support
# batching.
sorted_sequence = sorted_sequence[..., np.newaxis, :]
values = values[..., :, np.newaxis]
if side == 'left':
is_in_right_location = sorted_sequence < values
elif side == 'right':
is_in_right_location = sorted_sequence <= values
return np.sum(is_in_right_location, axis=-1).astype(out_type)
def _shape(input, out_type=np.int32, name=None): # pylint: disable=redefined-builtin,unused-argument
return ops.convert_to_tensor(ops.convert_to_tensor(input).shape).astype(
out_type)
def _size(input, out_type=np.int32, name=None): # pylint: disable=redefined-builtin, unused-argument
return np.asarray(np.prod(ops.convert_to_tensor(input).shape), dtype=out_type)
builtin_slice = slice # pylint: disable=invalid-name
def _slice(input_, begin, size, name=None): # pylint: disable=unused-argument,redefined-outer-name
slices = tuple(
builtin_slice(b, b + s if s != -1 else None) for b, s in zip(begin, size))
return input_[slices]
def _split(value, num_or_size_splits, axis=0, num=None, name='split'): # pylint: disable=unused-argument
"""Map tf.split -> np.split."""
indices_or_sections = np.array(num_or_size_splits)
if indices_or_sections.ndim == 1:
if any(idx == -1 for idx in indices_or_sections):
# Numpy parameterizes by split indices and returns nsplits+1 arrays.
total_splits = sum(idx for idx in indices_or_sections if idx != -1)
remainder = int(max(0, np.array(value).shape[axis] - total_splits))
indices_or_sections = [
idx if idx != -1 else remainder for idx in indices_or_sections
]
indices_or_sections = np.cumsum(np.array(indices_or_sections))[:-1]
return np.split(value, indices_or_sections, axis)
def _stack(values, axis=0, name='stack'):
del name
if axis is None:
raise ValueError('None values for `axis` argument not supported.')
values = _args_to_matching_arrays(values)
return np.stack(values, axis=axis)
def _transpose(a, perm=None, conjugate=False, name='transpose'): # pylint: disable=unused-argument
x = np.transpose(a, perm)
return np.conjugate(x) if conjugate else x
def _zeros_like(input, dtype=None, name=None): # pylint: disable=redefined-builtin,unused-argument
return np.zeros_like(input, dtype=utils.numpy_dtype(dtype))
# --- Begin Public Functions --------------------------------------------------
concat = utils.copy_docstring(
'tf.concat',
_concat)
expand_dims = utils.copy_docstring(
'tf.expand_dims',
lambda input, axis, name=None: np.expand_dims(input, axis))
fill = utils.copy_docstring(
'tf.fill',
lambda dims, value, name=None: np.full(dims, value))
gather = utils.copy_docstring(
'tf.gather',
_gather)
gather_nd = utils.copy_docstring(
'tf.gather_nd',
_gather_nd)
reverse = utils.copy_docstring('tf.reverse', _reverse)
linspace = utils.copy_docstring(
'tf.linspace',
_linspace)
meshgrid = utils.copy_docstring(
'tf.meshgrid',
np.meshgrid)
norm = utils.copy_docstring(
'tf.norm',
norm)
one_hot = utils.copy_docstring(
'tf.one_hot',
_one_hot)
ones = utils.copy_docstring(
'tf.ones',
lambda shape, dtype=np.float32, name=None: np.ones( # pylint: disable=g-long-lambda
shape, utils.numpy_dtype(dtype)))
ones_like = utils.copy_docstring(
'tf.ones_like',
_ones_like)
pad = utils.copy_docstring(
'tf.pad',
_pad)
range = utils.copy_docstring( # pylint: disable=redefined-builtin
'tf.range',
_range)
rank = utils.copy_docstring(
'tf.rank',
lambda input, name=None: np.int32(np.array(input).ndim)) # pylint: disable=redefined-builtin,g-long-lambda
repeat = utils.copy_docstring(
'tf.repeat',
lambda input, repeats, axis=None, name=None: np.repeat( # pylint: disable=g-long-lambda
input, repeats, axis=axis))
reshape = utils.copy_docstring(
'tf.reshape',
lambda tensor, shape, name=None: np.reshape( # pylint: disable=g-long-lambda
ops.convert_to_tensor(tensor), shape))
roll = utils.copy_docstring(
'tf.roll',
lambda input, shift, axis: np.roll(input, shift, axis)) # pylint: disable=unnecessary-lambda
searchsorted = utils.copy_docstring(
'tf.searchsorted',
_searchsorted)
shape = utils.copy_docstring(
'tf.shape',
_shape)
size = utils.copy_docstring(
'tf.size',
_size)
slice = utils.copy_docstring( # pylint: disable=redefined-builtin
'tf.slice', _slice)
split = utils.copy_docstring('tf.split', _split)
squeeze = utils.copy_docstring(
'tf.squeeze',
lambda input, axis=None, name=None: np.squeeze(input, _astuple(axis)))
stack = utils.copy_docstring(
'tf.stack',
_stack)
tile = utils.copy_docstring(
'tf.tile',
lambda input, multiples, name=None: np.tile(np.array(input), multiples))
transpose = utils.copy_docstring(
'tf.transpose',
_transpose)
unstack = utils.copy_docstring(
'tf.unstack',
lambda value, num=None, axis=0, name='unstack': tuple( # pylint: disable=g-long-lambda
np.squeeze(x, axis=axis) for x in
np.split(value, value.shape[axis] if num is None else num, axis)))
where = utils.copy_docstring(
'tf.where',
lambda condition, x=None, y=None, name=None: np.where(condition, x, y))
zeros = utils.copy_docstring(
'tf.zeros',
lambda shape, dtype=np.float32, name=None: np.zeros( # pylint: disable=g-long-lambda
shape, utils.numpy_dtype(dtype)))
zeros_like = utils.copy_docstring(
'tf.zeros_like',
_zeros_like)
| 29.736626 | 111 | 0.679076 | [
"Apache-2.0"
] | michalbrys/probability | tensorflow_probability/python/internal/backend/numpy/numpy_array.py | 14,452 | Python |
import netrc, os, unittest, sys, textwrap
from test import test_support
temp_filename = test_support.TESTFN
class NetrcTestCase(unittest.TestCase):
def make_nrc(self, test_data):
test_data = textwrap.dedent(test_data)
mode = 'w'
if sys.platform != 'cygwin':
mode += 't'
with open(temp_filename, mode) as fp:
fp.write(test_data)
self.addCleanup(os.unlink, temp_filename)
return netrc.netrc(temp_filename)
def test_default(self):
nrc = self.make_nrc("""\
machine host1.domain.com login log1 password pass1 account acct1
default login log2 password pass2
""")
self.assertEqual(nrc.hosts['host1.domain.com'],
('log1', 'acct1', 'pass1'))
self.assertEqual(nrc.hosts['default'], ('log2', None, 'pass2'))
def test_macros(self):
nrc = self.make_nrc("""\
macdef macro1
line1
line2
macdef macro2
line3
line4
""")
self.assertEqual(nrc.macros, {'macro1': ['line1\n', 'line2\n'],
'macro2': ['line3\n', 'line4\n']})
def _test_passwords(self, nrc, passwd):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['host.domain.com'], ('log', 'acct', passwd))
def test_password_with_leading_hash(self):
self._test_passwords("""\
machine host.domain.com login log password #pass account acct
""", '#pass')
def test_password_with_trailing_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pass# account acct
""", 'pass#')
def test_password_with_internal_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pa#ss account acct
""", 'pa#ss')
def _test_comment(self, nrc, passwd='pass'):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['foo.domain.com'], ('bar', None, passwd))
self.assertEqual(nrc.hosts['bar.domain.com'], ('foo', None, 'pass'))
def test_comment_before_machine_line(self):
self._test_comment("""\
# comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_before_machine_line_no_space(self):
self._test_comment("""\
#comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_before_machine_line_hash_only(self):
self._test_comment("""\
#
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line(self):
self._test_comment("""\
machine foo.domain.com login bar password pass # comment
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line_no_space(self):
self._test_comment("""\
machine foo.domain.com login bar password pass #comment
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line_pass_has_hash(self):
self._test_comment("""\
machine foo.domain.com login bar password #pass #comment
machine bar.domain.com login foo password pass
""", '#pass')
@unittest.skipUnless(os.name == 'posix', 'POSIX only test')
def test_security(self):
# This test is incomplete since we are normally not run as root and
# therefore can't test the file ownership being wrong.
d = test_support.TESTFN
os.mkdir(d)
self.addCleanup(test_support.rmtree, d)
fn = os.path.join(d, '.netrc')
with open(fn, 'wt') as f:
f.write("""\
machine foo.domain.com login bar password pass
default login foo password pass
""")
with test_support.EnvironmentVarGuard() as environ:
environ.set('HOME', d)
os.chmod(fn, 0600)
nrc = netrc.netrc()
self.assertEqual(nrc.hosts['foo.domain.com'],
('bar', None, 'pass'))
os.chmod(fn, 0o622)
self.assertRaises(netrc.NetrcParseError, netrc.netrc)
def test_main():
test_support.run_unittest(NetrcTestCase)
if __name__ == "__main__":
test_main()
| 36.381679 | 80 | 0.573017 | [
"Apache-2.0"
] | Jeff-Tian/mybnb | Python27/Lib/test/test_netrc.py | 4,766 | Python |
"""pycodestyle support."""
from pycodestyle import BaseReport, StyleGuide, get_parser, _parse_multi_options
from pylama.lint import Linter as Abstract
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Linter(Abstract):
"""pycodestyle runner."""
@staticmethod
def run(path, code=None, params=None, **meta):
"""Check code with pycodestyle.
:return list: List of errors.
"""
parser = get_parser()
for option in parser.option_list:
if option.dest and option.dest in params:
value = params[option.dest]
if isinstance(value, str):
params[option.dest] = option.convert_value(option, value)
for key in ["filename", "exclude", "select", "ignore"]:
if key in params and isinstance(params[key], str):
params[key] = _parse_multi_options(params[key])
P8Style = StyleGuide(reporter=_PycodestyleReport, **params)
buf = StringIO(code)
return P8Style.input_file(path, lines=buf.readlines())
class _PycodestyleReport(BaseReport):
def __init__(self, *args, **kwargs):
super(_PycodestyleReport, self).__init__(*args, **kwargs)
self.errors = []
def init_file(self, filename, lines, expected, line_offset):
"""Prepare storage for errors."""
super(_PycodestyleReport, self).init_file(
filename, lines, expected, line_offset)
self.errors = []
def error(self, line_number, offset, text, check):
"""Save errors."""
code = super(_PycodestyleReport, self).error(
line_number, offset, text, check)
if code:
self.errors.append(dict(
text=text,
type=code.replace('E', 'C'),
col=offset + 1,
lnum=line_number,
))
def get_file_results(self):
"""Get errors.
:return list: List of errors.
"""
return self.errors
| 28.704225 | 80 | 0.597645 | [
"MIT"
] | BHills15/vimrc | vimfiles/bundle/vim-python/submodules/pylama/pylama/lint/pylama_pycodestyle.py | 2,038 | Python |
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
"""
Module conf loads and parses configuration file
"""
import os
import azurelinuxagent.common.utils.fileutil as fileutil
from azurelinuxagent.common.exception import AgentConfigError
class ConfigurationProvider(object):
"""
    Parse and store key:value pairs from /etc/waagent.conf.
"""
def __init__(self):
self.values = dict()
def load(self, content):
if not content:
raise AgentConfigError("Can't not parse empty configuration")
for line in content.split('\n'):
if not line.startswith("#") and "=" in line:
parts = line.split()[0].split('=')
value = parts[1].strip("\" ")
if value != "None":
self.values[parts[0]] = value
else:
self.values[parts[0]] = None
def get(self, key, default_val):
val = self.values.get(key)
return val if val is not None else default_val
def get_switch(self, key, default_val):
val = self.values.get(key)
if val is not None and val.lower() == 'y':
return True
elif val is not None and val.lower() == 'n':
return False
return default_val
def get_int(self, key, default_val):
try:
return int(self.values.get(key))
except TypeError:
return default_val
except ValueError:
return default_val
__conf__ = ConfigurationProvider()
def load_conf_from_file(conf_file_path, conf=__conf__):
"""
Load conf file from: conf_file_path
"""
    if not os.path.isfile(conf_file_path):
raise AgentConfigError(("Missing configuration in {0}"
"").format(conf_file_path))
try:
content = fileutil.read_file(conf_file_path)
conf.load(content)
except IOError as err:
raise AgentConfigError(("Failed to load conf file:{0}, {1}"
"").format(conf_file_path, err))
def enable_rdma(conf=__conf__):
return conf.get_switch("OS.EnableRDMA", False) or \
conf.get_switch("OS.UpdateRdmaDriver", False) or \
conf.get_switch("OS.CheckRdmaDriver", False)
def get_logs_verbose(conf=__conf__):
return conf.get_switch("Logs.Verbose", False)
def get_lib_dir(conf=__conf__):
return conf.get("Lib.Dir", "/var/lib/waagent")
def get_published_hostname(conf=__conf__):
return os.path.join(get_lib_dir(conf), 'published_hostname')
def get_dvd_mount_point(conf=__conf__):
return conf.get("DVD.MountPoint", "/mnt/cdrom/secure")
def get_agent_pid_file_path(conf=__conf__):
return conf.get("Pid.File", "/var/run/waagent.pid")
def get_ext_log_dir(conf=__conf__):
return conf.get("Extension.LogDir", "/var/log/azure")
def get_openssl_cmd(conf=__conf__):
return conf.get("OS.OpensslPath", "/usr/bin/openssl")
def get_home_dir(conf=__conf__):
return conf.get("OS.HomeDir", "/home")
def get_passwd_file_path(conf=__conf__):
return conf.get("OS.PasswordPath", "/etc/shadow")
def get_sudoers_dir(conf=__conf__):
return conf.get("OS.SudoersDir", "/etc/sudoers.d")
def get_sshd_conf_file_path(conf=__conf__):
return conf.get("OS.SshdConfigPath", "/etc/ssh/sshd_config")
def get_root_device_scsi_timeout(conf=__conf__):
return conf.get("OS.RootDeviceScsiTimeout", None)
def get_ssh_host_keypair_type(conf=__conf__):
return conf.get("Provisioning.SshHostKeyPairType", "rsa")
def get_provision_enabled(conf=__conf__):
return conf.get_switch("Provisioning.Enabled", True)
def get_allow_reset_sys_user(conf=__conf__):
return conf.get_switch("Provisioning.AllowResetSysUser", False)
def get_regenerate_ssh_host_key(conf=__conf__):
return conf.get_switch("Provisioning.RegenerateSshHostKeyPair", False)
def get_delete_root_password(conf=__conf__):
return conf.get_switch("Provisioning.DeleteRootPassword", False)
def get_decode_customdata(conf=__conf__):
return conf.get_switch("Provisioning.DecodeCustomData", False)
def get_execute_customdata(conf=__conf__):
return conf.get_switch("Provisioning.ExecuteCustomData", False)
def get_password_cryptid(conf=__conf__):
return conf.get("Provisioning.PasswordCryptId", "6")
def get_password_crypt_salt_len(conf=__conf__):
return conf.get_int("Provisioning.PasswordCryptSaltLength", 10)
def get_monitor_hostname(conf=__conf__):
return conf.get_switch("Provisioning.MonitorHostName", False)
def get_httpproxy_host(conf=__conf__):
return conf.get("HttpProxy.Host", None)
def get_httpproxy_port(conf=__conf__):
return conf.get_int("HttpProxy.Port", None)
def get_detect_scvmm_env(conf=__conf__):
return conf.get_switch("DetectScvmmEnv", False)
def get_resourcedisk_format(conf=__conf__):
return conf.get_switch("ResourceDisk.Format", False)
def get_resourcedisk_enable_swap(conf=__conf__):
return conf.get_switch("ResourceDisk.EnableSwap", False)
def get_resourcedisk_mountpoint(conf=__conf__):
return conf.get("ResourceDisk.MountPoint", "/mnt/resource")
def get_resourcedisk_mountoptions(conf=__conf__):
return conf.get("ResourceDisk.MountOptions", None)
def get_resourcedisk_filesystem(conf=__conf__):
return conf.get("ResourceDisk.Filesystem", "ext3")
def get_resourcedisk_swap_size_mb(conf=__conf__):
return conf.get_int("ResourceDisk.SwapSizeMB", 0)
def get_autoupdate_gafamily(conf=__conf__):
return conf.get("AutoUpdate.GAFamily", "Prod")
def get_autoupdate_enabled(conf=__conf__):
return conf.get_switch("AutoUpdate.Enabled", True)
def get_autoupdate_frequency(conf=__conf__):
return conf.get_int("Autoupdate.Frequency", 3600)
def get_enable_overprovisioning(conf=__conf__):
return conf.get_switch("EnableOverProvisioning", False) | 28.270742 | 74 | 0.708372 | [
"Apache-2.0"
] | vittyvk/WALinuxAgent | azurelinuxagent/common/conf.py | 6,474 | Python |
import discord
from discord.ext import commands
import humanize
import traceback
import random
import datetime
import json
class Information(commands.Cog):
def __init__(self, client):
self.client = client
self.launched_at = datetime.datetime.utcnow()
@commands.Cog.listener()
async def on_ready(self):
print("Information is ready")
@commands.command(aliases = ["guild", "guildinfo", "si"])
async def serverinfo(self, ctx):
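        # Tally bot accounts and roles up front for the overview embed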
findbots = sum(1 for member in ctx.guild.members if member.bot)
roles = sum(1 for role in ctx.guild.roles)
        embed = discord.Embed(title = 'Information about ' + ctx.guild.name + '.', colour = ctx.author.color)
embed.set_thumbnail(url = str(ctx.guild.icon_url))
embed.add_field(name = "Guild's name: ", value = ctx.guild.name)
embed.add_field(name = "Guild's owner: ", value = str(ctx.guild.owner))
embed.add_field(name = "Guild's verification level: ", value = str(ctx.guild.verification_level))
embed.add_field(name = "Guild's id: ", value = f"`{ctx.guild.id}`")
embed.add_field(name = "Guild's member count: ", value = f"{ctx.guild.member_count}")
embed.add_field(name="Bots", value=f"`{findbots}`", inline=True)
embed.add_field(name = "Guild created at: ", value = str(ctx.guild.created_at.strftime("%a, %d %B %Y, %I:%M %p UTC")))
embed.add_field(name = "Number of Roles:", value = f"`{roles}`")
embed.set_footer(text='Bot Made by NightZan999#0194')
embed.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = embed)
@commands.command(aliases = ["ci"])
async def channelinfo(self, ctx, channel : discord.TextChannel = None):
if channel == None:
channel = ctx.channel
em = discord.Embed(title = f"Info about {channel.name}", color = ctx.author.color, description = f"Here is an insight into {channel.mention}")
em.add_field(name = "ID:", value = f"`{channel.id}`")
em.add_field(name = "Name:", value = f"`{channel.name}`")
em.add_field(name = "Server it belongs to:", value = f"{channel.guild.name}", inline = True)
try:
em.add_field(name = "Category ID:", value = f"`{channel.category_id}`", inline = False)
except:
pass
em.add_field(name = "Topic:", value = f"`{channel.topic}`")
em.add_field(name = "Slowmode:", value = f"`{channel.slowmode_delay}`", inline = True)
em.add_field(name = "People who can see the channel:", value = f"`{len(channel.members)}`", inline = False)
em.add_field(name = "Is NSFW:", value = f"`{channel.is_nsfw()}`")
em.add_field(name = "Is News:", value = f"`{channel.is_news()}`", inline = True)
em.set_footer(text = "invite me ;)", icon_url = ctx.author.avatar_url)
em.set_thumbnail(url = str(ctx.guild.icon_url))
em.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = em)
@commands.command()
async def userinfo(self, ctx, member : discord.Member = None):
if member == None:
member = ctx.author
pos = sum(m.joined_at < member.joined_at for m in ctx.guild.members if m.joined_at is not None)
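        # pos counts how many members joined before this one (join position); it is
        # computed here but not currently shown in the embed below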
roles = [role for role in member.roles]
embed = discord.Embed(title = "👨 Info", color = discord.Color.random(), description = f"Information about: {member.name}")
embed.add_field(name = "Nickname", value = member.nick or None)
embed.add_field(name = "Verification Pending", value = member.pending)
embed.add_field(name = "Status:", value = member.raw_status)
        # mobile_status / desktop_status / web_status are discord.Status values,
        # so compare against Status.offline instead of relying on truthiness.
        if member.mobile_status != discord.Status.offline:
            device = "Mobile"
        elif member.desktop_status != discord.Status.offline:
            device = "Desktop"
        elif member.web_status != discord.Status.offline:
            device = "Web"
        else:
            device = "None"
        embed.add_field(name = "Discord Device:", value = device)
embed.add_field(name = "Color", value = member.color)
embed.add_field(name = "Mention:", value = member.mention)
embed.add_field(name = "Top Role:", value = member.top_role.mention)
embed.add_field(name = "Voice State:", value = member.voice or None)
embed.set_footer(icon_url=member.avatar_url, text=f'Requested By: {ctx.author.name}')
await ctx.send(embed=embed)
@userinfo.error
async def userinfo_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
em = discord.Embed(title = f"<:fail:761292267360485378> Userinfo Error", color = ctx.author.color)
em.add_field(name = f"Reason:", value = f"Arguments were of the wrong data type!")
em.add_field(name = "Args", value = "```diff\n+ imp userinfo <user>\n- imp userinfo e\n```")
em.set_thumbnail(url = ctx.author.avatar_url)
em.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = em)
@commands.command()
async def whois(self,ctx, user : discord.Member = None):
if user == None:
user = ctx.author
em = discord.Embed(title = user.name, color = user.color)
em.add_field(name = "ID:", value = user.id)
em.set_thumbnail(url = user.avatar_url)
em.set_footer(text='Bot Made by NightZan999#0194')
em.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = em)
@whois.error
async def whois_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
em = discord.Embed(title = f"<:fail:761292267360485378> Whois Error", color = ctx.author.color)
em.add_field(name = f"Reason:", value = f"Arguments were of the wrong data type!")
em.add_field(name = "Args", value = "```\nimp whois [@user]\n```")
em.set_thumbnail(url = ctx.author.avatar_url)
em.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = em)
@commands.command(aliases = ["bi"])
async def botinfo(self, ctx):
embed = discord.Embed(title = "Botinfo", color = ctx.author.color,
description = "TheImperialGod, is an awesome customizable discord bot with awesome features. Check some information about the bot below!"
)
embed.add_field(name = "First went live on:", value = "1 / 10 / 2020")
embed.add_field(name = "Started coding on:", value = "26 / 9 / 2020")
embed.add_field(name = f"Creator", value = f"NightZan999#0194")
embed.add_field(name = 'Hosting', value = f"Chaotic Destiny Hosting ")
embed.add_field(name = "Servers:", value = f'`{len(self.client.guilds)}`')
embed.add_field(name = 'Customizable Settings:', value = f"Automoderation and utilities! ")
embed.add_field(name = "Database:", value = "SQLite3")
embed.add_field(name = "Website:", value = "<:VERIFIED_DEVELOPER:761297621502656512> [Web Dashboard](https://theimperialgod.ml)")
embed.add_field(name = "Number of Commands:", value = f"`{len(self.client.commands)}` (including special owner commands)")
embed.add_field(name = "**Tech:**", value = "```diff\n+ Library : discord.py\n+ Database : AIOSQLite\n+ Hosting Services : Chaotic Destiny Hosting!\n```", inline = False)
embed.add_field(name = "Users:", value = f'`{len(self.client.users)}`')
embed.set_footer(text='Bot Made by NightZan999#0194', icon_url = ctx.author.avatar_url)
embed.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = embed)
@commands.command()
async def ping(self, ctx):
embed = discord.Embed(title = ":ping_pong: Pong!", color = ctx.author.color,
description = "The number rlly doesn't matter. Smh!")
embed.add_field(name= "Client Latency", value = f"`{round(self.client.latency * 1000)}ms`")
embed.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
embed.set_footer(text='Bot Made by NightZan999#0194')
await ctx.send(embed = embed)
@commands.command()
async def credits(self, ctx):
em = discord.Embed(title = ":scroll: Credits of TheImperialGod", color = ctx.author.color, description = "Github link is [here](https://github.com/NightZan999/TheImperialGod)")
em.add_field(name = "#1 NightZan999", value = f"""I have done everything on TheImperialGod, coded the entire bot, taken feedback, grown it to {len(self.client.guilds)} servers.\nI am even writing this right now!\nMy hopes are to you, if you like this bot type: `imp support`. That shows you ways to support TheImperialGod"\n\nI have written 70,000 lines of code for the bot and the website, so yeah-""")
em.add_field(name = '#2 Github', value = "I did do all the coding, but I made TheImperialGod open source, this is why many people respond to my issues. Some people have corrected some glitches, and a full credits list is avalible on github")
em.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
em.set_footer(text = "invite me now!")
await ctx.send(embed = em)
@commands.command()
async def uptime(self, ctx):
current_time = datetime.datetime.utcnow()
uptime = (current_time - self.launched_at)
em = discord.Embed(title = "<:zancool:819065864153595945> My Uptime", color = ctx.author.color)
em.add_field(name = "Uptime", value = f"I have been online for **{humanize.naturaldelta(uptime)}**")
em.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
em.set_footer(text = "Requested by {}".format(ctx.author.name), icon_url = ctx.author.avatar_url)
await ctx.send(embed = em)
@commands.command()
async def roleinfo(self, ctx, *, role_: discord.Role = None):
role = role_
        if role is None:
            await ctx.send("Please provide a valid role")
            return
em = discord.Embed(title = f"Info about {role.name}", color = ctx.author.color, description = f"Here is an insight into {role.mention}")
em.add_field(name = "ID:", value = f"`{role.id}`")
em.add_field(name = "Name:", value = f"`{role.name}`")
em.add_field(name = "Server it belongs to:", value = f"{role.guild.name}", inline = True)
em.add_field(name = "Hoisted:", value = f"`{role.hoist}`")
em.add_field(name = "Managed by extension:", value = f"`{role.managed}`", inline = True)
em.add_field(name = "Boost Role:", value = f"`{role.is_premium_subscriber()}`", inline = True)
em.add_field(name = "Mentionable:", value = f"`{role.mentionable}`" )
em.add_field(name = "Is Default:", value = f"`{role.is_default()}`", inline = True)
em.add_field(name = "Bot Role:", value = f"`{role.is_bot_managed()}`", inline = True)
em.add_field(name = "Color:", value = f"{role.color}")
em.add_field(name = "Created At:", value = f"{role.created_at}", inline = True)
em.add_field(name = "People with it:", value =f"{len(role.members)}", inline = True)
msg = "```diff\n"
        # map permission attributes to display labels; "+" marks granted, "-" missing
        permission_labels = [
            ("administrator", "Administrator"),
            ("manage_guild", "Manage Server"),
            ("mention_everyone", "Ping Everyone"),
            ("manage_roles", "Manage Roles"),
            ("manage_channels", "Manage Channels"),
            ("ban_members", "Ban Members"),
            ("kick_members", "Kick Members"),
            ("view_audit_log", "View Audit Log"),
            ("manage_messages", "Manage Messages"),
            ("add_reactions", "Add Reactions"),
            ("view_channel", "Read Messages"),
            ("send_messages", "Send Messages"),
            ("embed_links", "Embed Links"),
            ("read_message_history", "Read Message History"),
            ("view_guild_insights", "View Guild Insights"),
            ("connect", "Join VC"),
            ("speak", "Speak in VC"),
            ("change_nickname", "Change Nickname"),
            ("manage_nicknames", "Manage Nicknames"),
            ("manage_webhooks", "Manage Webhooks"),
            ("manage_emojis", "Manage Emojis"),
        ]
        for attr, label in permission_labels:
            sign = "+" if getattr(role.permissions, attr) else "-"
            msg += f"{sign} {label}\n"
msg += "\n```"
em.add_field(name = "Permissions:", value = msg, inline = False)
em.set_footer(text = "invite me ;)", icon_url = ctx.author.avatar_url)
em.set_thumbnail(url = str(ctx.guild.icon_url))
em.set_author(name = ctx.author.name, icon_url = ctx.author.avatar_url)
await ctx.send(embed = em)
@commands.command()
async def evalhelp(self, ctx):
em = discord.Embed(title = "Help with eval!", color = discord.Color.random(), description = "Check this help image for help with Jishaku!")
em.set_image(url = "https://ibb.co/MhythwM")
await ctx.send(embed = em)
@commands.command()
async def stats(self, ctx):
em = discord.Embed(title= "Stats about me", color = self.client.user.color, description = "My stats :partying_face:")
em.add_field(name = "Users:", value = f"{len(self.client.users)}")
em.add_field(name = "Servers:", value = f"{len(self.client.guilds)}")
em.add_field(name = "Total Commands:", value = f"{len(self.client.commands)}")
em.add_field(name = "Channels:", value = f"{len(self.client.channels)}")
await ctx.send(embed = em)
def setup(client):
client.add_cog(Information(client)) | 50.25817 | 411 | 0.606346 | [
"CC0-1.0"
] | EaBro/TheImperialGod | TheImperialGod/cogs/info/info.py | 15,382 | Python |
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
import plotly.graph_objs as go
import networkx as nx
import pickle
import boto3
import io
import numpy
import pandas
############################
# Load data
BUCKET_NAME = 'blog-seq-data' # replace with your bucket name
# list of topics
KEY = 'graph_and_labels' # replace with your object key
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=BUCKET_NAME, Key=KEY)
obj_str = obj['Body'].read()
graph_mat,topic_labels,dist_mat,doc_topic_mat = pickle.loads(obj_str)
topic_list = list(topic_labels.values())
# article info
KEY = 'article_info' # replace with your object key
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=BUCKET_NAME, Key=KEY)
obj_str = obj['Body'].read()
adf = pickle.loads(obj_str)
# topic sequence
KEY = 'topic_seq' # replace with your object key
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=BUCKET_NAME, Key=KEY)
obj_str = obj['Body'].read()
topic_sequence = pickle.loads(obj_str)
############################
app = dash.Dash()
server = app.server
############################
# Layout section
app.layout = html.Div(children=[
html.H1(children='Blog curator'),
html.Div(children='''
Pick a topic you'd like to learn about:
'''),
dcc.Dropdown(
id='my-dropdown',
options=[{'label':topic, 'value':topic_no} for topic_no, topic in enumerate(topic_list)],
value=0
),
html.Div(id='output-container',style={'padding': 10}),
html.Div(children='''
Select where you want to start and finish your reading:
'''),
    html.Div(id='output-container-2',style={'padding': 10}),
html.Div(id='my-datatable')
])
##############################
# Callbacks
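# update_rows: for the topic picked in the dropdown, walk its entry in
# topic_sequence and render a heading plus "title. author link source" for each
# article pulled from the adf dataframe.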
@app.callback(
dash.dependencies.Output('my-datatable', 'children'),
[dash.dependencies.Input('my-dropdown', 'value')])
def update_rows(selected_value):
output_arr = []
for doc_no, topic in enumerate(topic_sequence[selected_value]):
if doc_no != 0 and topic == selected_value:
continue
else:
topic = int(topic)
test_str = adf.iloc[topic]['title'] + '. ' + adf.iloc[topic]['author'] + ' ' + adf.iloc[topic]['link'] + ' ' + adf.iloc[topic]['source']
output_arr.append(html.H3(topic_list[int(topic)]))
output_arr.append(html.P(test_str))
return output_arr
##############################
'''
external_css = ["https://fonts.googleapis.com/css?family=Product+Sans:400,400i,700,700i",
"https://cdn.rawgit.com/plotly/dash-app-stylesheets/2cc54b8c03f4126569a3440aae611bbef1d7a5dd/stylesheet.css"]
for css in external_css:
app.css.append_css({"external_url": css})
'''
app.css.append_css({'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'})
if __name__ == '__main__':
app.run_server(debug=True) | 28.686275 | 148 | 0.64149 | [
"MIT"
] | avbatchelor/insight-articles-project | src/dash_app/my_app_v8.py | 2,926 | Python |
# -*- coding: utf-8 -*-
# @Time : 2020/7/7 9:11
# @Author : lightsmile
# @Software: PyCharm
from lightutils import get_file_name
if __name__ == '__main__':
print(get_file_name("hello_world.py"))
| 20.5 | 42 | 0.663415 | [
"MIT"
] | smilelight/lightUtils | examples/get_file_name_demo.py | 205 | Python |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bethel Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BethelTestFramework
from test_framework.staticr_util import *
import time
class StaticRTxSend(BethelTestFramework):
"""Tests the tx sending after softfork activation."""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = self.setup_nodes()
self.is_network_split = split
def run_test(self):
#check that a transaction can be sent after the reward changes to static
activate_staticr(self.nodes[0])
blockcount = self.nodes[0].getblockcount()
address = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(address, 100)
# wait for a new block to be mined
while self.nodes[0].getblockcount() == blockcount:
print("waiting for a new block...")
time.sleep(5)
transaction = self.nodes[0].gettransaction(txid)
# check the transaction confirmed
assert(transaction["confirmations"] > 0)
if __name__ == '__main__':
StaticRTxSend().main()
| 30.044444 | 80 | 0.678994 | [
"MIT"
] | diyathrajapakshe/bethel-core | qa/rpc-tests/staticr-tx-send.py | 1,352 | Python |
import time
import base64
from datetime import datetime
import sys
import json
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from modbus_redis_server_py3.modbus_serial_ctrl_py3 import ModbusSerialCtrl
from modbus_redis_server_py3.msg_manager_py3 import MessageManager
from modbus_redis_server_py3.rs485_mgr_py3 import RS485_Mgr
from modbus_redis_server_py3.modbus_statistics_py3 import Statistic_Handler
#from redis_support_py3.redis_rpc_server_py3 import Redis_Rpc_Server
class Modbus_Server( object ):
    def __init__( self, msg_handler,generate_handlers,data_structures,remote_dict): # fill in procedures
self.msg_handler = msg_handler
self.statistic_handler = Statistic_Handler(generate_handlers,data_structures,remote_dict)
self.rpc_server_handle = generate_handlers.construct_rpc_sever(data_structures["PLC_RPC_SERVER"] )
self.rpc_server_handle.register_call_back( "modbus_relay", self.process_modbus_message)
self.rpc_server_handle.register_call_back( "ping_message", self.process_ping_message)
self.rpc_server_handle.add_time_out_function(self.process_null_msg)
self.rpc_server_handle.start()
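        # the rpc server now dispatches "modbus_relay" and "ping_message" calls to
        # the methods below and calls process_null_msg whenever a poll times out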
def process_ping_message(self, address):
temp = self.msg_handler.ping_devices([address])
return temp[0]["result"]
def process_modbus_message( self,input_msg ):
address = input_msg[0]
self.statistic_handler.process_start_message( address )
failure, retries, output_message = self.msg_handler.process_msg( input_msg )
        if failure != 0:
            output_message = "@"  # return the "@" marker instead of a stale reply on failure
            self.statistic_handler.log_bad_message( address, retries )
else:
self.statistic_handler.log_good_message( address, retries )
self.statistic_handler.process_end_message()
return output_message
def process_null_msg( self ):
self.statistic_handler.process_null_message()
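# find_remotes walks the graph SITE -> PLC_SERVER -> IO_LINK -> REMOTE_UNIT and
# returns a dict mapping each remote unit's modbus_address to its parameters.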
def find_remotes(qs,link_name):
return_value = {}
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_relationship( query_list, relationship = "IO_LINK",label=link_name)
query_list = qs.add_match_terminal( query_list, relationship = "REMOTE_UNIT")
remote_sets, remote_sources = qs.match_list(query_list)
for i in remote_sources:
return_value[i["modbus_address"]] = i["parameters"]
return return_value
if __name__ == "__main__":
plc_server_name = sys.argv[1]
file_handle = open("system_data_files/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site = json.loads(data)
qs = Query_Support( redis_site )
# find data structures
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_terminal( query_list,
relationship = "PACKAGE",
property_mask={"name":"PLC_SERVER_DATA"} )
package_sets, package_sources = qs.match_list(query_list)
package = package_sources[0]
generate_handlers = Generate_Handlers(package,qs)
data_structures = package["data_structures"]
#
# finding IO_LINKS
#
#
query_list = []
query_list = qs.add_match_relationship( query_list,relationship="SITE",label=redis_site["site"] )
query_list = qs.add_match_relationship( query_list,relationship="PLC_SERVER",label=plc_server_name )
query_list = qs.add_match_terminal( query_list, relationship = "IO_LINK")
serial_sets, serial_sources = qs.match_list(query_list)
rs485_interface = RS485_Mgr()
interfaces = {}
for i in serial_sources:
i["handler"] = rs485_interface
interfaces[i["name"]] = i
msg_mgr = MessageManager()
for i,item in interfaces.items():
remote_dict = find_remotes(qs,item["name"])
modbus_serial_ctrl = ModbusSerialCtrl( item, remote_dict)
for j,k in remote_dict.items():
msg_mgr.add_device( k["address"], modbus_serial_ctrl )
#print(msg_mgr.ping_devices([100]))
Modbus_Server( msg_mgr,generate_handlers,data_structures,remote_dict )
| 36.560284 | 107 | 0.671581 | [
"MIT"
] | NanoDataCenter/nano_data_center | code/modbus_server_py3.py | 5,155 | Python |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util.tf_export import tf_export
_block_lstm_outputs = ["i", "cs", "f", "o", "ci", "co", "h"]
_BlockLSTMOutput = _collections.namedtuple(
"BlockLSTM", _block_lstm_outputs)
@tf_export('block_lstm')
def block_lstm(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
r"""Computes the LSTM cell forward propagation for all the time steps.
This is equivalent to applying LSTMBlockCell in a loop, like so:
```python
for x1 in unpack(x):
i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
x1, cs_prev, h_prev, w, wci, wcf, wco, b)
cs_prev = cs1
h_prev = h1
i.append(i1)
cs.append(cs1)
f.append(f1)
o.append(o1)
ci.append(ci1)
co.append(co1)
h.append(h1)
return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)
```
Args:
seq_len_max: A `Tensor` of type `int64`.
Maximum time length actually used by this input. Outputs are padded
with zeros beyond this length.
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the initial cell state.
h_prev: A `Tensor`. Must have the same type as `x`.
Initial output of cell (to be used for peephole).
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate over the whole time sequence.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh over the whole time sequence.
f: A `Tensor`. Has the same type as `x`. The forget gate over the whole time sequence.
o: A `Tensor`. Has the same type as `x`. The output gate over the whole time sequence.
ci: A `Tensor`. Has the same type as `x`. The cell input over the whole time sequence.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh over the whole time sequence.
h: A `Tensor`. Has the same type as `x`. The output h vector over the whole time sequence.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if forget_bias is None:
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, "forget_bias")
if cell_clip is None:
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, "cell_clip")
if use_peephole is None:
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_, _, _op = _op_def_lib._apply_op_helper(
"BlockLSTM", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b,
forget_bias=forget_bias, cell_clip=cell_clip,
use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
_op.get_attr("cell_clip"), "use_peephole",
_op.get_attr("use_peephole"), "T", _op.get_attr("T"))
_execute.record_gradient(
"BlockLSTM", _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name, "BlockLSTM",
name, _ctx._post_execution_callbacks, seq_len_max, x, cs_prev, h_prev,
w, wci, wcf, wco, b, "forget_bias", forget_bias, "cell_clip",
cell_clip, "use_peephole", use_peephole)
_result = _BlockLSTMOutput._make(_result)
return _result
except _core._FallbackException:
return block_lstm_eager_fallback(
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
forget_bias=forget_bias, cell_clip=cell_clip,
use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def block_lstm_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function block_lstm
"""
_ctx = ctx if ctx else _context.context()
if forget_bias is None:
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, "forget_bias")
if cell_clip is None:
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, "cell_clip")
if use_peephole is None:
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
_inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b]
_attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
"use_peephole", use_peephole, "T", _attr_T)
_result = _execute.execute(b"BlockLSTM", 7, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BlockLSTM", _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMOutput._make(_result)
return _result
_ops.RegisterShape("BlockLSTM")(None)
_block_lstm_grad_outputs = ["x_grad", "cs_prev_grad", "h_prev_grad", "w_grad",
"wci_grad", "wcf_grad", "wco_grad", "b_grad"]
_BlockLSTMGradOutput = _collections.namedtuple(
"BlockLSTMGrad", _block_lstm_grad_outputs)
@tf_export('block_lstm_grad')
def block_lstm_grad(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None):
r"""Computes the LSTM cell backward propagation for the entire time sequence.
This implementation is to be used in conjunction of LSTMBlock.
Args:
seq_len_max: A `Tensor` of type `int64`.
Maximum time length actually used by this input. Outputs are padded
with zeros beyond this length.
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the initial cell state.
h_prev: A `Tensor`. Must have the same type as `x`.
Initial output of cell (to be used for peephole).
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
i: A `Tensor`. Must have the same type as `x`.
The input gate over the whole time sequence.
cs: A `Tensor`. Must have the same type as `x`.
The cell state before the tanh over the whole time sequence.
f: A `Tensor`. Must have the same type as `x`.
The forget gate over the whole time sequence.
o: A `Tensor`. Must have the same type as `x`.
The output gate over the whole time sequence.
ci: A `Tensor`. Must have the same type as `x`.
The cell input over the whole time sequence.
co: A `Tensor`. Must have the same type as `x`.
The cell after the tanh over the whole time sequence.
h: A `Tensor`. Must have the same type as `x`.
The output h vector over the whole time sequence.
cs_grad: A `Tensor`. Must have the same type as `x`.
The current gradient of cs.
h_grad: A `Tensor`. Must have the same type as `x`.
The gradient of h vector.
use_peephole: A `bool`. Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad, wco_grad, b_grad).
x_grad: A `Tensor`. Has the same type as `x`. The gradient of x to be back-propped.
cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs_prev to be back-propped.
h_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of h_prev to be back-propped.
w_grad: A `Tensor`. Has the same type as `x`. The gradient for w to be back-propped.
wci_grad: A `Tensor`. Has the same type as `x`. The gradient for wci to be back-propped.
wcf_grad: A `Tensor`. Has the same type as `x`. The gradient for wcf to be back-propped.
wco_grad: A `Tensor`. Has the same type as `x`. The gradient for wco to be back-propped.
b_grad: A `Tensor`. Has the same type as `x`. The gradient for w to be back-propped.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_, _, _op = _op_def_lib._apply_op_helper(
"BlockLSTMGrad", seq_len_max=seq_len_max, x=x, cs_prev=cs_prev,
h_prev=h_prev, w=w, wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f,
o=o, ci=ci, co=co, h=h, cs_grad=cs_grad, h_grad=h_grad,
use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("use_peephole", _op.get_attr("use_peephole"), "T",
_op.get_attr("T"))
_execute.record_gradient(
"BlockLSTMGrad", _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMGradOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"BlockLSTMGrad", name, _ctx._post_execution_callbacks, seq_len_max, x,
cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad,
h_grad, "use_peephole", use_peephole)
_result = _BlockLSTMGradOutput._make(_result)
return _result
except _core._FallbackException:
return block_lstm_grad_eager_fallback(
seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o,
ci, co, h, cs_grad, h_grad, use_peephole=use_peephole, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def block_lstm_grad_eager_fallback(seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad, use_peephole, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function block_lstm_grad
"""
_ctx = ctx if ctx else _context.context()
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad) = _inputs_T
seq_len_max = _ops.convert_to_tensor(seq_len_max, _dtypes.int64)
_inputs_flat = [seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad]
_attrs = ("use_peephole", use_peephole, "T", _attr_T)
_result = _execute.execute(b"BlockLSTMGrad", 8, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"BlockLSTMGrad", _inputs_flat, _attrs, _result, name)
_result = _BlockLSTMGradOutput._make(_result)
return _result
_ops.RegisterShape("BlockLSTMGrad")(None)
_lstm_block_cell_outputs = ["i", "cs", "f", "o", "ci", "co", "h"]
_LSTMBlockCellOutput = _collections.namedtuple(
"LSTMBlockCell", _lstm_block_cell_outputs)
@tf_export('lstm_block_cell')
def lstm_block_cell(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, f, ci, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
if forget_bias is None:
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, "forget_bias")
if cell_clip is None:
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, "cell_clip")
if use_peephole is None:
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_, _, _op = _op_def_lib._apply_op_helper(
"LSTMBlockCell", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w, wci=wci,
wcf=wcf, wco=wco, b=b, forget_bias=forget_bias, cell_clip=cell_clip,
use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("forget_bias", _op.get_attr("forget_bias"), "cell_clip",
_op.get_attr("cell_clip"), "use_peephole",
_op.get_attr("use_peephole"), "T", _op.get_attr("T"))
_execute.record_gradient(
"LSTMBlockCell", _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"LSTMBlockCell", name, _ctx._post_execution_callbacks, x, cs_prev,
h_prev, w, wci, wcf, wco, b, "forget_bias", forget_bias, "cell_clip",
cell_clip, "use_peephole", use_peephole)
_result = _LSTMBlockCellOutput._make(_result)
return _result
except _core._FallbackException:
return lstm_block_cell_eager_fallback(
x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=forget_bias,
cell_clip=cell_clip, use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def lstm_block_cell_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, forget_bias=1, cell_clip=3, use_peephole=False, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function lstm_block_cell
"""
_ctx = ctx if ctx else _context.context()
if forget_bias is None:
forget_bias = 1
forget_bias = _execute.make_float(forget_bias, "forget_bias")
if cell_clip is None:
cell_clip = 3
cell_clip = _execute.make_float(cell_clip, "cell_clip")
if use_peephole is None:
use_peephole = False
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b) = _inputs_T
_inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b]
_attrs = ("forget_bias", forget_bias, "cell_clip", cell_clip,
"use_peephole", use_peephole, "T", _attr_T)
_result = _execute.execute(b"LSTMBlockCell", 7, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LSTMBlockCell", _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellOutput._make(_result)
return _result
_ops.RegisterShape("LSTMBlockCell")(None)
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo", "wci_grad",
"wcf_grad", "wco_grad"]
_LSTMBlockCellGradOutput = _collections.namedtuple(
"LSTMBlockCellGrad", _lstm_block_cell_grad_outputs)
@tf_export('lstm_block_cell_grad')
def lstm_block_cell_grad(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None):
r"""Computes the LSTM cell backward propagation for 1 timestep.
This implementation is to be used in conjunction of LSTMBlockCell.
Args:
x: A `Tensor`. Must be one of the following types: `half`, `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
The previous cell state.
h_prev: A `Tensor`. Must have the same type as `x`. The previous h state.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
i: A `Tensor`. Must have the same type as `x`. The input gate.
cs: A `Tensor`. Must have the same type as `x`.
The cell state before the tanh.
f: A `Tensor`. Must have the same type as `x`. The forget gate.
o: A `Tensor`. Must have the same type as `x`. The output gate.
ci: A `Tensor`. Must have the same type as `x`. The cell input.
co: A `Tensor`. Must have the same type as `x`. The cell after the tanh.
cs_grad: A `Tensor`. Must have the same type as `x`.
The current gradient of cs.
h_grad: A `Tensor`. Must have the same type as `x`.
The gradient of h vector.
use_peephole: A `bool`. Whether the cell uses peephole connections.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (cs_prev_grad, dicfo, wci_grad, wcf_grad, wco_grad).
cs_prev_grad: A `Tensor`. Has the same type as `x`. The gradient of cs to be back-propped.
dicfo: A `Tensor`. Has the same type as `x`. The derivative wrt to [i, cs, f, o].
wci_grad: A `Tensor`. Has the same type as `x`. The gradient for wci to be back-propped.
wcf_grad: A `Tensor`. Has the same type as `x`. The gradient for wcf to be back-propped.
wco_grad: A `Tensor`. Has the same type as `x`. The gradient for wco to be back-propped.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_, _, _op = _op_def_lib._apply_op_helper(
"LSTMBlockCellGrad", x=x, cs_prev=cs_prev, h_prev=h_prev, w=w,
wci=wci, wcf=wcf, wco=wco, b=b, i=i, cs=cs, f=f, o=o, ci=ci, co=co,
cs_grad=cs_grad, h_grad=h_grad, use_peephole=use_peephole, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("use_peephole", _op.get_attr("use_peephole"), "T",
_op.get_attr("T"))
_execute.record_gradient(
"LSTMBlockCellGrad", _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"LSTMBlockCellGrad", name, _ctx._post_execution_callbacks, x, cs_prev,
h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad,
"use_peephole", use_peephole)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
except _core._FallbackException:
return lstm_block_cell_grad_eager_fallback(
x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co,
cs_grad, h_grad, use_peephole=use_peephole, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def lstm_block_cell_grad_eager_fallback(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad, use_peephole, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function lstm_block_cell_grad
"""
_ctx = ctx if ctx else _context.context()
use_peephole = _execute.make_bool(use_peephole, "use_peephole")
_attr_T, _inputs_T = _execute.args_to_matching_eager([x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad], _ctx)
(x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad) = _inputs_T
_inputs_flat = [x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad]
_attrs = ("use_peephole", use_peephole, "T", _attr_T)
_result = _execute.execute(b"LSTMBlockCellGrad", 5, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LSTMBlockCellGrad", _inputs_flat, _attrs, _result, name)
_result = _LSTMBlockCellGradOutput._make(_result)
return _result
_ops.RegisterShape("LSTMBlockCellGrad")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "BlockLSTM"
# input_arg {
# name: "seq_len_max"
# type: DT_INT64
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "cs_prev"
# type_attr: "T"
# }
# input_arg {
# name: "h_prev"
# type_attr: "T"
# }
# input_arg {
# name: "w"
# type_attr: "T"
# }
# input_arg {
# name: "wci"
# type_attr: "T"
# }
# input_arg {
# name: "wcf"
# type_attr: "T"
# }
# input_arg {
# name: "wco"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# output_arg {
# name: "i"
# type_attr: "T"
# }
# output_arg {
# name: "cs"
# type_attr: "T"
# }
# output_arg {
# name: "f"
# type_attr: "T"
# }
# output_arg {
# name: "o"
# type_attr: "T"
# }
# output_arg {
# name: "ci"
# type_attr: "T"
# }
# output_arg {
# name: "co"
# type_attr: "T"
# }
# output_arg {
# name: "h"
# type_attr: "T"
# }
# attr {
# name: "forget_bias"
# type: "float"
# default_value {
# f: 1
# }
# }
# attr {
# name: "cell_clip"
# type: "float"
# default_value {
# f: 3
# }
# }
# attr {
# name: "use_peephole"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "BlockLSTMGrad"
# input_arg {
# name: "seq_len_max"
# type: DT_INT64
# }
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "cs_prev"
# type_attr: "T"
# }
# input_arg {
# name: "h_prev"
# type_attr: "T"
# }
# input_arg {
# name: "w"
# type_attr: "T"
# }
# input_arg {
# name: "wci"
# type_attr: "T"
# }
# input_arg {
# name: "wcf"
# type_attr: "T"
# }
# input_arg {
# name: "wco"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# input_arg {
# name: "i"
# type_attr: "T"
# }
# input_arg {
# name: "cs"
# type_attr: "T"
# }
# input_arg {
# name: "f"
# type_attr: "T"
# }
# input_arg {
# name: "o"
# type_attr: "T"
# }
# input_arg {
# name: "ci"
# type_attr: "T"
# }
# input_arg {
# name: "co"
# type_attr: "T"
# }
# input_arg {
# name: "h"
# type_attr: "T"
# }
# input_arg {
# name: "cs_grad"
# type_attr: "T"
# }
# input_arg {
# name: "h_grad"
# type_attr: "T"
# }
# output_arg {
# name: "x_grad"
# type_attr: "T"
# }
# output_arg {
# name: "cs_prev_grad"
# type_attr: "T"
# }
# output_arg {
# name: "h_prev_grad"
# type_attr: "T"
# }
# output_arg {
# name: "w_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wci_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wcf_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wco_grad"
# type_attr: "T"
# }
# output_arg {
# name: "b_grad"
# type_attr: "T"
# }
# attr {
# name: "use_peephole"
# type: "bool"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "LSTMBlockCell"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "cs_prev"
# type_attr: "T"
# }
# input_arg {
# name: "h_prev"
# type_attr: "T"
# }
# input_arg {
# name: "w"
# type_attr: "T"
# }
# input_arg {
# name: "wci"
# type_attr: "T"
# }
# input_arg {
# name: "wcf"
# type_attr: "T"
# }
# input_arg {
# name: "wco"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# output_arg {
# name: "i"
# type_attr: "T"
# }
# output_arg {
# name: "cs"
# type_attr: "T"
# }
# output_arg {
# name: "f"
# type_attr: "T"
# }
# output_arg {
# name: "o"
# type_attr: "T"
# }
# output_arg {
# name: "ci"
# type_attr: "T"
# }
# output_arg {
# name: "co"
# type_attr: "T"
# }
# output_arg {
# name: "h"
# type_attr: "T"
# }
# attr {
# name: "forget_bias"
# type: "float"
# default_value {
# f: 1
# }
# }
# attr {
# name: "cell_clip"
# type: "float"
# default_value {
# f: 3
# }
# }
# attr {
# name: "use_peephole"
# type: "bool"
# default_value {
# b: false
# }
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# }
# }
# }
# }
# op {
# name: "LSTMBlockCellGrad"
# input_arg {
# name: "x"
# type_attr: "T"
# }
# input_arg {
# name: "cs_prev"
# type_attr: "T"
# }
# input_arg {
# name: "h_prev"
# type_attr: "T"
# }
# input_arg {
# name: "w"
# type_attr: "T"
# }
# input_arg {
# name: "wci"
# type_attr: "T"
# }
# input_arg {
# name: "wcf"
# type_attr: "T"
# }
# input_arg {
# name: "wco"
# type_attr: "T"
# }
# input_arg {
# name: "b"
# type_attr: "T"
# }
# input_arg {
# name: "i"
# type_attr: "T"
# }
# input_arg {
# name: "cs"
# type_attr: "T"
# }
# input_arg {
# name: "f"
# type_attr: "T"
# }
# input_arg {
# name: "o"
# type_attr: "T"
# }
# input_arg {
# name: "ci"
# type_attr: "T"
# }
# input_arg {
# name: "co"
# type_attr: "T"
# }
# input_arg {
# name: "cs_grad"
# type_attr: "T"
# }
# input_arg {
# name: "h_grad"
# type_attr: "T"
# }
# output_arg {
# name: "cs_prev_grad"
# type_attr: "T"
# }
# output_arg {
# name: "dicfo"
# type_attr: "T"
# }
# output_arg {
# name: "wci_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wcf_grad"
# type_attr: "T"
# }
# output_arg {
# name: "wco_grad"
# type_attr: "T"
# }
# attr {
# name: "use_peephole"
# type: "bool"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_HALF
# type: DT_FLOAT
# }
# }
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n\215\002\n\tBlockLSTM\022\017\n\013seq_len_max\030\t\022\006\n\001x\"\001T\022\014\n\007cs_prev\"\001T\022\013\n\006h_prev\"\001T\022\006\n\001w\"\001T\022\010\n\003wci\"\001T\022\010\n\003wcf\"\001T\022\010\n\003wco\"\001T\022\006\n\001b\"\001T\032\006\n\001i\"\001T\032\007\n\002cs\"\001T\032\006\n\001f\"\001T\032\006\n\001o\"\001T\032\007\n\002ci\"\001T\032\007\n\002co\"\001T\032\006\n\001h\"\001T\"\033\n\013forget_bias\022\005float\032\005%\000\000\200?\"\031\n\tcell_clip\022\005float\032\005%\000\000@@\"\030\n\014use_peephole\022\004bool\032\002(\000\"\021\n\001T\022\004type:\006\n\0042\002\023\001\n\351\002\n\rBlockLSTMGrad\022\017\n\013seq_len_max\030\t\022\006\n\001x\"\001T\022\014\n\007cs_prev\"\001T\022\013\n\006h_prev\"\001T\022\006\n\001w\"\001T\022\010\n\003wci\"\001T\022\010\n\003wcf\"\001T\022\010\n\003wco\"\001T\022\006\n\001b\"\001T\022\006\n\001i\"\001T\022\007\n\002cs\"\001T\022\006\n\001f\"\001T\022\006\n\001o\"\001T\022\007\n\002ci\"\001T\022\007\n\002co\"\001T\022\006\n\001h\"\001T\022\014\n\007cs_grad\"\001T\022\013\n\006h_grad\"\001T\032\013\n\006x_grad\"\001T\032\021\n\014cs_prev_grad\"\001T\032\020\n\013h_prev_grad\"\001T\032\013\n\006w_grad\"\001T\032\r\n\010wci_grad\"\001T\032\r\n\010wcf_grad\"\001T\032\r\n\010wco_grad\"\001T\032\013\n\006b_grad\"\001T\"\024\n\014use_peephole\022\004bool\"\021\n\001T\022\004type:\006\n\0042\002\023\001\n\200\002\n\rLSTMBlockCell\022\006\n\001x\"\001T\022\014\n\007cs_prev\"\001T\022\013\n\006h_prev\"\001T\022\006\n\001w\"\001T\022\010\n\003wci\"\001T\022\010\n\003wcf\"\001T\022\010\n\003wco\"\001T\022\006\n\001b\"\001T\032\006\n\001i\"\001T\032\007\n\002cs\"\001T\032\006\n\001f\"\001T\032\006\n\001o\"\001T\032\007\n\002ci\"\001T\032\007\n\002co\"\001T\032\006\n\001h\"\001T\"\033\n\013forget_bias\022\005float\032\005%\000\000\200?\"\031\n\tcell_clip\022\005float\032\005%\000\000@@\"\030\n\014use_peephole\022\004bool\032\002(\000\"\021\n\001T\022\004type:\006\n\0042\002\023\001\n\247\002\n\021LSTMBlockCellGrad\022\006\n\001x\"\001T\022\014\n\007cs_prev\"\001T\022\013\n\006h_prev\"\001T\022\006\n\001w\"\001T\022\010\n\003wci\"\001T\022\010\n\003wcf\"\001T\022\010\n\003wco\"\001T\022\006\n\001b\"\001T\022\006\n\001i\"\001T\022\007\n\002cs\"\001T\022\006\n\001f\"\001T\022\006\n\001o\"\001T\022\007\n\002ci\"\001T\022\007\n\002co\"\001T\022\014\n\007cs_grad\"\001T\022\013\n\006h_grad\"\001T\032\021\n\014cs_prev_grad\"\001T\032\n\n\005dicfo\"\001T\032\r\n\010wci_grad\"\001T\032\r\n\010wcf_grad\"\001T\032\r\n\010wco_grad\"\001T\"\024\n\014use_peephole\022\004bool\"\021\n\001T\022\004type:\006\n\0042\002\023\001")
| 36.591667 | 2,646 | 0.624488 | [
"MIT"
] | JustinACoder/H22-GR3-UnrealAI | Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/rnn/ops/gen_lstm_ops.py | 35,128 | Python |
import numpy as np
from torch import nn
import torch.optim as optim
import torch
import matplotlib.pyplot as plt
import pandas as pd
import data_loader as dl
import time
import copy
import utility
import yaml
import trainer
from PIL import Image
from os import path
Image.MAX_IMAGE_PIXELS = None
from scipy.io import savemat
from sklearn.model_selection import train_test_split
from torchvision import transforms
BATCH_SIZE = 32
EPOCHS = 100
LR = 0.001
ANNEAL_STRAT = "cos"
FEATURE_EXTRACT = False
APPLY_ZCA_TRANS = True
DATA_DIR = 'data/train_images'
NETS = ['resnext'] # train on resnext
IMAGE_SIZES = [64, 128, 224] # train for 4 resolutions
def main():
# Load the meta data file
df = pd.read_csv('./data/train.csv')
df, label_encoder = utility.encode_labels(df)
num_classes = len(df['label'].value_counts())
np.save('./data/label_encoder_classes.npy', label_encoder.classes_)
# Generate the ZCA matrix if enabled
for image_size in IMAGE_SIZES: # train for every res
if APPLY_ZCA_TRANS:
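            # ZCA whitening decorrelates pixel values across the training images;
            # the fitted matrix and mean are saved to disk so the trainer can apply
            # the same transform later (see apply_zca_trans below)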
print("Making ZCA matrix ...")
data_loader = dl.get_full_data_loader(df, data_dir=DATA_DIR,
batch_size=BATCH_SIZE,
image_size=image_size)
train_dataset_arr = next(iter(data_loader))[0].numpy()
zca = utility.ZCA()
zca.fit(train_dataset_arr)
zca_dic = {"zca_matrix": zca.ZCA_mat, "zca_mean": zca.mean}
savemat("./data/zca_data.mat", zca_dic)
print("Completed making ZCA matrix")
# Define normalization
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
)
# Define specific transforms
train_transform = transforms.Compose([
utility.AddPadding(),
transforms.Resize((image_size,image_size)),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(degrees=(-90, 90)),
transforms.RandomVerticalFlip(p=0.5),
transforms.ColorJitter(.4,.4,.4),
transforms.ToTensor(),
normalize
])
valid_transform = transforms.Compose([
utility.AddPadding(),
transforms.Resize((image_size,image_size)),
transforms.ToTensor(),
normalize
])
# Create a train and valid dataset
train_dataset = dl.HotelImagesDataset(df, root_dir=DATA_DIR,
transform=train_transform)
valid_dataset = dl.HotelImagesDataset(df, root_dir=DATA_DIR,
transform=valid_transform)
# Get a train and valid data loader
train_loader, valid_loader = dl.get_train_valid_loader(train_dataset,
valid_dataset,
batch_size=BATCH_SIZE,
random_seed=0)
for net_type in NETS: # train for every net
model = utility.initialize_net(num_classes, net_type,
feature_extract=FEATURE_EXTRACT)
# If old model exists, take state from it
if path.exists(f"./models/model_{net_type}.pt"):
print("Resuming training on trained model ...")
model = utility.load_latest_model(model, f'./models/model_{net_type}.pt')
# Gather the parameters to be optimized/updated in this run.
params_to_update = utility.get_model_params_to_train(model, FEATURE_EXTRACT)
# Send model to GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
# Make criterion
criterion = nn.CrossEntropyLoss()
# Make optimizer + scheduler
optimizer = torch.optim.SGD(params_to_update, lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min',
factor=0.01,
patience=3)
trained_model = trainer.train_model(device=device,
model=model,
optimizer=optimizer,
criterion=criterion,
train_loader=train_loader,
valid_loader=valid_loader,
scheduler=scheduler,
net_type=net_type,
epochs=EPOCHS,
apply_zca_trans=APPLY_ZCA_TRANS)
utility.save_current_model(trained_model,
f"./models/model_{net_type}.pt")
if __name__ == "__main__":
main() | 42.193798 | 93 | 0.516994 | [
"MIT"
] | micqu/hotel-challenge | main.py | 5,443 | Python |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
from django.core.urlresolvers import reverse
from django.http import QueryDict
from .crypto import generate_randomness
from .models import SQRLNut
from .utils import get_user_ip
class SQRLInitialization(object):
"""
SQRL class for initializing SQRL transaction.
This class is mainly responsible for initially creating and storing
:obj:`.models.SQRLNut`. Also this class has helper properties
for getting SQRL urls.
Parameters
----------
request : HttpRequest
Django standard request object
nut : SQRLNut, optional
SQRLNut for which to do SQRL initialization
"""
def __init__(self, request, nut=None):
self.request = request
if nut is not None:
self.nut = nut
def get_or_create_session_key(self):
"""
Get or create the session key from the request object.
When not present yet, this initializes the session for the user.
As a result, the request then returns session cookie to the user
via session middleware.
"""
session_key = self.request.session.session_key
if session_key is None:
self.request.session.create()
session_key = self.request.session.session_key
return session_key
@property
def nut(self):
"""
Cached property for getting :obj:`.models.SQRLNut`.
When accessed for the first time, this property either replaces or creates
new :obj:`.models.SQRLNut` by using :meth:`.managers.SQRLNutManager.replace_or_create`.
All the data for the creation of the nut is created by using :meth:`.generate_nut_kwargs`.
"""
if hasattr(self, '_nut'):
return self._nut
self._nut = SQRLNut.objects.replace_or_create(
**self.generate_nut_kwargs()
)
return self._nut
@nut.setter
def nut(self, value):
self._nut = value
def generate_nut_kwargs(self):
"""
Generate kwargs which can be used to create new :obj:`.models.SQRLNut`.
Returns
-------
dict
All required kwargs to instantiate and create :obj:`.models.SQRLNut`.
"""
randomness = generate_randomness(64)
l = len(randomness) // 2
return {
'session_key': self.get_or_create_session_key(),
'nonce': randomness[:l],
'transaction_nonce': randomness[l:],
'is_transaction_complete': False,
'ip_address': get_user_ip(self.request),
}
def get_sqrl_url(self):
"""
        Get the server URL where the SQRL client will make its first request.
        This method should be customized when a custom namespace should be used
        by the SQRL client for generating its on-the-fly per-site public-private keypair.
        For example, this can be used when a web site is a SAAS in which different
        "sub-sites" are identified by a tenant segment within the URL path - ``mysaas.com/<tenant>``.
        In that case the returned SQRL auth url should be something like -
        ``mysaas.com/mytenant:sqrl/auth/?nut=<nut value>``.
        Using ``:`` within the path lets the SQRL client know that up until
        that point the full domain name should be used to generate the public-private keypair.
"""
return reverse('sqrl:auth')
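    # A minimal sketch of the tenant-aware customization described above; this is
    # an illustration only, not part of this project, and ``tenant`` is a
    # hypothetical attribute assumed to be set on the request elsewhere:
    #
    #     class TenantSQRLInitialization(SQRLInitialization):
    #         def get_sqrl_url(self):
    #             return '/{tenant}:sqrl/auth/'.format(tenant=self.request.tenant)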
def get_sqrl_url_params(self):
"""
Get SQRL url params to be added as querystring params in the SQRL url.
By default this only adds ``nut=<nut>``.
Returns
-------
str
URLEncoded querystring params
"""
qd = QueryDict('', mutable=True)
qd.update({
'nut': self.nut.nonce,
})
return qd.urlencode()
@property
def url(self):
"""
Property for getting only server-side SQRL auth view URL.
This does not include the full domain within the URL.
The URL is always relative to the current domain of the site.
"""
return (
'{url}?{params}'
''.format(url=self.get_sqrl_url(),
params=self.get_sqrl_url_params())
)
@property
def sqrl_url(self):
"""
Property for getting full SQRL auth view URL including SQRL scheme and full domain with port.
"""
return (
'{scheme}://{host}{url}'
''.format(scheme='sqrl' if self.request.is_secure() else 'qrl',
host=self.request.get_host(),
url=self.url)
)
| 31.581081 | 101 | 0.609114 | [
"MIT"
] | JamesonNetworks/django-sqrl | sqrl/sqrl.py | 4,674 | Python |
from abc import abstractmethod
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from .fp16_util import convert_module_to_f16, convert_module_to_f32, convert_module_to_f16_2
from .nn import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
PositionalEncoding,
LayerNorm32,
FFN,
)
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(
th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5
)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class ConditionalBlock(nn.Module):
"""
Any module where forward() takes y as a second argument.
"""
@abstractmethod
def forward(self, x, y):
"""
Apply the module to `x` given `y`.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, y=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, ConditionalBlock):
x = layer(x, y=y)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=1
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
),
)
if self.out_channels == channels:
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = conv_nd(
dims, channels, self.out_channels, 3, padding=1
)
else:
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
def forward(self, x, emb):
"""
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
"""
return checkpoint(
self._forward, (x, emb), self.parameters(), self.use_checkpoint
)
def _forward(self, x, emb):
if self.updown:
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
h = in_rest(x)
h = self.h_upd(h)
x = self.x_upd(x)
h = in_conv(h)
else:
h = self.in_layers(x)
emb_out = self.emb_layers(emb).type(h.dtype)
while len(emb_out.shape) < len(h.shape):
emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
scale, shift = th.chunk(emb_out, 2, dim=1)
h = out_norm(h) * (1 + scale) + shift
h = out_rest(h)
else:
h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
class SubAttentionBlock(nn.Module):
"""
An attention block that allows spatial positions to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
"""
def __init__(
self,
channels,
num_heads=1,
num_head_channels=-1,
use_checkpoint=False,
use_new_attention_order=False,
cross=False,
enc_attn_dim=None,
norm_type='group',
dropout=0.,
):
super().__init__()
self.channels = channels
if num_head_channels == -1:
self.num_heads = num_heads
else:
assert (
channels % num_head_channels == 0
), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
self.num_heads = channels // num_head_channels
self.use_checkpoint = use_checkpoint
self.norm = normalization(channels) if norm_type == 'group' else LayerNorm32(channels)
qkv_dim = channels * 3 if not cross else channels
self.qkv = conv_nd(1, channels, qkv_dim, 1)
# split heads before split qkv
self.attention = QKVAttentionLegacy(self.num_heads, dropout=dropout)
self.proj_out = conv_nd(1, channels, channels, 1)
if norm_type == 'group':
self.proj_out = zero_module(self.proj_out)
self.dropout = nn.Dropout(p=dropout)
if cross and channels != enc_attn_dim:
self.adjust_kv_dim = conv_nd(1, enc_attn_dim, channels, 1)
def forward(self, x, y=None):
return checkpoint(self._forward, (x, y), self.parameters(), self.use_checkpoint)
def _forward(self, x, y):
b, c, *spatial = x.shape
x = x.reshape(b, c, -1)
qkv = self.qkv(self.norm(x))
if hasattr(self, 'adjust_kv_dim'):
y = self.adjust_kv_dim(y)
h = self.attention(qkv, y=y)
h = self.dropout(self.proj_out(h))
return (x + h).reshape(b, c, *spatial)
class AttentionBlock(ConditionalBlock):
"""
An attention block that allows spatial positions to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.attn1 = SubAttentionBlock(*args, **{**kwargs, **{'cross': False}})
if kwargs['cross']:
self.attn2 = SubAttentionBlock(*args, **kwargs)
def forward(self, x, y=None):
x = self.attn1(x)
if hasattr(self, 'attn2'):
x = self.attn2(x, y)
return x
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial ** 2) * c
model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
"""
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads, dropout=0.):
super().__init__()
self.n_heads = n_heads
self.dropout = nn.Dropout(p=dropout)
def forward(self, qkv, y):
"""
Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs, or an
            [N x (H * C) x T] tensor of Qs only when ``y`` is provided.
        :param y: optional [N x (H * C) x S] tensor supplying keys and values
            for cross-attention; when None, self-attention is performed.
        :return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
if y is None:
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
else:
assert width % (self.n_heads) == 0
ch = width // (self.n_heads)
q = qkv.reshape(bs * self.n_heads, ch, length)
k = v = y.reshape(bs * self.n_heads, ch, -1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts", q * scale, k * scale
) # More stable with f16 than dividing afterwards
weight = self.dropout(th.softmax(weight.float(), dim=-1).type(weight.dtype))
a = th.einsum("bts,bcs->bct", weight, v)
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts",
(q * scale).view(bs * self.n_heads, ch, length),
(k * scale).view(bs * self.n_heads, ch, length),
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class TransformerEncoder(nn.Module):
def __init__(self, enc_attn_dim, vocab_size, use_checkpoint, clip=False, dropout=0.):
super().__init__()
d_model = enc_attn_dim
self.use_checkpoint = use_checkpoint
self.encoder = nn.ModuleList([])
for _ in range(d_model//64):
self.encoder += [AttentionBlock(d_model, num_heads=d_model//64, norm_type='layernorm', cross=False, use_checkpoint=use_checkpoint, dropout=dropout)]
self.encoder += [FFN(d_model, dropout=dropout)]
self.pos_enc = PositionalEncoding(d_model, dropout=dropout)
self.emb = nn.Embedding(vocab_size, d_model)
if clip:
self.clip_proj = conv_nd(1, enc_attn_dim, enc_attn_dim, 1)
def forward(self, text):
x = self.pos_enc(self.emb(text)).transpose(1, 2)
for idx, layer in enumerate(self.encoder):
x = checkpoint(layer.forward, (x,), layer.parameters(), self.use_checkpoint)
if not hasattr(self, 'clip_proj'):
return x
else:
return self.clip_proj(x[th.arange(x.shape[0]), :, text.argmax(dim=-1)].unsqueeze(-1)).squeeze(-1)
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
    :param cond_text: if True, the model is conditioned on text through a
        Transformer text encoder (requires vocab_size and enc_attn_dim).
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=True,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=True,
resblock_updown=True,
use_new_attention_order=False,
enc_attn_dim=None,
vocab_size=None,
cross=True,
text_level=False,
dropout_text=0,
cond_text=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.cond_text = cond_text
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.cond_text:
self.text_encoder = TransformerEncoder(enc_attn_dim, vocab_size, use_checkpoint, dropout=dropout_text)
else:
cross = False
self.input_blocks = nn.ModuleList(
[
TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)
)
]
)
self._feature_size = model_channels
input_block_chans = [model_channels]
ch = model_channels
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = mult * model_channels
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
enc_attn_dim=enc_attn_dim,
cross=cross,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
enc_attn_dim=enc_attn_dim,
cross=cross,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = model_channels * mult
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
enc_attn_dim=enc_attn_dim,
cross=cross,
)
)
if level and i == num_res_blocks:
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
if hasattr(self, 'text_encoder'):
self.text_encoder.apply(convert_module_to_f16_2)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
if hasattr(self, 'text_encoder'):
self.text_encoder.apply(convert_module_to_f32)
def forward(self, x, timesteps, y=None):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param y: an [N, L] Tensor of texts, if conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
hs = []
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
if hasattr(self, 'text_encoder'):
y = self.text_encoder(y)
else:
y = None
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb, y=y)
hs.append(h)
h = self.middle_block(h, emb, y=y)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb, y=y)
h = h.type(x.dtype)
return self.out(h)
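# A minimal usage sketch for UNetModel (illustration only, not part of the original
# file); the sizes below are arbitrary assumptions chosen to keep the example small:
#
#     model = UNetModel(
#         image_size=64, in_channels=3, model_channels=64, out_channels=3,
#         num_res_blocks=1, attention_resolutions=(2,), channel_mult=(1, 2),
#         use_fp16=False, cond_text=False,
#     )
#     x = th.randn(2, 3, 64, 64)
#     t = th.randint(0, 1000, (2,))
#     eps = model(x, t)  # -> [2, 3, 64, 64]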
class SuperResModel(UNetModel):
"""
A UNetModel that performs super-resolution.
Expects an extra kwarg `low_res` to condition on a low-resolution image.
"""
def __init__(self, image_size, in_channels, *args, **kwargs):
super().__init__(image_size, in_channels * 2, *args, **kwargs)
def forward(self, x, timesteps, **kwargs):
_, _, new_height, new_width = x.shape
upsampled = F.interpolate(kwargs.pop("low_res"), (new_height, new_width), mode="bilinear")
x = th.cat([x, upsampled], dim=1)
return super().forward(x, timesteps, **kwargs)
class Classifier(nn.Module):
"""
The half UNet model with attention and timestep embedding + text encoder as CLIP.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=True,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=True,
resblock_updown=True,
use_new_attention_order=False,
enc_attn_dim=None,
vocab_size=None,
cross=False,
dropout_text=0,
):
super().__init__()
self.image_encoder = ImageEncoder(
image_size,
in_channels,
model_channels,
num_res_blocks,
attention_resolutions,
dropout=dropout,
channel_mult=channel_mult,
conv_resample=conv_resample,
dims=dims,
use_checkpoint=use_checkpoint,
use_fp16=use_fp16,
num_heads=num_heads,
num_head_channels=num_head_channels,
num_heads_upsample=num_heads_upsample,
use_scale_shift_norm=use_scale_shift_norm,
resblock_updown=resblock_updown,
use_new_attention_order=use_new_attention_order,
enc_attn_dim=enc_attn_dim,
cross=cross,
)
self.text_encoder = TransformerEncoder(enc_attn_dim, vocab_size, use_checkpoint, clip=True, dropout=dropout_text)
self.logit_scale = LogitScale()
def convert_to_fp16(self):
self.text_encoder.apply(convert_module_to_f16_2)
self.image_encoder.apply(convert_module_to_f16)
def convert_to_fp32(self):
self.text_encoder.apply(convert_module_to_f32)
self.image_encoder.apply(convert_module_to_f32)
    def clip_loss(self, x, timesteps, y):
image_features = self.image_encoder(x, timesteps)
text_features = self.text_encoder(y)
logit_scale = self.logit_scale(image_features.dtype)
return clip_loss(image_features, text_features, logit_scale)
class LogitScale(nn.Module):
def __init__(self):
super().__init__()
self.logit_scale = nn.Parameter(th.ones([]) * np.log(1 / 0.07))
self.max_log_temp = np.log(100)
def forward(self, dtype):
logit_scale = self.max_log_temp - F.softplus(self.max_log_temp - self.logit_scale)
return logit_scale.exp().type(dtype)
class TextEncoder(nn.Module):
def __init__(
self,
enc_attn_dim,
vocab_size,
use_checkpoint,
dropout_text,
):
super().__init__()
self.text_encoder = TransformerEncoder(enc_attn_dim, vocab_size, use_checkpoint, clip=True, dropout=dropout_text)
def forward(self, y):
text_features = self.text_encoder(y)
return F.normalize(text_features, dim=-1)
class ImageEncoder(nn.Module):
"""
The half UNet model with attention and timestep embedding.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=True,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=True,
resblock_updown=True,
use_new_attention_order=False,
enc_attn_dim=None,
cross=False,
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.in_channels = in_channels
self.model_channels = model_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
self.input_blocks = nn.ModuleList(
[
TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)
)
]
)
self._feature_size = model_channels
input_block_chans = [model_channels]
ch = model_channels
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = mult * model_channels
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
cross=cross,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
cross=cross,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
assert num_head_channels != -1
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
AttentionPool2d(
(image_size // ds), ch, num_head_channels, enc_attn_dim
),
)
def forward(self, x, timesteps):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
"""
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
h = self.middle_block(h, emb).type(self.dtype)
image_features = self.out(h)
# normalized features
image_features = F.normalize(image_features, dim=-1)
return image_features | 34.767331 | 160 | 0.55646 | [
"MIT"
] | AranKomat/Diff-DALLE | diff_dalle/unet.py | 36,610 | Python |
'''
This file is automatically generated; Do not edit it. :)
'''
VERSION_INFO = {
'final': True,
'version': '1.19.0',
'branch_nick': 'feature-0-Rucio_1_19_0_release_preparation',
'revision_id': 'dd9f8f94d94996ab5b3aa45b4c23ccc77cff604a',
'revno': 7719
}
| 22.916667 | 64 | 0.676364 | [
"Apache-2.0"
] | pujanm/rucio | lib/rucio/vcsversion.py | 275 | Python |
#! /usr/bin/env python
""" cryptopy.cipher.aes_sbox_analysis
AES Sbox Analysis - a simple analysis of the AES Sbox that determines
the number and size of the permutation subgroups in the transformation.
Could be extended to examine any Sbox ...
Copyright (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-12-05 Added validation of equation form of AES
2002-06-01 Original
"""
# The AES Sbox
sbbytes = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
def groups(subbytes):
    gdict = {}  # a dictionary of the cycles indexed by the first cycle element
    touched = [0 for i in range(len(subbytes))]
    for i in range(len(subbytes)):
        element = i
        cycle = []
        if not touched[element]:
            for j in range(i, len(subbytes)):
                touched[element] = 1
                cycle.append(element)
                element = subbytes[element]
                if element == i:
                    break
            gdict[cycle[0]] = cycle
    return gdict
def grpv(subbytes):
"""" Returns a list of tuples (cycle start, cycle size) """
v=[]
z=groups(subbytes)
for i in z.keys():
v.append( [i, len(z[i])] )
return v
def sgv(subbytes):
x = grpv(subbytes)
sum = 0
for i in x:
sum = sum + i[1]
return sum
def main():
cycles = grpv(sbbytes)
    print('The AES sbox contains', len(cycles), 'permutation subgroups')
    print('The AES sbox subgroups (start, length) are:')
    print(cycles)
# Make this test module runnable from the command prompt
if __name__ == "__main__":
main()
| 33.009524 | 81 | 0.585978 | [
"MIT"
] | Twilight0/script.module.cryptolib | lib/cryptopy/cipher/aes_sbox_analysis.py | 3,466 | Python |
import csv # csv reader functions
from collections import Counter  # count uniques in a file quickly
from decimal import Decimal # just to show decimals with lower precision
# Global Variables #
from cfltools.settings import APPFOLDER
class IpAddress:
def __init__(self, ip, numOccurances):
self.ip = ip
self.numOccurances = numOccurances
self.startTime = float('inf')
self.endTime = float('-inf')
def findTimeColumn(row):
"""Dynamically determine which column of a log file contains dates.
Parameters:
row: A row of a logfile
Returns:
iterator: An integer defining the row that contains a valid date
string.
"""
import dateparser
iterator = 0
for item in row:
if item.isdigit():
            # This is a hacky way of preventing integers from
            # being detected as date/time information
iterator += 1
continue
this = dateparser.parse(item)
if this:
return iterator
iterator += 1
return None
def findIpColumn(row):
import re
iterator = 0
# What's below are two regular expressions that pattern match to IP
# addresses. I tried using a library for this (netaddr) but that
# started matching to long integers that happened to have the right
# bits for an IP address.
ipv4_address = re.compile("""
^(?:(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])\\.)
{3}(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])$
""", re.VERBOSE)
ipv6_address = re.compile("""
^(?:(?:[0-9A-Fa-f]{1,4}:)
{6}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|
(?:(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])\\.)
{3}(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5]))|::
(?:[0-9A-Fa-f]{1,4}:)
{5}(?:[0-9A-Fa-f]{1,4}:
[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])\\.)
{3}(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4})?::
(?:[0-9A-Fa-f]{1,4}:)
{4}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|
(?:(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])\\.)
{3}(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5]))|
(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4})?::
(?:[0-9A-Fa-f]{1,4}:)
{3}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|
(?:(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])\\.)
{3}(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5]))|
(?:(?:[0-9A-Fa-f]{1,4}:){,2}[0-9A-Fa-f]{1,4})?::
(?:[0-9A-Fa-f]{1,4}:)
{2}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|
(?:(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])\\.)
{3}(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5]))|
(?:(?:[0-9A-Fa-f]{1,4}:){,3}[0-9A-Fa-f]{1,4})?::
[0-9A-Fa-f]{1,4}:(?:[0-9A-Fa-f]{1,4}:
[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])\\.)
{3}(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5]))|
(?:(?:[0-9A-Fa-f]{1,4}:){,4}[0-9A-Fa-f]{1,4})?::
(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|
(?:(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5])\\.)
{3}(?:[0-9]|[1-9][0-9]|1[0-9]
{2}|2[0-4][0-9]|25[0-5]))|
(?:(?:[0-9A-Fa-f]{1,4}:){,5}[0-9A-Fa-f]{1,4})?::
[0-9A-Fa-f]{1,4}|(?:(?:[0-9A-Fa-f]{1,4}:)
{,6}[0-9A-Fa-f]{1,4})?::)$
""", re.VERBOSE) # and that's how you regex IPv6
for item in row:
ipv4_check = ipv4_address.match(item)
ipv6_check = ipv6_address.match(item)
if ipv4_check or ipv6_check:
return iterator
iterator = iterator + 1
print("Could not find a column containing IP addresses!")
print("Error in getuniqueip.py, findIpColumn()")
exit(1)
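# Illustrative example (not part of the original module): for a row such as
#     ["2019-07-04 12:30:00", "203.0.113.7", "GET /index.html"]
# findIpColumn(row) returns 1, since only that field matches the IPv4/IPv6 regexes.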
def scrapeIPs(filename):
"""Scrapes all IP addresses from a logfile.
"""
# Encoding must be UTF-8 to allow for international chars
file = open(filename, encoding='utf-8')
logfile_reader = csv.reader(file) # csv reader class
# Put all of the IP addresses into one list. #
print('Getting the size of the logfile....\n')
# Count the number of rows so we can track progress later.
logsize = sum(1 for row in logfile_reader)
# Determine which row contains an IP address.
file.seek(0)
next(logfile_reader)
row = next(logfile_reader)
ip_column = findIpColumn(row)
file.seek(0) # Return to the top of the csv.
next(logfile_reader) # Skip the header row.
print('Processing ' + str(logsize) + ' entries.')
iterator = 0
all_ip_address = []
for entry in logfile_reader:
try:
# For each entry, we will append that entry's IP address to
# a list of all the IPs. We'll return that list later.
entry_ip_address = entry[ip_column]
all_ip_address.append(entry_ip_address)
iterator = iterator + 1
if iterator % 1000 == 0:
percentDone = round(Decimal((iterator / logsize) * 100), 2)
string = 'Currently: Scraping all IPs from file. Entry ' + \
str(iterator) + ' of ' + str(logsize) + \
' Percent Done: ' + str(percentDone) + '%.'
print(string, end='\r')
except UserWarning:
print('\n* * * Invalid entry detected on line ' + str(iterator) +
'.')
iterator = iterator + 1
print('Line data: ')
print('Using column {} for IP address.'.format(ip_column))
print('Data from that column, for this entry, '
'was {}.'.format(entry[ip_column]))
print(entry)
print('\n')
return all_ip_address
def getUniqueIps(all_ip_address):
# Run Counter() on the complete list of IPs. #
iterator = 0
counted_ip_address = Counter(all_ip_address)
unique_ip_address = []
print('=== Creating list of unique IPs. ===')
logsize = len(counted_ip_address)
for address in counted_ip_address:
try:
# Create a new IpAddress() object for each discovered
# IP. Store the address and the counts for its appearance
# in that object.
this_addr = address
this_count = counted_ip_address[address]
newIpAddress = IpAddress(this_addr, this_count)
unique_ip_address.append(newIpAddress)
iterator = iterator + 1
if (iterator % 1000) == 0:
percentDone = round(Decimal((iterator / logsize) * 100), 2)
string = 'Currently: Creating Unique IP List. Entry ' + \
str(iterator) + ' of ' + str(logsize) + \
' Percent Done: ' + str(percentDone) + '%.'
print(string, end='\r')
except UserWarning:
print('\nError creating IP address object!')
print('Crash data:')
print('\tThe address line was:')
print(address)
    # Sort the list by most frequently occurring IP. #
percentDone = 100
string = 'Currently: Generating report. Entry ' + str(iterator) + \
' of ' + str(logsize) + ' Percent Done: ' + str(percentDone) + \
'%.'
print(string, '\n')
unique_ip_address.sort(key=lambda x: x.numOccurances, reverse=True)
return unique_ip_address
def sendUniqueToDatabase(unique_ip_address, APPFOLDER, incident_id, conn):
print(APPFOLDER)
c = conn.cursor()
for ip in unique_ip_address:
c.execute("""
INSERT INTO ipaddrs(ip,number_occurances,incident_id,
start_time,end_time)
VALUES(?,?,?,?,?)
""", (ip.ip, ip.numOccurances, incident_id, ip.startTime,
ip.endTime))
conn.commit()
def getTimerange(filename, unique_ip_address):
"""Naive method to determine the time range during which an IP
address appears in a logfile.
This is sort of hacky. I'm using timestring to process fairly arbitrary
text input strings for dates from logs, converting those into POSIX
dates and times, and then comparing that to a simple integer stored
in the object to establish our range.
Parameters:
filename: The logfile we are examining in this job.
unique_ip_address: A list of IpAddress() objects.
Returns:
        unique_ip_address: A list of unique IpAddress()
objects with dates included.
"""
import csv
import dateparser
print('Determining date/time ranges for each unique IP...')
file = open(filename, 'r', encoding='utf-8')
logfile_reader = csv.reader(file)
next(logfile_reader)
row = next(logfile_reader)
ip_column = findIpColumn(row)
time_column = findTimeColumn(row)
file.seek(0)
next(logfile_reader)
# TODO: get this runtime under O(n^2)
for ip in unique_ip_address:
file.seek(0)
for entry in logfile_reader:
if ip.ip == entry[ip_column]:
entry_time = dateparser.parse(entry[time_column],
settings={'TIMEZONE': 'UTC',
'RETURN_AS_TIMEZONE_AWARE': True
}).timestamp()
if ip.startTime > entry_time:
ip.startTime = entry_time
if ip.endTime < entry_time:
ip.endTime = entry_time
return unique_ip_address
def run(filename, incident_id, seen):
import configparser
config = configparser.ConfigParser()
config.read(APPFOLDER + '/cfltools.ini')
all_ip_address = scrapeIPs(filename)
unique_ip_address = getUniqueIps(all_ip_address)
unique_ip_address = getTimerange(filename, unique_ip_address)
if not seen:
import sqlite3
db_connection = sqlite3.connect(config['USER']['db_loc'])
print('Adding to database located at {}...'.format(config['USER']['db_loc']))
sendUniqueToDatabase(unique_ip_address, APPFOLDER, incident_id, db_connection)
db_connection.close()
else:
print('File was already added to database. Skipping database export.')
def main():
pass
if __name__ == "__main__":
main()
| 41.375 | 86 | 0.489167 | [
"MIT"
] | bradley-evans/cfltools | cfltools/depreciated/getuniqueip.py | 11,585 | Python |
# -*- coding: utf-8 -*-
"""Top-level package for {{ cookiecutter.project_name }}."""
__project__ = "{{ cookiecutter.project_name }}"
__author__ = "{{ cookiecutter.full_name }}"
__email__ = "{{ cookiecutter.email }}"
__version__ = "{{ cookiecutter.version }}"
| 29 | 60 | 0.666667 | [
"Apache-2.0"
] | AKhranovskiy/cookiecutter-python | {{cookiecutter.project_name}}/{{cookiecutter.project_slug}}/__init__.py | 261 | Python |
# custom PosLemmaTagger based on Chatterbot tagger
import string
from chatterbot import languages
import spacy
from chatterbot import tagging
class CustomPosLemmaTagger(tagging.PosLemmaTagger):
def __init__(self, language=None):
        super(CustomPosLemmaTagger, self).__init__(language=language)
def get_bigram_pair_string(self, text):
"""
Return a string of text containing part-of-speech, lemma pairs.
"""
bigram_pairs = []
if len(text) <= 2:
text_without_punctuation = text.translate(self.punctuation_table)
if len(text_without_punctuation) >= 1:
text = text_without_punctuation
document = self.nlp(text)
if len(text) <= 2:
bigram_pairs = [
token.lemma_.lower() for token in document
]
else:
tokens = [
token for token in document if token.is_alpha and not token.is_stop
]
if len(tokens) < 2:
tokens = [
token for token in document if token.is_alpha
]
for index in range(0, len(tokens)):
bigram_pairs.append('{}:{}'.format(
tokens[index].pos_,
tokens[index].lemma_.lower()
))
if not bigram_pairs:
bigram_pairs = [
token.lemma_.lower() for token in document
]
return ' '.join(bigram_pairs)
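# Illustrative example (hypothetical; the exact output depends on the spaCy model
# loaded by the parent tagger):
#
#     tagger = CustomPosLemmaTagger()
#     tagger.get_bigram_pair_string('What is artificial intelligence?')
#     # -> something like 'ADJ:artificial NOUN:intelligence'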
| 27.925926 | 83 | 0.55305 | [
"BSD-3-Clause"
] | sciutrux/cbotami | tagging.py | 1,508 | Python |
"""Download handlers for different schemes"""
import logging
from twisted.internet import defer
from scrapy import signals
from scrapy.exceptions import NotConfigured, NotSupported
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import create_instance, load_object
from scrapy.utils.python import without_none_values
logger = logging.getLogger(__name__)
class DownloadHandlers:
def __init__(self, crawler):
self._crawler = crawler
        self._schemes = {}  # stores acceptable schemes on instancing
        self._handlers = {}  # stores instanced handlers for schemes
        self._notconfigured = {}  # remembers failed handlers
        # keep only the handler paths that are not None
handlers = without_none_values(
crawler.settings.getwithbase('DOWNLOAD_HANDLERS'))
for scheme, clspath in handlers.items():
            # instantiate the handler for each scheme
self._schemes[scheme] = clspath
self._load_handler(scheme, skip_lazy=True)
        # s.engine_stopped TODO: a signal is connected here; its exact purpose is not yet clear
crawler.signals.connect(self._close, signals.engine_stopped)
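    # For reference (illustrative, trimmed): Scrapy's default DOWNLOAD_HANDLERS_BASE
    # maps schemes to handler class paths, e.g. something like
    #     {'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
    #      'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler'}
    # and a project's DOWNLOAD_HANDLERS setting can override entries or disable a
    # scheme by mapping it to None (filtered out above by without_none_values).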
def _get_handler(self, scheme):
"""Lazy-load the downloadhandler for a scheme
only on the first request for that scheme.
仅在对该协议的第一个请求时才延迟加载该协议的下载处理程序。
"""
# 注释的第一次请求才延迟再加是在init初始化就加载完成, 这里不会重复加载的意思
if scheme in self._handlers:
return self._handlers[scheme]
if scheme in self._notconfigured:
return None
if scheme not in self._schemes:
self._notconfigured[scheme] = 'no handler available for that scheme'
return None
return self._load_handler(scheme)
def _load_handler(self, scheme, skip_lazy=False):
path = self._schemes[scheme]
try:
            # import the class referenced by the dotted path
dhcls = load_object(path)
if skip_lazy and getattr(dhcls, 'lazy', True):
                # skip when lazy loading was requested or the class declares itself lazy
return None
            # instantiate the handler
dh = create_instance(
objcls=dhcls,
settings=self._crawler.settings,
crawler=self._crawler,
)
except NotConfigured as ex:
            # on error, record the scheme as not configured
self._notconfigured[scheme] = str(ex)
return None
except Exception as ex:
logger.error('Loading "%(clspath)s" for scheme "%(scheme)s"',
{"clspath": path, "scheme": scheme},
exc_info=True, extra={'crawler': self._crawler})
self._notconfigured[scheme] = str(ex)
return None
else:
            # no error: cache the instance in the dict and return it
self._handlers[scheme] = dh
return dh
def download_request(self, request, spider):
        # this is where the actual downloading is dispatched
scheme = urlparse_cached(request).scheme
        # find the appropriate handler for the request's scheme
handler = self._get_handler(scheme)
if not handler:
raise NotSupported(f"Unsupported URL scheme '{scheme}': {self._notconfigured[scheme]}")
return handler.download_request(request, spider)
@defer.inlineCallbacks
def _close(self, *_a, **_kw):
for dh in self._handlers.values():
if hasattr(dh, 'close'):
yield dh.close()
| 35.569892 | 99 | 0.620314 | [
"BSD-3-Clause"
] | Hugking/scrapy | scrapy/core/downloader/handlers/__init__.py | 3,766 | Python |
#!/usr/bin/env python
# -*- coding:utf-8 _*-
# @author : Lin Luo / Bruce Liu
# @time : 2020/1/3 21:35
# @contact : [email protected] / [email protected]
import argparse
parser = argparse.ArgumentParser()
parser.add_argument_group()
parser.add_argument('-c', '--config', help='config file for run and operation', required=False)
group = parser.add_mutually_exclusive_group()
group.add_argument('-a', '--add', help='add sk with ip', required=False)
group.add_argument('-d', '--delete', help='delete sk by sk or ip', required=False)
# group.add_argument('-e', '-examine', help='examine the status of ip', required=False)
group.add_argument('-r', '--run', help='run the main project', action='store_true', required=False)
group.add_argument('-t', '--test', help='test the config file, default path is conf/odyn.conf', action='store_true',
required=False)
group.add_argument('-s', '--stop', help='stop the main project', action='store_true', required=False)
args = parser.parse_args()
| 50.6 | 116 | 0.697628 | [
"Apache-2.0"
] | BruceWW/odyn | utils/args.py | 1,012 | Python |
import numpy as np
from yt.data_objects.selection_objects.data_selection_objects import (
YTSelectionContainer,
YTSelectionContainer3D,
)
from yt.data_objects.static_output import Dataset
from yt.funcs import ensure_list, validate_iterable, validate_object
from yt.geometry.selection_routines import points_in_cells
from yt.utilities.exceptions import YTIllDefinedCutRegion
from yt.utilities.on_demand_imports import _scipy
class YTCutRegion(YTSelectionContainer3D):
"""
This is a data object designed to allow individuals to apply logical
operations to fields and filter as a result of those cuts.
Parameters
----------
data_source : YTSelectionContainer3D
The object to which cuts will be applied.
conditionals : list of strings
A list of conditionals that will be evaluated. In the namespace
available, these conditionals will have access to 'obj' which is a data
object of unknown shape, and they must generate a boolean array. For
instance, conditionals = ["obj['temperature'] < 1e3"]
Examples
--------
>>> import yt
>>> ds = yt.load("RedshiftOutput0005")
>>> sp = ds.sphere("max", (1.0, 'Mpc'))
>>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"])
"""
_type_name = "cut_region"
_con_args = ("base_object", "conditionals")
_derived_quantity_chunking = "all"
def __init__(
self,
data_source,
conditionals,
ds=None,
field_parameters=None,
base_object=None,
locals=None,
):
if locals is None:
locals = {}
validate_object(data_source, YTSelectionContainer)
validate_iterable(conditionals)
for condition in conditionals:
validate_object(condition, str)
validate_object(ds, Dataset)
validate_object(field_parameters, dict)
validate_object(base_object, YTSelectionContainer)
if base_object is not None:
# passing base_object explicitly has been deprecated,
# but we handle it here for backward compatibility
if data_source is not None:
raise RuntimeError("Cannot use both base_object and data_source")
data_source = base_object
self.conditionals = ensure_list(conditionals)
if isinstance(data_source, YTCutRegion):
# If the source is also a cut region, add its conditionals
# and set the source to be its source.
# Preserve order of conditionals.
self.conditionals = data_source.conditionals + self.conditionals
data_source = data_source.base_object
super(YTCutRegion, self).__init__(
data_source.center, ds, field_parameters, data_source=data_source
)
self.base_object = data_source
self.locals = locals
self._selector = None
# Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
# ires and get_data
def chunks(self, fields, chunking_style, **kwargs):
# We actually want to chunk the sub-chunk, not ourselves. We have no
# chunks to speak of, as we do not data IO.
for chunk in self.index._chunk(self.base_object, chunking_style, **kwargs):
with self.base_object._chunked_read(chunk):
with self._chunked_read(chunk):
self.get_data(fields)
yield self
def get_data(self, fields=None):
fields = ensure_list(fields)
self.base_object.get_data(fields)
ind = self._cond_ind
for field in fields:
f = self.base_object[field]
if f.shape != ind.shape:
parent = getattr(self, "parent", self.base_object)
self.field_data[field] = parent[field][self._part_ind(field[0])]
else:
self.field_data[field] = self.base_object[field][ind]
@property
def blocks(self):
# We have to take a slightly different approach here. Note that all
# that .blocks has to yield is a 3D array and a mask.
for obj, m in self.base_object.blocks:
m = m.copy()
with obj._field_parameter_state(self.field_parameters):
for cond in self.conditionals:
ss = eval(cond)
m = np.logical_and(m, ss, m)
if not np.any(m):
continue
yield obj, m
@property
def _cond_ind(self):
ind = None
obj = self.base_object
locals = self.locals.copy()
if "obj" in locals:
raise RuntimeError(
'"obj" has been defined in the "locals" ; '
"this is not supported, please rename the variable."
)
locals["obj"] = obj
with obj._field_parameter_state(self.field_parameters):
for cond in self.conditionals:
res = eval(cond, locals)
if ind is None:
ind = res
if ind.shape != res.shape:
raise YTIllDefinedCutRegion(self.conditionals)
np.logical_and(res, ind, ind)
return ind
def _part_ind_KDTree(self, ptype):
"""Find the particles in cells using a KDTree approach."""
parent = getattr(self, "parent", self.base_object)
units = "code_length"
pos = np.stack(
[
self[("index", "x")].to(units),
self[("index", "y")].to(units),
self[("index", "z")].to(units),
],
axis=1,
).value
dx = np.stack(
[
self[("index", "dx")].to(units),
self[("index", "dy")].to(units),
self[("index", "dz")].to(units),
],
axis=1,
).value
ppos = np.stack(
[
parent[(ptype, "particle_position_x")],
parent[(ptype, "particle_position_y")],
parent[(ptype, "particle_position_z")],
],
axis=1,
).value
mask = np.zeros(ppos.shape[0], dtype=bool)
levels = self[("index", "grid_level")].astype("int32").value
if levels.size == 0:
return mask
levelmin = levels.min()
levelmax = levels.max()
for lvl in range(levelmax, levelmin - 1, -1):
# Filter out cells not in the current level
lvl_mask = levels == lvl
dx_loc = dx[lvl_mask]
pos_loc = pos[lvl_mask]
grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1)
# Compute closest cell for all remaining particles
dist, icell = grid_tree.query(
ppos[~mask], distance_upper_bound=dx_loc.max(), p=np.inf
)
mask_loc = np.isfinite(dist[:])
# Check that particles within dx of a cell are in it
i = icell[mask_loc]
dist = np.abs(ppos[~mask][mask_loc, :] - pos_loc[i])
tmp_mask = np.all(dist <= (dx_loc[i] / 2), axis=1)
mask_loc[mask_loc] = tmp_mask
# Update the particle mask with particles found at this level
mask[~mask] |= mask_loc
return mask
def _part_ind_brute_force(self, ptype):
parent = getattr(self, "parent", self.base_object)
units = "code_length"
mask = points_in_cells(
self[("index", "x")].to(units),
self[("index", "y")].to(units),
self[("index", "z")].to(units),
self[("index", "dx")].to(units),
self[("index", "dy")].to(units),
self[("index", "dz")].to(units),
parent[(ptype, "particle_position_x")].to(units),
parent[(ptype, "particle_position_y")].to(units),
parent[(ptype, "particle_position_z")].to(units),
)
return mask
def _part_ind(self, ptype):
# If scipy is installed, use the fast KD tree
# implementation. Else, fall back onto the direct
# brute-force algorithm.
try:
_scipy.spatial.KDTree
return self._part_ind_KDTree(ptype)
except ImportError:
return self._part_ind_brute_force(ptype)
@property
def icoords(self):
return self.base_object.icoords[self._cond_ind, :]
@property
def fcoords(self):
return self.base_object.fcoords[self._cond_ind, :]
@property
def ires(self):
return self.base_object.ires[self._cond_ind]
@property
def fwidth(self):
return self.base_object.fwidth[self._cond_ind, :]
def _get_bbox(self):
"""
Get the bounding box for the cut region. Here we just use
the bounding box for the source region.
"""
return self.base_object._get_bbox()
| 35.13834 | 83 | 0.579865 | [
"BSD-3-Clause-Clear"
] | chummels/yt | yt/data_objects/selection_objects/cut_region.py | 8,890 | Python |
# terrascript/data/onelogin.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:23:37 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.data.onelogin
#
# instead of
#
# >>> import terrascript.data.onelogin.onelogin
#
# This is only available for 'official' and 'partner' providers.
from terrascript.data.onelogin.onelogin import *
| 24.8 | 73 | 0.739247 | [
"BSD-2-Clause"
] | mjuenema/python-terrascript | terrascript/data/onelogin.py | 372 | Python |
import cv2
import os
import json
from collections import defaultdict
filelist_path = '/data/zfp/data/jinan/filelist.json'
img_path = '/data/zfp/data/jinan/visual/%s_fusion.png'
builidng_txt = '/data/zfp/code/mmdetection/results_merge/building/building.txt'
output_dir = '/data/zfp/code/mmdetection/visual/building'
score_thresh = 0.7
result = defaultdict(list)
with open(builidng_txt) as f:
content = f.read().strip().split('\n')
content = [c.strip().split() for c in content]
for c in content:
id_ = int(c[0])
score = float(c[1])
bbox = list(map(int, map(float, c[2:])))
if score > score_thresh:
result[id_].append(bbox)
with open(filelist_path) as f:
filelist = json.load(f)
print(list(filelist.items())[0])
id2name = dict()
valset = []
for k, v in filelist.items():
id2name[v['id']] = k
if v['split'] == 'val':
valset.append(v['id'])
for val_id in valset:
img = cv2.imread(img_path % id2name[val_id])
for bbox in result[val_id]:
x1, y1, x2, y2 = bbox
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imwrite(os.path.join(output_dir, id2name[val_id] + '.png'), img)
| 31.184211 | 79 | 0.640506 | [
"Apache-2.0"
] | abcxs/polyrnn | mytools/building_jinan/visual.py | 1,185 | Python |
# -*- coding: utf-8 -*-
"""Common collection classes."""
from __future__ import print_function, division, absolute_import
from functools import reduce
from collections import Mapping, Set
from .compat import isiterable, iteritems, odict, text_type
def make_immutable(value):
# this function is recursive, and if nested data structures fold back on themselves,
# there will likely be recursion errors
if isinstance(value, Mapping):
if isinstance(value, frozendict):
return value
return frozendict((k, make_immutable(v)) for k, v in iteritems(value))
elif isinstance(value, Set):
if isinstance(value, frozenset):
return value
return frozenset(make_immutable(v) for v in value)
elif isiterable(value):
if isinstance(value, tuple):
return value
return tuple(make_immutable(v) for v in value)
else:
return value
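# Illustrative example (not part of the original source): assuming isiterable()
# excludes strings, make_immutable({'channels': ['defaults'], 'opts': {'x': 1}})
# returns frozendict({'channels': ('defaults',), 'opts': frozendict({'x': 1})}) -
# mappings become frozendicts, sets become frozensets, other iterables become tuples.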
# http://stackoverflow.com/a/14620633/2127762
class AttrDict(dict):
"""Sub-classes dict, and further allows attribute-like access to dictionary items.
Examples:
>>> d = AttrDict({'a': 1})
>>> d.a, d['a'], d.get('a')
(1, 1, 1)
>>> d.b = 2
>>> d.b, d['b']
(2, 2)
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class frozendict(odict):
def __key(self):
return tuple((k, self[k]) for k in sorted(self))
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
try:
return self.__key() == other.__key()
except AttributeError:
if isinstance(other, Mapping):
return self.__key() == frozendict(other).__key()
return False
def first(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
"""Give the first value that satisfies the key test.
Args:
seq (iterable):
key (callable): test for each element of iterable
default: returned when all elements fail test
apply (callable): applied to element before return, but not to default value
Returns: first element in seq that passes key, mutated with optional apply
Examples:
>>> first([0, False, None, [], (), 42])
42
>>> first([0, False, None, [], ()]) is None
True
>>> first([0, False, None, [], ()], default='ohai')
'ohai'
>>> import re
>>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
>>> m.group(1)
'bc'
The optional `key` argument specifies a one-argument predicate function
like that used for `filter()`. The `key` argument, if supplied, must be
in keyword form. For example:
>>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
4
"""
return next((apply(x) for x in seq if key(x)), default() if callable(default) else default)
def firstitem(map, key=lambda k, v: bool(k), default=None, apply=lambda k, v: (k, v)):
return next((apply(k, v) for k, v in map if key(k, v)), default)
def last(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
return next((apply(x) for x in reversed(seq) if key(x)), default)
def call_each(seq):
"""Calls each element of sequence to invoke the side effect.
Args:
seq:
Returns: None
"""
try:
reduce(lambda _, y: y(), seq)
except TypeError as e:
if text_type(e) != "reduce() of empty sequence with no initial value":
raise
| 30.355932 | 95 | 0.595757 | [
"BSD-3-Clause"
] | AXGKl/be_black | lib/python3.7/site-packages/conda/_vendor/auxlib/collection.py | 3,582 | Python |
def event_handler(source,evt):
if evt == lv.EVENT.CLICKED:
if source == btn1:
# treat "clicked" events only for btn1
print("Clicked")
elif evt == lv.EVENT.VALUE_CHANGED:
print("Toggled")
# create a simple button
btn1 = lv.btn(lv.scr_act(),None)
# attach the callback
btn1.set_event_cb(event_handler)
btn1.align(None,lv.ALIGN.CENTER,0,-40)
label=lv.label(btn1,None)
label.set_text("Button")
# create a toggle button
btn2 = lv.btn(lv.scr_act(),None)
# attach the callback
btn2.set_event_cb(event_handler)
btn2.align(None,lv.ALIGN.CENTER,0,40)
btn2.set_checkable(True)
btn2.toggle()
#btn2.set_fit2(lv.FIT.NONE,lv.FIT.TIGHT)
label=lv.label(btn2,None)
label.set_text("Toggled")
| 25.275862 | 50 | 0.694407 | [
"MIT"
] | Dimsmary/OpenSTM | ArduinoProject/DAC_CONTROLLER/lib/lv_demos/src/lv_ex_widgets/lv_ex_btn/lv_ex_btn_1.py | 733 | Python |
"""List options for creating Placement Groups"""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers.vs_placement import PlacementManager as PlacementManager
@click.command()
@environment.pass_env
def cli(env):
"""List options for creating a placement group."""
manager = PlacementManager(env.client)
routers = manager.get_routers()
env.fout(get_router_table(routers))
rules = manager.get_all_rules()
env.fout(get_rule_table(rules))
def get_router_table(routers):
"""Formats output from _get_routers and returns a table. """
table = formatting.Table(['Datacenter', 'Hostname', 'Backend Router Id'], "Available Routers")
for router in routers:
datacenter = router['topLevelLocation']['longName']
table.add_row([datacenter, router['hostname'], router['id']])
return table
def get_rule_table(rules):
"""Formats output from get_all_rules and returns a table. """
table = formatting.Table(['Id', 'KeyName'], "Rules")
for rule in rules:
table.add_row([rule['id'], rule['keyName']])
return table
| 30.410256 | 98 | 0.711636 | [
"MIT"
] | ATGE/softlayer-python | SoftLayer/CLI/virt/placementgroup/create_options.py | 1,186 | Python |
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
import sys
sys.path.append("..")
from hackathon import Component, RequiredFeature
from flask import g
from hackathon.database.models import UserHackathonRel, Experiment, UserProfile, Team
from hackathon.hackathon_response import bad_request, precondition_failed, internal_server_error, not_found, ok
from hackathon.constants import EStatus, RGStatus, ReservedUser
import json
class RegisterManager(Component):
hackathon_manager = RequiredFeature("hackathon_manager")
user_manager = RequiredFeature("user_manager")
def get_hackathon_registration(self, num=None):
registers = self.db.find_all_objects_order_by(UserHackathonRel,
num, # limit num
UserHackathonRel.create_time.desc(),
hackathon_id=g.hackathon.id)
return map(lambda x: self.get_registration_with_profile(x), registers)
def get_registration_with_profile(self, register):
register_dic = register.dic()
register_dic['user'] = self.user_manager.user_display_info(register.user)
return register_dic
def get_registration_by_id(self, id):
return self.db.get_object(UserHackathonRel, id)
def get_registration_by_user_and_hackathon(self, user_id, hackathon_id):
return self.db.find_first_object_by(UserHackathonRel, user_id=user_id, hackathon_id=hackathon_id)
def check_register_enrollment(self, hackathon):
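        """Return True if the hackathon still has open slots (max_enrollment == 0 means unlimited)."""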
max = int(json.loads(hackathon.basic_info)['max_enrollment'])
if max == 0: # means no limit
return True
else:
current_num = self.db.count(UserHackathonRel, UserHackathonRel.hackathon_id == hackathon.id)
return max > current_num
def validate_created_args(self, hackathon, args):
self.log.debug("create_register: %r" % args)
user_id = args['user_id']
register = self.get_registration_by_user_and_hackathon(user_id, hackathon.id)
if register is not None and register.deleted == 0:
self.log.debug("user %d already registered on hackathon %d" % (user_id, hackathon.id))
return False, register.dic()
if hackathon.registration_start_time > self.util.get_now():
return False, precondition_failed("hackathon registration not opened", friendly_message="报名尚未开始")
if hackathon.registration_end_time < self.util.get_now():
return False, precondition_failed("hackathon registration has ended", friendly_message="报名已经结束")
if not self.check_register_enrollment(hackathon):
return False, precondition_failed("hackathon registers reach the upper threshold",
friendly_message="报名人数已满")
return True, ""
def create_registration(self, hackathon, args):
state, return_info = self.validate_created_args(hackathon, args)
if not state:
return return_info
try:
args["status"] = hackathon.is_auto_approve() and RGStatus.AUTO_PASSED or RGStatus.UNAUDIT
return self.db.add_object_kwargs(UserHackathonRel, **args).dic()
except Exception as e:
self.log.error(e)
return internal_server_error("fail to create register")
def update_registration(self, args):
self.log.debug("update_registration: %r" % args)
try:
id = args['id']
register = self.get_registration_by_id(id)
if register is None:
# we can also create a new object here.
return not_found("registration not found")
self.log.debug("update a existed register")
update_items = dict(dict(args).viewitems() - register.dic().viewitems())
if "create_time" in update_items:
update_items.pop("create_time")
update_items["update_time"] = self.util.get_now()
self.db.update_object(register, **update_items)
return register.dic()
except Exception as e:
self.log.error(e)
return internal_server_error("fail to update register")
def delete_registration(self, args):
if "id" not in args:
return bad_request("id not invalid")
try:
            register = self.db.find_first_object_by(UserHackathonRel, id=args['id'])
if register is not None:
self.db.delete_object(register)
return ok()
except Exception as ex:
self.log.error(ex)
return internal_server_error("failed in delete register: %s" % args["id"])
def get_registration_detail(self, user_id, hackathon):
detail = {
"hackathon": hackathon.dic(),
"user": self.user_manager.user_display_info(g.user)
}
rel = self.get_registration_by_user_and_hackathon(user_id, hackathon.id)
if rel is None:
# return nothing
return detail
detail["registration"] = rel.dic()
# experiment
try:
experiment = self.db.find_first_object(Experiment,
Experiment.user_id == user_id,
Experiment.hackathon_id == hackathon.id,
Experiment.status.in_([EStatus.STARTING, EStatus.RUNNING]))
if experiment is not None:
detail["experiment"] = experiment.dic()
except Exception as e:
self.log.error(e)
return detail
def is_user_registered(self, user_id, hackathon):
# reservedUser (-1)
if user_id == ReservedUser.DefaultUserID:
return True
# admin
if self.hackathon_manager.validate_admin_privilege(user_id, hackathon.id):
return True
# user
reg = self.get_registration_by_user_and_hackathon(user_id, hackathon.id)
if reg is not None:
return reg.status == RGStatus.AUTO_PASSED or reg.status == RGStatus.AUDIT_PASSED
return False
def get_user_profile(self, user_id):
return self.db.find_first_object_by(UserProfile, user_id=user_id)
def create_user_profile(self, args):
self.log.debug("create_user_profile: %r" % args)
try:
exist = self.get_user_profile(g.user.id)
if not exist:
return self.db.add_object_kwargs(UserProfile, **args).dic()
else:
return self.update_user_profile(args)
except Exception as e:
self.log.debug(e)
return internal_server_error("failed to create user profile")
def update_user_profile(self, args):
self.log.debug("update_user_profile")
try:
u_id = args["user_id"]
user_profile = self.db.find_first_object_by(UserProfile, user_id=u_id)
if user_profile:
self.db.update_object(user_profile, **args)
return user_profile.dic()
else:
return not_found("fail to update user profile")
except Exception as e:
self.log.debug(e)
return internal_server_error("failed to update user profile")
| 43.029703 | 111 | 0.63035 | [
"MIT"
] | akondasif/open-hackathon-bak_01 | open-hackathon-server/src/hackathon/registration/register_mgr.py | 8,728 | Python |
"""
Takes the gradients of the solution to the screen mapping potential problem and
reconstructs the perpendicular deflection field.
"""
import numpy as np
import scipy as sp
import scipy.interpolate
import scipy.misc
import scipy.ndimage
from .constants import M_PROTON_G, ESU, C_CMS
def reconstruct(ri, li, rs, v, x, y, phix, phiy):
"""
Takes x, y gradients to the solution to screen mapping potential problem and
reconstructs the perpendicular deflection fields wBx and wBy.
Args:
ri (float): Distance from source to plasma (cm).
li (float): Distance across plasma (cm).
rs (float): Distance from plasma to screen (cm).
v (float): Velocity of protons (cm/s).
x (array): Plasma x-coordinates (cm).
        y (array): Plasma y-coordinates (cm).
phix (array): Gradient of screen mapping potential in x-direction.
phiy (array): Gradient of screen mapping potential in y-direction.
    Returns:
        wBx (array): x-component of the perpendicular deflection field.
        wBy (array): y-component of the perpendicular deflection field.
    """
# TODO Add in option for masking the path-int B field.
# Input variables.
magnify = (rs + ri + .5*li)/(ri+.5*li)
map_pot_x = np.copy(phix)
map_pot_y = np.copy(phiy)
plasma_x = np.copy(x)
plasma_y = np.copy(y)
# We multiply the whole expression by magnify to put the perp-deflection
# fields into screen coordinates.
wBx = magnify*(v/rs)*(map_pot_x - plasma_x)
wBy = magnify*(v/rs)*(map_pot_y - plasma_y)
return(wBx, wBy)
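# Illustrative usage (not part of the original module): the output of
# `reconstruct` feeds directly into `magpath` to recover the path-integrated
# magnetic field, e.g.
#     wBx, wBy = reconstruct(ri, li, rs, v, x, y, phix, phiy)
#     Bxpath, Bypath = magpath(wBx, wBy)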
def magpath(wBx, wBy):
"""
Takes the perpendicular deflection field and reconstructs the path
integrated magnetic field.
Args:
wBx (array): x-component perpendicular deflection field.
wBy (array): y-component perpendicular deflection field.
Returns:
Bxpath (array): Path integrated magnetic field x-component.
Bypath (array): Path integrated magnetic field y-component.
"""
Bxpath = -(M_PROTON_G*C_CMS/ESU)*wBy
Bypath = (M_PROTON_G*C_CMS/ESU)*wBx
return(Bxpath, Bypath)
def fluximage(ri, li, rs, v, x, y, N, wBx, wBy):
"""
Creates a flux image out of a perpendicular deflection field.
Args:
        ri (float): Distance from source to plasma (cm).
        li (float): Distance across plasma (cm).
        rs (float): Distance from plasma to screen (cm).
        v (float): Velocity of protons (cm/s).
        x (array): Perpendicular deflection field x-coordinates.
        y (array): Perpendicular deflection field y-coordinates.
        N (int): Number of protons.
        wBx (array): Perpendicular deflection field x-component.
        wBy (array): Perpendicular deflection field y-component.
Returns:
flux_image (array): Generated flux image.
"""
# TODO Maybe change this to act on the reference flux.
magnify = (rs+ri+.5*li)/(ri+.5*li)
print('Creating interpolator functions...')
#fx = sp.interpolate.RegularGridInterpolator((x[:,0],y[0,:]),x,
# bounds_error=False)
#fy = sp.interpolate.RegularGridInterpolator((x[:,0],y[0,:]),y,
# bounds_error=False)
fwBx = sp.interpolate.RegularGridInterpolator((x[:,0],y[0,:]),wBx,
bounds_error=False)
fwBy = sp.interpolate.RegularGridInterpolator((x[:,0],y[0,:]),wBy,
bounds_error=False)
print('DONE')
prot_num = int(np.sqrt(N))
dx = x[1,0] - x[0,0]
dy = y[0,1] - y[0,0]
# Need to fix this-- cuts off some of the protons when moving to the centers
# of the bins.
samp_x = np.linspace(x[0,0]+.5*dx, x[-1,0]-.5*dx, num=prot_num)
samp_y = np.linspace(y[0,0]+.5*dy, y[0,-1]-.5*dy, num=prot_num)
samp_x, samp_y = np.meshgrid(samp_x, samp_y, indexing='ij')
print('Interpolating proton deflections...')
# The sampling of the coordinates is useless.
#samp_x = fx((samp_x, samp_y))
#samp_y = fy((samp_x, samp_y))
samp_wBx = fwBx((samp_x, samp_y))
samp_wBy = fwBy((samp_x, samp_y))
print('DONE')
screen_x = magnify*samp_x + (rs/v)*samp_wBx
screen_y = magnify*samp_y + (rs/v)*samp_wBy
print('Histogramming protons...')
flux_image = np.histogram2d(screen_x.ravel(), screen_y.ravel(),bins=x.shape)
print('DONE')
return(flux_image[0])
def fluximage2(x, y, phix, phiy, flux0, scale_fact=1, scale_order=3):
"""
An alternative approach to creating a flux image out of a perpendicular deflection field.
Args:
x (array): Plasma x-coordinates (cm).
        y (array): Plasma y-coordinates (cm).
        phix (array): Gradient of screen mapping potential in x-direction.
        phiy (array): Gradient of screen mapping potential in y-direction.
        flux0 (array): Reference proton flux used as histogram weights.
scale_fact: Integer factor by which to upscale arrays before analysis; a larger number slows the algorithm but fills out low-flux regions better
scale_order: Order of the spline interpolation for scipy.ndimage.zoom
Returns:
flux_image (array): Generated flux image.
"""
xgv = x[:,0].flatten()
ygv = y[0,:].flatten()
if scale_fact != 1:
print("Rescaling...")
xgv = scipy.ndimage.zoom(xgv, scale_fact, order=scale_order)
ygv = scipy.ndimage.zoom(ygv, scale_fact, order=scale_order)
phix = scipy.ndimage.zoom(phix, scale_fact, order=scale_order)
phiy = scipy.ndimage.zoom(phiy, scale_fact, order=scale_order)
flux0 = scipy.ndimage.zoom(flux0, scale_fact, order=scale_order)
dx = np.mean(np.diff(xgv))
dy = np.mean(np.diff(ygv))
x_edges = np.append(xgv - dx/2.0, xgv[-1] + dx/2.0)
y_edges = np.append(ygv - dy/2.0, ygv[-1] + dy/2.0)
print('Performing histogram...')
flux_image, _, _ = np.histogram2d(phix.flatten(), phiy.flatten(), bins=[x_edges, y_edges], weights=flux0.flatten())
if scale_fact != 1:
print("Descaling...")
flux_image = scipy.misc.imresize(flux_image, 1./scale_fact, mode='F')
print('DONE')
return(flux_image)
def fluximage3(ri, li, rs, v, x, y, N, wBx, wBy, Ntest):
"""
A Monte Carlo approach to creating a flux image out of a perpendicular deflection field.
Args:
        ri (float): Distance from source to plasma (cm).
        li (float): Distance across plasma (cm).
        rs (float): Distance from plasma to screen (cm).
        v (float): Velocity of protons (cm/s).
N: Number of protons in reality
x (array): Perpendicular deflection field x-coordinates.
y (array): Perpendicular deflection field y-coordinates.
wBx (array): Perpendicular deflection field x-component.
wBy (array): Perpendicular deflection field y-component.
Ntest: Number of test protons (Monte Carlo)
Returns:
flux_image (array): Generated flux image.
"""
# magnify = (rs + ri + li)/(ri)
magnify = (rs+li+ri)/(ri+.5*li)
xgv = x[:,0].flatten()
ygv = y[0,:].flatten()
xmin = np.min(xgv)
xmax = np.max(xgv)
ymin = np.min(ygv)
ymax = np.max(ygv)
dx = np.mean(np.diff(xgv))
dy = np.mean(np.diff(ygv))
x_edges = np.append(xgv - dx/2.0, xgv[-1] + dx/2.0)
y_edges = np.append(ygv - dy/2.0, ygv[-1] + dy/2.0)
# xd: N-element 1d Numpy Array, x positions of particles at deflection plane, in SI units
# yd: N-element 1d Numpy Array, y positions of particles at deflection plane, in SI units
xd = np.random.uniform(xmin, xmax, size=(Ntest,))
yd = np.random.uniform(ymin, ymax, size=(Ntest,))
xyd = np.stack((xd, yd), axis=1)
#del xd, yd
#wBx_rbv = sp.interpolate.RectBivariateSpline(xgv, ygv, wBx)
#wBy_rbv = sp.interpolate.RectBivariateSpline(xgv, ygv, wBy)
#wBxd = wBx_rbv.ev(xd, yd)
#wByd = wBy_rbv.ev(xd, yd)
wBxd = sp.interpolate.interpn((xgv, ygv), wBx, xyd, method='linear')
wByd = sp.interpolate.interpn((xgv, ygv), wBy, xyd, method='linear')
xfd = xd + rs/(magnify*v) * wBxd
yfd = yd + rs/(magnify*v) * wByd
print("Histogramming reference...")
flux_ref, _, _ = np.histogram2d(xd, yd, bins=[x_edges, y_edges])
flux_ref = flux_ref * N/Ntest
print("Histogramming signal...")
flux_image, _, _ = np.histogram2d(xfd, yfd, bins=[x_edges, y_edges])
flux_image = flux_image * N/Ntest
print('DONE')
return(flux_image, flux_ref)
| 33.609053 | 152 | 0.614057 | [
"MIT"
] | flash-center/PROBLEM | problem/deflect.py | 8,167 | Python |
# coding: utf-8
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
import elementtree.ElementTree as etree
# raises ImportError
def create_element_recursively(parent, path):
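    """Return the element at the '/'-separated `path` below `parent`, creating any missing intermediate elements."""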
nodes = path.split('/')
node = parent
for n_str in nodes:
n = node.find(n_str)
if n is None:
node = etree.SubElement(node, n_str)
else:
node = n
return node
| 24.625 | 55 | 0.558376 | [
"MIT"
] | fdelvalle/sdk-python | maxipago/utils/xml.py | 788 | Python |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import numpy as np
import sys
import os
import warnings
import paddle
import paddle.nn.quant.quant_layers as quant_layers
from paddle.fluid import dygraph, core, framework, unique_name
from paddle.fluid.executor import Executor, global_scope
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Constant
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.fluid.io import load_inference_model, save_inference_model
from paddle.fluid.log_helper import get_logger
from .. import quantization_pass
from . import utils
__all__ = ['ImperativeQuantAware']
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class ImperativeQuantAware(object):
"""
    Applying quantization aware training (QAT) to the dygraph model.
"""
def __init__(
self,
quantizable_layer_type=['Conv2D', 'Linear', 'Conv2DTranspose'],
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max',
weight_bits=8,
activation_bits=8,
moving_rate=0.9,
weight_preprocess_layer=None,
act_preprocess_layer=None,
weight_quantize_layer=None,
act_quantize_layer=None):
"""
The constructor for ImperativeQuantAware.
Args:
            quantizable_layer_type(list[str | layer]): List of the layer types
                that will be quantized. Default is
                ['Conv2D', 'Linear', 'Conv2DTranspose'].
weight_quantize_type(str): quantization type for weights,
which supports 'abs_max' and 'channel_wise_abs_max'.
activation_quantize_type(str): quantization type for activations,
which supports 'abs_max' and 'moving_average_abs_max' now.
If using 'abs_max' mode, the quantization scale will be
calculated dynamically each step in both training and testing
period. If using 'moving_average_abs_max', the static
quantization scale will be calculated during training and
used in inference.
weight_bits(int): quantization bit number for weights, whereas
the bias is not quantized.
activation_bits(int): quantization bit number for activations.
moving_rate(float): the parameter for 'moving_average_abs_max'
quantization.
weight_preprocess_layer(paddle.nn.Layer, optional): A paddle
Layer that defines how to preprocess weight before quantization.
Using this can quickly test if user's preprocess method works
or not. The input is non-quantized weight and function returns
processed weight to be quantized.
If None, the weight will be quantized directly.
Default is None.
act_preprocess_layer(paddle.nn.Layer, optional): A paddle Layer
that defines how to preprocess activation before quantization.
Using this can quickly test if user's preprocess method works
or not. The input is non-quantized activation and function returns
processed activation to be quantized.
If None, the activation will be quantized directly.
Default is None.
weight_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that
defines how to quantize weight.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
weight and returns dequantized weight.
                If None, will use quantization op defined by 'weight_quantize_type'.
Default is None.
act_quantize_layer(paddle.nn.Layer, optional): A paddle Layer that defines
how to quantize activation.
Using this can quickly test if user's quantization method works or not.
In this layer, user should both define quantization method and
dequantization method, that is, the function's input is non-quantized
activation and returns dequantized activation.
If None, will use quantization op defined by 'activation_quantize_type'.
Default is None.
Note:
            If user sets attribute 'skip_quant' to a Layer that supports dynamic
            quantization and sets it to true, the layer would not be quantized
            during training. If this attribute is not set or the attribute is
            false, the Layer would be quantized in training.
Examples 1:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
from paddle.vision.models \
import resnet
model = resnet.resnet50(pretrained=True)
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
                # Add the fake quant logic.
                # The original model will be rewritten.
                # The out_scale values of outputs in supported layers will be calculated.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./resnet50_qat",
input_spec=[
paddle.static.InputSpec(
shape=[None, 3, 224, 224], dtype='float32')])
Examples 2:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super(ImperativeModel, self).__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
def forward(self, inputs):
x = self.linear_0(inputs)
x = self.linear_1(inputs)
return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
                # Add the fake quant logic.
                # The original model will be rewritten.
                #
                # Only one layer (self.linear_1) will have the fake quant
                # logic added.
imperative_qat.quantize(model)
# Fine-tune the quantized model
# ...
# Save quant model for the inference.
imperative_qat.save_quantized_model(
layer=model,
model_path="./imperative_model_qat")
"""
super(ImperativeQuantAware, self).__init__()
kwargs = {
"quantizable_layer_type": quantizable_layer_type,
"weight_quantize_type": weight_quantize_type,
"activation_quantize_type": activation_quantize_type,
"weight_bits": weight_bits,
"activation_bits": activation_bits,
"moving_rate": moving_rate,
"weight_preprocess_layer": weight_preprocess_layer,
"act_preprocess_layer": act_preprocess_layer,
"weight_quantize_layer": weight_quantize_layer,
"act_quantize_layer": act_quantize_layer
}
self._quantize_inputs = ImperativeQuantizeInputs(**kwargs)
self._quantize_outputs = ImperativeQuantizeOutputs(moving_rate)
def quantize(self, model):
"""
        According to the weight and activation quantization types, fake quant
        ops are added to the model, such as
fake_quantize_dequantize_moving_average_abs_max,
fake_quantize_dequantize_abs_max and so on. At the same time,
the out_scale value of outputs would be calculated.
Args:
model(paddle.nn.Layer): the model to be quantized.
Returns:
None
Examples:
.. code-block:: python
import paddle
from paddle.fluid.contrib.slim.quantization \
import ImperativeQuantAware
class ImperativeModel(paddle.nn.Layer):
def __init__(self):
super(ImperativeModel, self).__init__()
# self.linear_0 would skip the quantization.
self.linear_0 = paddle.nn.Linear(784, 400)
self.linear_0.skip_quant = True
# self.linear_1 would not skip the quantization.
self.linear_1 = paddle.nn.Linear(400, 10)
self.linear_1.skip_quant = False
def forward(self, inputs):
x = self.linear_0(inputs)
x = self.linear_1(inputs)
return x
model = ImperativeModel()
imperative_qat = ImperativeQuantAware(
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max')
                # Add the fake quant logic.
                # The original model will be rewritten.
                #
                # Only one layer (self.linear_1) will have the fake quant
                # logic added.
imperative_qat.quantize(model)
"""
assert isinstance(model, dygraph.Layer), \
"The model must be the instance of dygraph.Layer."
self._quantize_inputs.apply(model)
self._quantize_outputs.apply(model)
def save_quantized_model(self, layer, path, input_spec=None, **config):
self._quantize_outputs.save_quantized_model(layer, path, input_spec,
**config)
class ImperativeQuantizeInputs(object):
"""
Based on the input params, add the quant_dequant computational
logic both for activation inputs and weight inputs.
"""
def __init__(
self,
quantizable_layer_type=['Conv2D', 'Linear', 'Conv2DTranspose'],
weight_quantize_type='abs_max',
activation_quantize_type='moving_average_abs_max',
weight_bits=8,
activation_bits=8,
moving_rate=0.9,
weight_preprocess_layer=None,
act_preprocess_layer=None,
weight_quantize_layer=None,
act_quantize_layer=None):
"""
The constructor for ImperativeQuantizeInputs.
Please refer to the args of ImperativeQuantAware.
"""
super(ImperativeQuantizeInputs, self).__init__()
self._quantizable_layer_type = tuple(
utils.layer_name_map[layer]
if layer in utils.layer_name_map else layer
for layer in quantizable_layer_type)
for layer in self._quantizable_layer_type:
assert not isinstance(layer, str) \
and layer in utils.fake_quant_input_layers, \
"%s is unspported to be quantized." % layer
quantize_type = {
'abs_max', 'moving_average_abs_max', 'channel_wise_abs_max'
}
assert weight_quantize_type != 'moving_average_abs_max' \
and weight_quantize_type in quantize_type, \
"Unsupported weight_quantize_type: %s. It can only " \
"be abs_max or channel_wise_abs_max." % weight_quantize_type
# TODO (jc): activation_quantize_type supports range_abs_max
assert activation_quantize_type == 'moving_average_abs_max', \
"Unsupported activation_quantize_type: %s. It can " \
"only be moving_average_abs_max now." \
% activation_quantize_type
bits_check = lambda bits: isinstance(bits, int) \
and bits >= 0 and bits <= 16
assert bits_check(weight_bits), \
"weight_bits should be 1, 2,... or 16."
assert bits_check(activation_bits), \
"activation_bits should be 1, 2,... or 16."
layer_check = lambda method: method is None or \
issubclass(method, dygraph.layers.Layer)
assert layer_check(weight_preprocess_layer), \
"weight_preprocess should be nn.Layer."
assert layer_check(act_preprocess_layer), \
"act_preprocess should be nn.Layer."
assert layer_check(weight_quantize_layer), \
"weight_quantize should be nn.Layer."
assert layer_check(act_quantize_layer), \
"act_quantize should be nn.Layer."
self._kwargs = {
"weight_quantize_type": weight_quantize_type,
"activation_quantize_type": activation_quantize_type,
"weight_bits": weight_bits,
"activation_bits": activation_bits,
"moving_rate": moving_rate,
"weight_pre_layer": weight_preprocess_layer,
"act_pre_layer": act_preprocess_layer,
"weight_quant_layer": weight_quantize_layer,
"act_quant_layer": act_quantize_layer
}
def apply(self, model):
"""
        Add the quant/dequant logic for the weights and activations of the
        quantizable layers.
Args:
model(paddle.nn.Layer): The target model which would
calculate the input quantization scale.
Returns:
None
"""
assert isinstance(model, dygraph.Layer), \
"The model must be the instance of dygraph.Layer."
for name, cur_layer in model.named_sublayers():
if not isinstance(cur_layer, self._quantizable_layer_type) \
or (hasattr(cur_layer, "skip_quant") \
and cur_layer.skip_quant == True):
continue
parent_layer, sub_name = \
utils.find_parent_layer_and_sub_name(model, name)
cur_quant_layer = self._get_input_quantized_layer(cur_layer)
setattr(parent_layer, sub_name, cur_quant_layer)
def _get_input_quantized_layer(self, layer):
quant_layer_name = None
for key, value in utils.layer_name_map.items():
if isinstance(layer, value):
quant_layer_name = 'Quantized' + key
break
assert quant_layer_name is not None, \
"The layer %s is unsupported to be quantized." \
% layer.full_name()
return quant_layers.__dict__[quant_layer_name](layer, **self._kwargs)
class ImperativeQuantizeOutputs(object):
"""
Calculate the output scales for target layers.
"""
def __init__(self, moving_rate=0.9):
"""
The constructor for ImperativeQuantizeOutputs.
Args:
moving_rate(float): The decay coefficient of moving average.
The default value is 0.9.
"""
super(ImperativeQuantizeOutputs, self).__init__()
self._moving_rate = moving_rate
def apply(self, model):
"""
Insert the `moving_average_abs_max_scale` layers to calculate the
output scales for specific layers in the dygraph model.
Args:
            model(paddle.nn.Layer): The target model for which the output
                quantization scales would be calculated.
Returns:
None
"""
assert isinstance(model, dygraph.Layer), \
"The model must be the instance of dygraph.Layer."
for cur_name, cur_layer in model.named_sublayers():
if '_act_preprocess' in cur_name:
continue
if not self._is_target_layer(cur_layer):
continue
parent_layer, sub_name = \
utils.find_parent_layer_and_sub_name(model, cur_name)
if isinstance(cur_layer, tuple(utils.fake_quant_output_layers)):
cur_quant_layer = quant_layers.FakeQuantMAOutputScaleLayer(
cur_layer, self._moving_rate)
else:
cur_quant_layer = quant_layers.MAOutputScaleLayer(
cur_layer, self._moving_rate)
setattr(parent_layer, sub_name, cur_quant_layer)
def save_quantized_model(self, model, path, input_spec=None, **config):
"""
Save the quantized model for the inference.
Args:
model (Layer): The model to be saved.
path (str): The path prefix to save model. The format is
``dirname/file_prefix`` or ``file_prefix``.
input_spec (list[InputSpec|Tensor], optional): Describes the input
of the saved model's forward method, which can be described by
InputSpec or example Tensor. If None, all input variables of
the original Layer's forward method would be the inputs of
the saved model. Default None.
            **config (dict, optional): Other save configuration options for
compatibility. We do not recommend using these configurations,
they may be removed in the future. If not necessary, DO NOT use
them. Default None.
The following options are currently supported:
(1) output_spec (list[Tensor]): Selects the output targets of
the saved model. By default, all return variables of original
Layer's forward method are kept as the output of the saved model.
If the provided ``output_spec`` list is not all output variables,
the saved model will be pruned according to the given
``output_spec`` list.
Returns:
None
"""
assert isinstance(model, dygraph.Layer), \
"The model must be the instance of dygraph.Layer."
paddle.jit.save(layer=model, path=path, input_spec=input_spec, **config)
is_dynamic_mode = False
if paddle.in_dynamic_mode():
is_dynamic_mode = True
paddle.enable_static()
place = core.CPUPlace()
scope = global_scope()
exe = Executor(place)
dirname = os.path.dirname(path)
basename = os.path.basename(path)
model_filename = basename + INFER_MODEL_SUFFIX
params_filename = basename + INFER_PARAMS_SUFFIX
[infer_program, feed_target_names, fetch_targets] = (
load_inference_model(
dirname=dirname,
executor=exe,
model_filename=model_filename,
params_filename=params_filename))
self._gather_scales(infer_program, scope)
self._set_skip_quant_attr(infer_program)
save_inference_model(
dirname=dirname,
feeded_var_names=feed_target_names,
target_vars=fetch_targets,
executor=exe,
main_program=infer_program.clone(),
model_filename=model_filename,
params_filename=params_filename)
if is_dynamic_mode:
paddle.disable_static()
def _is_target_layer(self, layer):
"""
Whether the layer needs to calculate output scales.
"""
flag = False
if isinstance(layer, dygraph.Layer):
# exclude fake_quant ops in quant_layers file
if utils.is_leaf_layer(layer) and \
not isinstance(layer, tuple(utils.fake_quant_leaf_layers)):
flag = True
if isinstance(layer, tuple(utils.fake_quant_wrap_layers)):
flag = True
if isinstance(layer, paddle.nn.quant.FloatFunctionalLayer):
flag = True
return flag
def _gather_scales(self, program, scope):
"""
Get all scales from fake ops, save them into the corresponding ops
and delete all moving_average_abs_max_scale ops.
"""
def _gather_input_scale():
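            # Copy the input scale recorded by the preceding fake quant/dequant
            # (or moving_average_abs_max_scale) op onto the consuming op as an
            # attribute named '<argname><index>_threshold'.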
target_ops = []
skip_ops = utils.fake_quantize_dequantize_op_types + \
["moving_average_abs_max_scale"]
for block in program.blocks:
for op in block.ops:
if op.type not in skip_ops:
target_ops.append(op)
for op in target_ops:
for in_var_name in utils._get_op_input_var_names(op):
previous_op = utils.find_previous_op(op.block, in_var_name)
if previous_op is not None and \
("quantize_dequantize" in previous_op.type or \
previous_op.type == "moving_average_abs_max_scale"):
scale_name = previous_op.output('OutScale')[0]
in_scale = utils.load_variable_data(scope, scale_name)
in_scale = utils.fp_numpy_to_naive(in_scale)
argname, index = utils._get_input_name_index(
op, in_var_name)
op._set_attr(argname + str(index) + "_threshold",
in_scale)
def _gather_output_scale():
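            # Attach each output scale to the op that produces the tensor, then
            # bypass the moving_average_abs_max_scale op by rewiring its
            # consumers to read the original input variable.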
target_ops = []
for block in program.blocks:
for op in block.ops:
if op.type == "moving_average_abs_max_scale":
target_ops.append(op)
for op in target_ops:
in_var_name = op.input('X')[0]
out_var_name = op.output('Out')[0]
block = op.block
previous_op = utils.find_previous_op(block, in_var_name)
next_ops = utils.find_next_ops(block, out_var_name)
out_scale_name = op.output('OutScale')[0]
out_scale = utils.load_variable_data(scope, out_scale_name)
out_scale = utils.fp_numpy_to_naive(out_scale)
if previous_op.type != "feed":
argname, index = utils._get_output_name_index(previous_op,
in_var_name)
previous_op._set_attr(argname + str(index) + "_threshold",
out_scale)
previous_op._set_attr("out_threshold", out_scale)
for next_op in next_ops:
next_op._rename_input(out_var_name, in_var_name)
_gather_input_scale()
_gather_output_scale()
def _set_skip_quant_attr(self, program):
"""
Label the skip quantized ops.
"""
for block in program.blocks:
for op in block.ops:
if self._is_skip_quant_op(block, op):
op._set_attr("skip_quant", True)
def _is_skip_quant_op(self, block, in_op):
"""
        Whether the input op should be skipped for quantization:
        1. the type of the input op is conv2d, depthwise_conv2d, matmul or conv2d_transpose
        2. the previous ops of the input op are not fake_quantize_dequantize ops
"""
target_op_types = [
"conv2d", "depthwise_conv2d", "matmul", "conv2d_transpose"
]
if in_op.type not in target_op_types:
return False
previous_ops = [utils.find_previous_op(block, arg_name) \
for arg_name in in_op.input_arg_names]
return any(op is not None and op.type not in \
utils.fake_quantize_dequantize_op_types for op in previous_ops)
| 40.595395 | 88 | 0.60453 | [
"Apache-2.0"
] | MissPenguin/Paddle | python/paddle/fluid/contrib/slim/quantization/imperative/qat.py | 24,682 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# refer to `https://bitbucket.org/akorn/wheezy.captcha`
import random
import string
import os.path
from io import BytesIO
from PIL import Image
from PIL import ImageFilter
from PIL.ImageDraw import Draw
from PIL.ImageFont import truetype
class Bezier:
def __init__(self):
self.tsequence = tuple([t / 20.0 for t in range(21)])
self.beziers = {}
def pascal_row(self, n):
""" Returns n-th row of Pascal's triangle
"""
result = [1]
x, numerator = 1, n
for denominator in range(1, n // 2 + 1):
x *= numerator
x /= denominator
result.append(x)
numerator -= 1
if n & 1 == 0:
result.extend(reversed(result[:-1]))
else:
result.extend(reversed(result))
return result
def make_bezier(self, n):
""" Bezier curves:
http://en.wikipedia.org/wiki/B%C3%A9zier_curve#Generalization
"""
try:
return self.beziers[n]
except KeyError:
combinations = self.pascal_row(n - 1)
result = []
for t in self.tsequence:
tpowers = (t ** i for i in range(n))
upowers = ((1 - t) ** i for i in range(n - 1, -1, -1))
coefs = [c * a * b for c, a, b in zip(combinations,
tpowers, upowers)]
result.append(coefs)
self.beziers[n] = result
return result
class Captcha(object):
def __init__(self):
self._bezier = Bezier()
self._dir = os.path.dirname(__file__)
# self._captcha_path = os.path.join(self._dir, '..', 'static', 'captcha')
@staticmethod
def instance():
if not hasattr(Captcha, "_instance"):
Captcha._instance = Captcha()
return Captcha._instance
def initialize(self, width=200, height=75, color=None, text=None, fonts=None):
# self.image = Image.new('RGB', (width, height), (255, 255, 255))
        # 4-character random verification code
self._text = text if text else random.sample(string.ascii_uppercase + string.ascii_uppercase + '3456789', 4)
# print(self._text)
self.fonts = fonts if fonts else \
[os.path.join(self._dir, 'fonts', font) for font in ['Arial.ttf', 'Georgia.ttf', 'actionj.ttf']]
self.width = width
self.height = height
self._color = color if color else self.random_color(0, 200, random.randint(220, 255))
@staticmethod
def random_color(start, end, opacity=None):
red = random.randint(start, end)
green = random.randint(start, end)
blue = random.randint(start, end)
if opacity is None:
return red, green, blue
return red, green, blue, opacity
# draw image
def background(self, image):
Draw(image).rectangle([(0, 0), image.size], fill=self.random_color(238, 255))
return image
@staticmethod
def smooth(image):
return image.filter(ImageFilter.SMOOTH)
def curve(self, image, width=4, number=6, color=None):
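        # Draw a random Bézier interference curve across the image.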
dx, height = image.size
dx /= number
path = [(dx * i, random.randint(0, height))
for i in range(1, number)]
bcoefs = self._bezier.make_bezier(number - 1)
points = []
for coefs in bcoefs:
points.append(tuple(sum([coef * p for coef, p in zip(coefs, ps)])
for ps in zip(*path)))
Draw(image).line(points, fill=color if color else self._color, width=width)
return image
def noise(self, image, number=50, level=2, color=None):
width, height = image.size
dx = width / 10
width -= dx
dy = height / 10
height -= dy
draw = Draw(image)
for i in range(number):
x = int(random.uniform(dx, width))
y = int(random.uniform(dy, height))
draw.line(((x, y), (x + level, y)), fill=color if color else self._color, width=level)
return image
def text(self, image, fonts, font_sizes=None, drawings=None, squeeze_factor=0.75, color=None):
color = color if color else self._color
fonts = tuple([truetype(name, size)
for name in fonts
for size in font_sizes or (65, 70, 75)])
draw = Draw(image)
char_images = []
for c in self._text:
font = random.choice(fonts)
c_width, c_height = draw.textsize(c, font=font)
char_image = Image.new('RGB', (c_width, c_height), (0, 0, 0))
char_draw = Draw(char_image)
char_draw.text((0, 0), c, font=font, fill=color)
char_image = char_image.crop(char_image.getbbox())
for drawing in drawings:
d = getattr(self, drawing)
char_image = d(char_image)
char_images.append(char_image)
width, height = image.size
offset = int((width - sum(int(i.size[0] * squeeze_factor)
for i in char_images[:-1]) -
char_images[-1].size[0]) / 2)
for char_image in char_images:
c_width, c_height = char_image.size
mask = char_image.convert('L').point(lambda i: i * 1.97)
image.paste(char_image,
(offset, int((height - c_height) / 2)),
mask)
offset += int(c_width * squeeze_factor)
return image
# draw text
@staticmethod
def warp(image, dx_factor=0.27, dy_factor=0.21):
width, height = image.size
dx = width * dx_factor
dy = height * dy_factor
x1 = int(random.uniform(-dx, dx))
y1 = int(random.uniform(-dy, dy))
x2 = int(random.uniform(-dx, dx))
y2 = int(random.uniform(-dy, dy))
image2 = Image.new('RGB',
(width + abs(x1) + abs(x2),
height + abs(y1) + abs(y2)))
image2.paste(image, (abs(x1), abs(y1)))
width2, height2 = image2.size
return image2.transform(
(width, height), Image.QUAD,
(x1, y1,
-x1, height2 - y2,
width2 + x2, height2 + y2,
width2 - x2, -y1))
@staticmethod
def offset(image, dx_factor=0.1, dy_factor=0.2):
width, height = image.size
dx = int(random.random() * width * dx_factor)
dy = int(random.random() * height * dy_factor)
image2 = Image.new('RGB', (width + dx, height + dy))
image2.paste(image, (dx, dy))
return image2
@staticmethod
def rotate(image, angle=25):
return image.rotate(
random.uniform(-angle, angle), Image.BILINEAR, expand=1)
def captcha(self, path=None, fmt='JPEG'):
"""Create a captcha.
Args:
path: save path, default None.
fmt: image format, PNG / JPEG.
Returns:
            A tuple, (name, text, BytesIO value).
For example:
('fXZJN4AFxHGoU5mIlcsdOypa', 'JGW9', '\x89PNG\r\n\x1a\n\x00\x00\x00\r...')
"""
image = Image.new('RGB', (self.width, self.height), (255, 255, 255))
image = self.background(image)
image = self.text(image, self.fonts, drawings=['warp', 'rotate', 'offset'])
image = self.curve(image)
image = self.noise(image)
image = self.smooth(image)
name = "".join(random.sample(string.ascii_lowercase + string.ascii_uppercase + '3456789', 24))
text = "".join(self._text)
out = BytesIO()
image.save(out, format=fmt)
if path:
image.save(os.path.join(path, name), fmt)
return name, text, out.getvalue()
def generate_captcha(self):
self.initialize()
return self.captcha("")
# Global instance; just call captcha.generate_captcha() to use it.
captcha = Captcha.instance()
if __name__ == '__main__':
x = captcha.generate_captcha()
y = "%s.jpg" % x[1]
print(x)
with open(y,"wb") as f:
        f.write(x[2])
| 34.764957 | 116 | 0.547019 | [
"MIT"
] | rymmx/My_information | info/utils/captcha/captcha.py | 8,171 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""pipreqs - Generate pip requirements.txt file based on imports
Usage:
pipreqs [options] <path>
Options:
--use-local Use ONLY local package info instead of querying PyPI
--pypi-server <url> Use custom PyPi server
--proxy <url> Use Proxy, parameter will be passed to requests library. You can also just set the
environments parameter in your terminal:
$ export HTTP_PROXY="http://10.10.1.10:3128"
$ export HTTPS_PROXY="https://10.10.1.10:1080"
--debug Print debug information
--ignore <dirs>... Ignore extra directories, each separated by a comma
--encoding <charset> Use encoding parameter for file open
--savepath <file> Save the list of requirements in the given file
--print Output the list of requirements in the standard output
--force Overwrite existing requirements.txt
--diff <file> Compare modules in requirements.txt to project imports.
--clean <file> Clean up requirements.txt by removing modules that are not imported in project.
"""
from __future__ import print_function, absolute_import
import os
import sys
import re
import logging
import codecs
import ast
import traceback
from docopt import docopt
import requests
from yarg import json2package
from yarg.exceptions import HTTPError
from pipreqs import __version__
REGEXP = [
re.compile(r'^import (.+)$'),
re.compile(r'^from ((?!\.+).*?) import (?:.*)$')
]
if sys.version_info[0] > 2:
open_func = open
py2 = False
else:
open_func = codecs.open
py2 = True
py2_exclude = ["concurrent", "concurrent.futures"]
def get_all_imports(path, encoding=None, extra_ignore_dirs=None):
imports = set()
raw_imports = set()
candidates = []
ignore_errors = False
ignore_dirs = [".hg", ".svn", ".git", ".tox", "__pycache__", "env", "venv"]
if extra_ignore_dirs:
ignore_dirs_parsed = []
for e in extra_ignore_dirs:
ignore_dirs_parsed.append(os.path.basename(os.path.realpath(e)))
ignore_dirs.extend(ignore_dirs_parsed)
for root, dirs, files in os.walk(path):
dirs[:] = [d for d in dirs if d not in ignore_dirs]
candidates.append(os.path.basename(root))
files = [fn for fn in files if os.path.splitext(fn)[1] == ".py"]
candidates += [os.path.splitext(fn)[0] for fn in files]
for file_name in files:
with open_func(os.path.join(root, file_name), "r", encoding=encoding) as f:
contents = f.read()
try:
tree = ast.parse(contents)
for node in ast.walk(tree):
if isinstance(node, ast.Import):
for subnode in node.names:
raw_imports.add(subnode.name)
elif isinstance(node, ast.ImportFrom):
raw_imports.add(node.module)
except Exception as exc:
if ignore_errors:
                        traceback.print_exc()
                        logging.warning("Failed on file: %s" % os.path.join(root, file_name))
continue
else:
logging.error("Failed on file: %s" % os.path.join(root, file_name))
raise exc
# Clean up imports
for name in [n for n in raw_imports if n]:
        # Sanity check: name could have been None if the import statement was "from . import X"
        # Cleanup: we only want the first part of the import.
# Ex: from django.conf --> django.conf. But we only want django as an import
cleaned_name, _, _ = name.partition('.')
imports.add(cleaned_name)
packages = set(imports) - set(set(candidates) & set(imports))
logging.debug('Found packages: {0}'.format(packages))
with open(join("stdlib"), "r") as f:
data = [x.strip() for x in f.readlines()]
data = [x for x in data if x not in py2_exclude] if py2 else data
return sorted(list(set(packages) - set(data)))
def filter_line(l):
return len(l) > 0 and l[0] != "#"
def generate_requirements_file(path, imports):
with open(path, "w") as out_file:
logging.debug('Writing {num} requirements: {imports} to {file}'.format(
num=len(imports),
file=path,
imports=", ".join([x['name'] for x in imports])
))
fmt = '{name}=={version}'
out_file.write('\n'.join(fmt.format(**item) if item['version'] else '{name}'.format(**item)
for item in imports) + '\n')
def output_requirements(imports):
logging.debug('Writing {num} requirements: {imports} to stdout'.format(
num=len(imports),
imports=", ".join([x['name'] for x in imports])
))
fmt = '{name}=={version}'
print('\n'.join(fmt.format(**item) if item['version'] else '{name}'.format(**item)
for item in imports))
def get_imports_info(imports, pypi_server="https://pypi.python.org/pypi/", proxy=None):
result = []
for item in imports:
try:
response = requests.get("{0}{1}/json".format(pypi_server, item), proxies=proxy)
if response.status_code == 200:
if hasattr(response.content, 'decode'):
data = json2package(response.content.decode())
else:
data = json2package(response.content)
elif response.status_code >= 300:
raise HTTPError(status_code=response.status_code,
reason=response.reason)
except HTTPError:
logging.debug(
'Package %s does not exist or network problems', item)
continue
result.append({'name': item, 'version': data.latest_release_id})
return result
def get_locally_installed_packages(encoding=None):
packages = {}
ignore = ["tests", "_tests", "egg", "EGG", "info"]
for path in sys.path:
for root, dirs, files in os.walk(path):
for item in files:
if "top_level" in item:
with open_func(os.path.join(root, item), "r", encoding=encoding) as f:
package = root.split(os.sep)[-1].split("-")
try:
package_import = f.read().strip().split("\n")
except:
continue
for i_item in package_import:
if ((i_item not in ignore) and
(package[0] not in ignore)):
version = None
if len(package) > 1:
version = package[1].replace(
".dist", "").replace(".egg", "")
packages[i_item] = {
'version': version,
'name': package[0]
}
return packages
def get_import_local(imports, encoding=None):
local = get_locally_installed_packages()
result = []
for item in imports:
if item.lower() in local:
result.append(local[item.lower()])
# removing duplicates of package/version
result_unique = [
dict(t)
for t in set([
tuple(d.items()) for d in result
])
]
return result_unique
def get_pkg_names(pkgs):
result = []
with open(join("mapping"), "r") as f:
data = [x.strip().split(":") for x in f.readlines()]
for pkg in pkgs:
toappend = pkg
for item in data:
if item[0] == pkg:
toappend = item[1]
break
if toappend not in result:
result.append(toappend)
return result
def get_name_without_alias(name):
if "import " in name:
match = REGEXP[0].match(name.strip())
if match:
name = match.groups(0)[0]
return name.partition(' as ')[0].partition('.')[0].strip()
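# Illustrative example (not from the original module):
#     get_name_without_alias("import numpy as np")  ->  "numpy"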
def join(f):
return os.path.join(os.path.dirname(__file__), f)
def parse_requirements(file_):
"""Parse a requirements formatted file.
Traverse a string until a delimiter is detected, then split at said
delimiter, get module name by element index, create a dict consisting of
module:version, and add dict to list of parsed modules.
Args:
file_: File to parse.
Raises:
OSerror: If there's any issues accessing the file.
Returns:
tuple: The contents of the file, excluding comments.
"""
modules = []
delim = ["<", ">", "=", "!", "~"] # https://www.python.org/dev/peps/pep-0508/#complete-grammar
try:
f = open_func(file_, "r")
except OSError:
logging.error("Failed on file: {}".format(file_))
raise
else:
data = [x.strip() for x in f.readlines() if x != "\n"]
finally:
f.close()
data = [x for x in data if x[0].isalpha()]
for x in data:
if not any([y in x for y in delim]): # Check for modules w/o a specifier.
modules.append({"name": x, "version": None})
for y in x:
if y in delim:
module = x.split(y)
module_name = module[0]
module_version = module[-1].replace("=", "")
module = {"name": module_name, "version": module_version}
if module not in modules:
modules.append(module)
break
return modules
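# Illustrative example (not from the original module): parse_requirements on a
# file containing the single line "requests==2.0" yields
# [{"name": "requests", "version": "2.0"}].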
def compare_modules(file_, imports):
"""Compare modules in a file to imported modules in a project.
Args:
file_ (str): File to parse for modules to be compared.
imports (tuple): Modules being imported in the project.
Returns:
tuple: The modules not imported in the project, but do exist in the
specified file.
"""
modules = parse_requirements(file_)
imports = [imports[i]["name"] for i in range(len(imports))]
modules = [modules[i]["name"] for i in range(len(modules))]
modules_not_imported = set(modules) - set(imports)
return modules_not_imported
def diff(file_, imports):
"""Display the difference between modules in a file and imported modules."""
modules_not_imported = compare_modules(file_, imports)
logging.info("The following modules are in {} but do not seem to be imported: "
"{}".format(file_, ", ".join(x for x in modules_not_imported)))
def clean(file_, imports):
"""Remove modules that aren't imported in project from file."""
modules_not_imported = compare_modules(file_, imports)
re_remove = re.compile("|".join(modules_not_imported))
to_write = []
try:
f = open_func(file_, "r+")
except OSError:
logging.error("Failed on file: {}".format(file_))
raise
else:
for i in f.readlines():
if re_remove.match(i) is None:
to_write.append(i)
f.seek(0)
f.truncate()
for i in to_write:
f.write(i)
finally:
f.close()
logging.info("Successfully cleaned up requirements in " + file_)
def init(args):
encoding = args.get('--encoding')
extra_ignore_dirs = args.get('--ignore')
if extra_ignore_dirs:
extra_ignore_dirs = extra_ignore_dirs.split(',')
candidates = get_all_imports(args['<path>'],
encoding=encoding,
extra_ignore_dirs=extra_ignore_dirs)
candidates = get_pkg_names(candidates)
logging.debug("Found imports: " + ", ".join(candidates))
pypi_server = "https://pypi.python.org/pypi/"
proxy = None
if args["--pypi-server"]:
pypi_server = args["--pypi-server"]
if args["--proxy"]:
proxy = {'http': args["--proxy"], 'https': args["--proxy"]}
if args["--use-local"]:
logging.debug(
"Getting package information ONLY from local installation.")
imports = get_import_local(candidates, encoding=encoding)
else:
logging.debug("Getting packages information from Local/PyPI")
local = get_import_local(candidates, encoding=encoding)
# Get packages that were not found locally
difference = [x for x in candidates
if x.lower() not in [z['name'].lower() for z in local]]
imports = local + get_imports_info(difference,
proxy=proxy,
pypi_server=pypi_server)
path = (args["--savepath"] if args["--savepath"] else
os.path.join(args['<path>'], "requirements.txt"))
if args["--diff"]:
diff(args["--diff"], imports)
return
if args["--clean"]:
clean(args["--clean"], imports)
return
if not args["--print"] and not args["--savepath"] and not args["--force"] and os.path.exists(path):
logging.warning("Requirements.txt already exists, "
"use --force to overwrite it")
return
if args["--print"]:
output_requirements(imports)
logging.info("Successfully output requirements")
else:
generate_requirements_file(path, imports)
logging.info("Successfully saved requirements file in " + path)
def main(): # pragma: no cover
args = docopt(__doc__, version=__version__)
log_level = logging.DEBUG if args['--debug'] else logging.INFO
logging.basicConfig(level=log_level, format='%(levelname)s: %(message)s')
try:
init(args)
except KeyboardInterrupt:
sys.exit(0)
if __name__ == '__main__':
main() # pragma: no cover
| 34.692308 | 108 | 0.565267 | [
"MIT"
] | 0mp/pipenv | pipenv/vendor/pipreqs/pipreqs.py | 13,981 | Python |
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(BlinkhashTestFramework):
start_height = 101
max_stat_pos = 2
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = 1525107225
self.nodes[0].setmocktime(mocktime)
self.nodes[0].createwallet(wallet_name="testwallet")
self.nodes[0].generatetoaddress(COINBASE_MATURITY + 1, self.nodes[0].getnewaddress())
address = self.nodes[0].get_deterministic_priv_key().address
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.generate(self.nodes[0], 1)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=True)
self.nodes[0].sendtoaddress(address=address, amount=10, subtractfeefromamount=False)
self.nodes[0].settxfee(amount=0.003)
self.nodes[0].sendtoaddress(address=address, amount=1, subtractfeefromamount=True)
self.sync_all()
self.generate(self.nodes[0], 1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def load_test_data(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.sync_all()
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.load_test_data(test_data)
self.sync_all()
stats = self.get_stats()
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
        # Make sure invalid stats are not allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
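# Note: the reference file (data/rpc_getblockstats.json by default) can be
# regenerated by running this test with the --gen-test-data option defined in
# add_options() above.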
if __name__ == '__main__':
GetblockstatsTest().main()
| 41.916667 | 121 | 0.629225 | ["MIT"] | blinkhash/blinkhash-core | test/functional/rpc_getblockstats.py | 7,042 | Python |
"""Training run script"""
import argparse
import json
from pathlib import Path
from bisect import bisect_left
import torch
import torch_geometric as tg
import matplotlib.pyplot as plt
import local2global as l2g
from local2global_embedding.embedding import speye, train, embedding, VGAE_model, VGAE_loss, reconstruction_auc
from local2global_embedding.network import largest_connected_component, TGraph
from local2global_embedding.patches import create_patch_data
from local2global_embedding.clustering import distributed_clustering, fennel_clustering, louvain_clustering, metis_clustering
class ResultsDict:
"""
Class for keeping track of results
"""
@classmethod
def load(cls, filename, replace=False):
"""
restore results from file
Args:
filename: input json file
replace: set the replace attribute
Returns:
populated ResultsDict
"""
self = cls(replace=replace)
with open(filename) as f:
self._data.update(json.load(f))
return self
def save(self, filename):
"""
dump contents to json file
Args:
filename: output file path
"""
with open(filename, 'w') as f:
json.dump(self._data, f)
def __init__(self, replace=False):
"""
initialise empty ResultsDict
Args:
replace: set the replace attribute (default: ``False``)
"""
self._data = {'dims': [], 'auc': [], 'args': []}
self.replace = replace #: if ``True``, updates replace existing data, if ``False``, updates append data
def __getitem__(self, item):
return self._data[item]
def _update_index(self, index, aucs: list, args=None):
"""
update data for a given index
Args:
index: integer index into data lists
aucs: new auc values (should be a list)
args: new args data (optional)
"""
if self.replace:
self['auc'][index] = aucs
self['args'][index] = args
else:
self['auc'][index].extend(aucs)
self['args'][index].extend([args] * len(aucs))
def _insert_index(self, index: int, dim: int, aucs: list, args=None):
"""
insert new data at index
Args:
index: integer index into data lists
dim: data dimension for index
aucs: new auc values
args: new args data (optional)
"""
self['auc'].insert(index, aucs)
self['dims'].insert(index, dim)
self['args'].insert(index, [args] * len(aucs))
def update_dim(self, dim, aucs, args=None):
"""
update data for given dimension
Args:
dim: dimension to update
aucs: new auc values
args: new args data (optional)
        If ``self.contains_dim(dim)`` is ``True``, the behaviour depends on the value of
        ``self.replace``: existing data for ``dim`` is either replaced or appended to.
"""
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
self._update_index(index, aucs, args)
else:
self._insert_index(index, dim, aucs, args)
def max_auc(self, dim=None):
"""
return maximum auc values
Args:
            dim: if ``dim=None``, return list of maximum values for all dimensions, else only return maximum value for ``dim``.
"""
if dim is None:
return [max(aucs) for aucs in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
return max(self['auc'][index])
else:
return 0.
def contains_dim(self, dim):
"""
equivalent to ``dim in self['dims']``
"""
index = bisect_left(self['dims'], dim)
return index < len(self['dims']) and self['dims'][index] == dim
def reduce_to_dims(self, dims):
"""
remove all data for dimensions not in ``dims``
Args:
dims: list of dimensions to keep
"""
        # keep entries whose dimension is in ``dims``; indices must refer to
        # positions in ``self['dims']``, which the data lists run parallel to
        index = [i for i, d in enumerate(self['dims']) if d in dims]
for key1 in self._data:
if isinstance(self._data[key1], list):
self._data[key1] = [self[key1][i] for i in index]
return self
def runs(self, dim=None):
"""
return the number of runs
Args:
            dim: if ``dim is None``, return list of number of runs for all dimensions, else return number of
runs for dimension ``dim``.
"""
if dim is None:
return [len(x) for x in self['auc']]
else:
index = bisect_left(self['dims'], dim)
if index < len(self['dims']) and self['dims'][index] == dim:
return len(self['auc'][index])
else:
return 0
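# Usage sketch for ResultsDict (illustrative values only, not executed here):
#
#     results = ResultsDict(replace=True)
#     results.update_dim(2, [0.91], {'lr': 0.01})   # record one run for dim 2
#     results.update_dim(4, [0.94], {'lr': 0.01})
#     results.max_auc(4)                            # -> 0.94
#     results.save('results.json')
#     restored = ResultsDict.load('results.json', replace=True)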
_dataloaders = {} #: dataloaders
def dataloader(name):
"""
decorator for registering dataloader functions
Args:
name: data set name
"""
def loader(func):
_dataloaders[name] = func
return func
return loader
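# Registering an additional dataset only requires decorating a loader function
# with @dataloader; the example below is hypothetical but follows the same
# pattern as the loaders that follow (CiteSeer is another Planetoid dataset):
#
#     @dataloader('CiteSeer')
#     def _load_citeseer():
#         return tg.datasets.Planetoid(name='CiteSeer', root='/tmp/citeseer')[0]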
@dataloader('Cora')
def _load_cora():
return tg.datasets.Planetoid(name='Cora', root='/tmp/cora')[0]
@dataloader('PubMed')
def _load_pubmed():
return tg.datasets.Planetoid(name='PubMed', root='/tmp/pubmed')[0]
@dataloader('AMZ_computers')
def _load_amazon_computers():
return tg.datasets.Amazon(root='/tmp/amazon', name='Computers')[0]
@dataloader('AMZ_photo')
def _load_amazon_photos():
return tg.datasets.Amazon(root='/tmp/amazon', name='photo')[0]
def load_data(name):
"""
load data set
Args:
name: name of data set (one of {names})
Returns:
largest connected component of data set
"""
data = _dataloaders[name]()
data = largest_connected_component(data=data)
data.num_nodes = data.x.shape[0]
return data
load_data.__doc__ = load_data.__doc__.format(names=list(_dataloaders.keys()))
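# Example (illustrative): `data = load_data('Cora')` returns the largest
# connected component of the Cora graph with `data.num_nodes` set accordingly.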
def prepare_patches(output_folder, **kwargs):
"""
initialise patch data if ``output_folder`` does not exist, else load existing patch data
Args:
output_folder: folder for storing patch data
**kwargs: arguments passed to :py:func:`~local2global_embedding.patches.create_patch_data`
Returns:
patch_data, patch_graph
"""
output_folder = Path(output_folder)
if output_folder.is_dir():
patch_graph = torch.load(output_folder / 'patch_graph.pt')
patch_data = [torch.load(output_folder / f"patch{i}.pt") for i in range(patch_graph.num_nodes)]
else:
patch_data, patch_graph = create_patch_data(**kwargs)
output_folder.mkdir(parents=True)
torch.save(patch_graph, output_folder / 'patch_graph.pt')
for i, data in enumerate(patch_data):
torch.save(data, output_folder / f'patch{i}.pt')
return patch_data, patch_graph
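# Usage sketch (illustrative arguments; the keyword arguments are forwarded to
# create_patch_data, as in run() below):
#
#     patch_data, patch_graph = prepare_patches(
#         'Cora_patches',                  # hypothetical output folder
#         data=data,
#         partition_tensor=clusters,
#         min_overlap=3,
#         target_overlap=4,
#         sparsify_method='resistance',
#         target_patch_degree=4.0,
#     )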
def csvlist(input_type=str):
"""
Create an argparse type that parses comma separated lists of type ``input_type``
Args:
input_type: type of list elements
Returns:
list parser
"""
def make_list(input_str):
return [input_type(s) for s in input_str.split(',')]
make_list.__doc__ = f"""
argparse type that parses comma separated list of type {input_type}
Args:
input_str: string to be parsed
Returns:
list of elements of type {input_type}
"""
return make_list
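# Example: `csvlist(int)('2,4,8')` returns `[2, 4, 8]`; this is how the
# comma-separated --dims option below is parsed.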
_parser = argparse.ArgumentParser(description="Run training example.")
_parser.add_argument('--data', default='Cora', choices=_dataloaders.keys(), help='Dataset to load')
_parser.add_argument('--no_features', action='store_true', help='Discard features and use node identity.')
_parser.add_argument('--num_epochs', type=int, default=200, help='Number of training epochs')
_parser.add_argument('--runs', type=int, default=10, help='Number of training runs (keep best result)')
_parser.add_argument('--dims', type=csvlist(int), default=[2], help='Embedding dimensions (comma-separated)')
_parser.add_argument('--hidden_multiplier', type=int, default=2, help='Hidden dim is `hidden_multiplier` * `dim`')
_parser.add_argument('--target_patch_degree', type=float, default=4.0, help='Target patch degree for sparsification.')
_parser.add_argument('--min_overlap', type=int, default=None, help='Minimum target patch overlap (defaults to `max(dims) + 1`)')
_parser.add_argument('--target_overlap', type=int, default=None, help='Target patch overlap (defaults to twice `min_overlap`)')
_parser.add_argument('--gamma', type=float, default=0.0, help="Value of 'gamma' for RMST sparsification.")
_parser.add_argument('--sparsify', default='resistance', help="Sparsification method to use.",
choices={'resistance', 'rmst', 'none'})
_parser.add_argument('--cluster', default='metis', choices={'louvain', 'distributed', 'fennel', 'metis'}, help="Clustering method to use")
_parser.add_argument('--num_clusters', default=10, type=int, help="Target number of clusters for fennel, or metis.")
_parser.add_argument('--beta', default=0.1, type=float, help="Beta value for distributed")
_parser.add_argument('--num_iters', default=None, type=int, help="Maximum iterations for distributed or fennel (default depends on method choice)")
_parser.add_argument('--lr', default=0.01, type=float, help='Learning rate')
_parser.add_argument('--dist', action='store_true', help='use distance decoder instead of inner product decoder')
_parser.add_argument('--output',
default='.',
help='output folder')
_parser.add_argument('--device', default=None, help="Device used for training e.g., 'cpu', 'cuda'")
_parser.add_argument('--plot', action='store_true', help='Plot embedding performance')
_parser.add_argument('--verbose', action='store_true', help='Show progress info')
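# Example invocation (illustrative; assumes this module is run as a script),
# roughly equivalent to the run() call mentioned in the docstring below:
#
#     python run.py --data Cora --dims 2,4,8,16,32,64,128 --plot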
def run(**kwargs):
"""
Run training example.
By default this function writes results to the current working directory. To override this use the ``output``
keyword argument.
This function reproduces figure 1(a) of [#l2g]_ if called as ``run(dims=[2**i for i in range(1, 8)], plot=True)``.
Keyword Args:
data: Name of data set to load (one of {``'Cora'``, ``'PubMed'``, ``'AMZ_computers'``, ``'AMZ_photo'``}) (default: ``'Cora'``)
no_features: If ``True``, discard features and use node identity. (default: ``False``)
num_epochs: Number of training epochs (default: ``200``)
        runs: Number of training runs (keep best result) (default: ``10``)
dims: list of embedding dimensions (default: ``[2]``)
hidden_multiplier: Hidden dimension is ``hidden_multiplier * dim``
target_patch_degree: Target patch degree for resistance sparsification. (default: ``4``)
min_overlap: Minimum target patch overlap (default: ``max(dims) + 1``)
target_overlap: Target patch overlap (default: ``2 * max(dims)``)
gamma: Value of 'gamma' for RMST sparsification (default: ``0``)
sparsify: Sparsification method to use (one of {``'resistance'``, ``'none'``, ``'rmst'``})
(default: ``'resistance'``)
cluster: Clustering method to use (one of {``'louvain'``, ``'fennel'`` , ``'distributed'``, ``'metis'``})
(default: ``'metis'``)
        num_clusters: Target number of clusters for fennel or metis.
        beta: Beta value for distributed clustering (default: ``0.1``)
        num_iters: Maximum iterations for distributed or fennel
lr: Learning rate
dist: If ``True``, use distance decoder instead of inner product decoder (default: ``False``)
output: output folder (default: ``'.'``)
device: Device used for training e.g., 'cpu', 'cuda' (defaults to ``'cuda'`` if available else ``'cpu'``)
plot: If ``True``, plot embedding performance (default: ``False``)
verbose: If ``True``, show progress info (default: ``False``)
This function only accepts keyword arguments and is also exposed as a command-line interface.
.. rubric:: References
.. [#l2g] L. G. S. Jeub et al.
“Local2Global: Scaling global representation learning on graphs via local training”.
DLG-KDD’21. 2021. `arXiv:2107.12224 [cs.LG] <https://arxiv.org/abs/2107.12224>`_.
"""
# support calling this as a python function with keyword arguments
args = _parser.parse_args([])
for key, value in kwargs.items():
if key in args:
setattr(args, key, value)
else:
raise TypeError(f'Unknown argument {key}')
output_folder = Path(args.output)
data = load_data(args.data)
neg_edges = tg.utils.negative_sampling(data.edge_index, data.num_nodes)
graph = TGraph(data.edge_index, data.edge_attr)
basename = args.data
dims = args.dims
num_epochs = args.num_epochs
runs = args.runs
min_overlap = args.min_overlap if args.min_overlap is not None else max(dims) + 1
target_overlap = args.target_overlap if args.target_overlap is not None else 2 * max(dims)
if args.no_features:
data.x = None # remove node features (trained with identity)
basename += '_no_features'
if args.dist:
basename += '_dist'
if args.sparsify == 'resistance':
sp_string = f"resistance_deg{args.target_patch_degree}"
elif args.sparsify == 'rmst':
sp_string = f"rmst_gamma{args.gamma}"
elif args.sparsify == 'none':
sp_string = "no_sparsify"
else:
raise RuntimeError(f"Unknown sparsification method '{args.sparsify}'.")
if args.cluster == 'louvain':
cluster_fun = lambda: louvain_clustering(graph)
cluster_string = 'louvain'
elif args.cluster == 'distributed':
cluster_fun = lambda: distributed_clustering(graph, args.beta, rounds=args.num_iters)
cluster_string = f'distributed_beta{args.beta}_it{args.num_iters}'
elif args.cluster == 'fennel':
cluster_fun = lambda: fennel_clustering(graph, num_clusters=args.num_clusters, randomise_order=True,
num_iters=args.num_iters)
cluster_string = f"fennel_n{args.num_clusters}_it{args.num_iters}"
elif args.cluster == 'metis':
cluster_fun = lambda: metis_clustering(graph, num_clusters=args.num_clusters)
cluster_string = f"metis_n{args.num_clusters}"
else:
raise RuntimeError(f"Unknown cluster method '{args.cluster}'.")
cluster_file = output_folder / f"{args.data}_{cluster_string}_clusters.pt"
if cluster_file.is_file():
clusters = torch.load(cluster_file)
else:
clusters = cluster_fun()
torch.save(clusters, cluster_file)
patch_folder = output_folder / f'{args.data}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}_patches'
patch_data, patch_graph = prepare_patches(
output_folder=patch_folder,
data=data,
partition_tensor=clusters,
min_overlap=min_overlap,
target_overlap=target_overlap,
sparsify_method=args.sparsify,
gamma=args.gamma,
target_patch_degree=args.target_patch_degree,
verbose=args.verbose)
if args.verbose:
print(f'total edges: {data.num_edges}')
print(f'total patch edges: {sum(c.num_edges for c in patch_data)}')
if args.no_features:
data.x = speye(data.num_nodes) # add identity as node features for training full model
# compute baseline full model if necessary
baseline_file = output_folder / f'{basename}_full_info.json'
training_args = {'lr': args.lr, 'num_epochs': args.num_epochs, 'hidden_multiplier': args.hidden_multiplier}
if baseline_file.is_file():
baseline_data = ResultsDict.load(baseline_file)
else:
baseline_data = ResultsDict()
for d in dims:
r = baseline_data.runs(d)
if r < runs:
if args.verbose:
print(f'training full model for {runs-r} runs and d={d}')
for r_it in range(r, runs):
if args.verbose:
print(f"full model (d={d}) run {r_it + 1} of {runs}")
data = data.to(args.device)
model = train(data,
VGAE_model(d, d * args.hidden_multiplier, data.num_features, dist=args.dist).to(args.device),
loss_fun=VGAE_loss,
num_epochs=num_epochs,
lr=args.lr,
verbose=args.verbose,
)
coords = embedding(model, data)
auc = reconstruction_auc(coords, data, dist=args.dist)
if auc > baseline_data.max_auc(d):
if args.verbose:
print(f"new best (auc={auc})")
torch.save(model.state_dict(), output_folder / f'{basename}_full_d{d}_best_model.pt')
torch.save(coords, output_folder / f'{basename}_full_d{d}_best_coords.pt')
baseline_data.update_dim(d, [auc], training_args)
baseline_data.save(baseline_file)
results_file = patch_folder / f'{basename}_l2g_info.json'
nt_results_file = patch_folder / f'{basename}_nt_info.json'
if results_file.is_file():
results = ResultsDict.load(results_file, replace=True)
else:
results = ResultsDict(replace=True)
if nt_results_file.is_file():
nt_results = ResultsDict.load(nt_results_file, replace=True)
else:
nt_results = ResultsDict(replace=True)
for d in dims:
patch_list = []
update_aligned_embedding = False
for p_ind, patch in enumerate(patch_data):
patch_result_file = patch_folder / f'{basename}_patch{p_ind}_info.json'
if patch_result_file.is_file():
patch_results = ResultsDict.load(patch_result_file)
else:
patch_results = ResultsDict()
coords_file = patch_folder / f'{basename}_patch{p_ind}_d{d}_best_coords.pt'
if coords_file.is_file():
best_coords = torch.load(coords_file)
r = patch_results.runs(d)
if args.no_features:
patch.x = speye(patch.num_nodes)
if r < runs:
if args.verbose:
print(f'training patch{p_ind} for {runs-r} runs and d={d}')
patch = patch.to(args.device)
for r_it in range(r, runs):
if args.verbose:
print(f"patch{p_ind} (d={d}) run {r_it+1} of {runs}")
model = train(patch,
VGAE_model(d, d * args.hidden_multiplier, patch.num_features, dist=args.dist).to(args.device),
loss_fun=VGAE_loss,
num_epochs=num_epochs,
lr=args.lr,
)
coords = embedding(model, patch)
auc = reconstruction_auc(coords, patch, dist=args.dist)
if auc > patch_results.max_auc(d):
if args.verbose:
print(f"new best (auc={auc})")
best_coords = coords
torch.save(model.state_dict(), patch_folder / f'{basename}_patch{p_ind}_d{d}_best_model.pt')
torch.save(best_coords, coords_file)
update_aligned_embedding = True
patch_results.update_dim(d, [auc], training_args)
patch_results.save(patch_result_file)
patch_list.append(l2g.Patch(patch.nodes.cpu().numpy(), best_coords.cpu().numpy()))
patched_embedding_file = patch_folder / f'{basename}_d{d}_coords.pt'
patched_embedding_file_nt = patch_folder / f'{basename}_d{d}_ntcoords.pt'
if update_aligned_embedding or not patched_embedding_file.is_file():
prob = l2g.WeightedAlignmentProblem(patch_list, patch_edges=patch_graph.edges())
ntcoords = prob.mean_embedding()
coords = prob.get_aligned_embedding()
torch.save(coords, patched_embedding_file)
torch.save(ntcoords, patched_embedding_file_nt)
results.update_dim(d, [reconstruction_auc(torch.as_tensor(coords), data, neg_edges, dist=args.dist)])
nt_results.update_dim(d, [reconstruction_auc(torch.as_tensor(ntcoords), data, neg_edges, dist=args.dist)])
results.save(results_file)
nt_results.save(nt_results_file)
baseline_data = baseline_data.reduce_to_dims(dims)
results = results.reduce_to_dims(dims)
nt_results = nt_results.reduce_to_dims(dims)
if args.plot:
plt.figure()
plt.plot(dims, [max(v) for v in baseline_data['auc']], label='full, inner product', marker='o',
color='tab:blue')
plt.plot(dims, results['auc'], '--', label='l2g, inner product', marker='>', color='tab:blue')
plt.plot(dims, nt_results['auc'], ':', label='no-trans, inner product', color='tab:blue',
linewidth=1)
plt.xscale('log')
plt.xticks(dims, dims)
plt.minorticks_off()
plt.xlabel('embedding dimension')
plt.ylabel('AUC')
plt.legend()
oversampling_ratio = sum(p.num_edges for p in patch_data) / data.num_edges
plt.title(f"oversampling ratio: {oversampling_ratio:.2}, #patches: {len(patch_data)}")
plt.savefig(output_folder / f"{basename}_{cluster_string}_{sp_string}_mo{min_overlap}_to{target_overlap}.pdf")
plt.show()
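# Programmatic usage sketch (keyword arguments only; reproduces figure 1(a) of
# the referenced paper, as noted in the docstring of run()):
#
#     run(dims=[2**i for i in range(1, 8)], plot=True)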
if __name__ == '__main__':
# run main script
args = _parser.parse_args()
run(**vars(args))
| 39.61302 | 147 | 0.61773 | ["MIT"] | LJeub/Local2Global_embedding | local2global_embedding/run.py | 21,912 | Python |