Dataset schema, one row per source file (the "⌀" marker in the original viewer denotes nullable columns):

| column | dtype | range / classes |
|---|---|---|
| hexsha | string | length 40-40 |
| size | int64 | 5 - 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3-248 |
| max_stars_repo_name | string | length 5-125 |
| max_stars_repo_head_hexsha | string | length 40-78 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64 | 1 - 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3-248 |
| max_issues_repo_name | string | length 5-125 |
| max_issues_repo_head_hexsha | string | length 40-78 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64 | 1 - 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3-248 |
| max_forks_repo_name | string | length 5-125 |
| max_forks_repo_head_hexsha | string | length 40-78 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64 | 1 - 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 5 - 2.06M |
| avg_line_length | float64 | 1 - 1.02M |
| max_line_length | int64 | 3 - 1.03M |
| alphanum_fraction | float64 | 0 - 1 |
| count_classes | int64 | 0 - 1.6M |
| score_classes | float64 | 0 - 1 |
| count_generators | int64 | 0 - 651k |
| score_generators | float64 | 0 - 1 |
| count_decorators | int64 | 0 - 990k |
| score_decorators | float64 | 0 - 1 |
| count_async_functions | int64 | 0 - 235k |
| score_async_functions | float64 | 0 - 1 |
| count_documentation | int64 | 0 - 1.04M |
| score_documentation | float64 | 0 - 1 |
93feb2b5aaee509b3ca59bd657fd9239d3cc9aa4 | 5,234 | py | Python | rtk/dao/RTKMatrix.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | ["BSD-3-Clause"] | null | null | null | rtk/dao/RTKMatrix.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | ["BSD-3-Clause"] | null | null | null | rtk/dao/RTKMatrix.py | rakhimov/rtk | adc35e218ccfdcf3a6e3082f6a1a1d308ed4ff63 | ["BSD-3-Clause"] | 2 | 2020-04-03T04:14:42.000Z | 2021-02-22T05:30:35.000Z

# -*- coding: utf-8 -*-
#
# rtk.dao.RTKMatrix.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 Andrew Rowland andrew.rowland <AT> reliaqual <DOT> com
"""
===============================================================================
The RTKMatrix Table
===============================================================================
"""
# pylint: disable=E0401
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship # pylint: disable=E0401
# Import other RTK modules.
from Utilities import none_to_default # pylint: disable=E0401
from dao.RTKCommonDB import RTK_BASE # pylint: disable=E0401
class RTKMatrix(RTK_BASE):
"""
Class to represent the rtk_matrix table in the RTK Program database.
Matrix types are one of the following:
+-------------+--------------+--------------+
| Row Table | Column Table | Matrix Type |
+-------------+--------------+--------------+
| Function | Hardware | fnctn_hrdwr |
+-------------+--------------+--------------+
| Function | Software | fnctn_sftwr |
+-------------+--------------+--------------+
| Function | Validation | fnctn_vldtn |
+-------------+--------------+--------------+
| Requirement | Hardware | rqrmnt_hrdwr |
+-------------+--------------+--------------+
| Requirement | Software | rqrmnt_sftwr |
+-------------+--------------+--------------+
| Requirement | Validation | rqrmnt_vldtn |
+-------------+--------------+--------------+
| Hardware | Testing | hrdwr_tstng |
+-------------+--------------+--------------+
| Hardware | Validation | hrdwr_vldtn |
+-------------+--------------+--------------+
| Software | Risk | sftwr_rsk |
+-------------+--------------+--------------+
| Software | Validation | sftwr_vldtn |
+-------------+--------------+--------------+
The primary key for this table consists of the revision_id, matrix_id,
column_item_id, and row_item_id.
This table shares a Many-to-One relationship with rtk_revision.
"""
__tablename__ = 'rtk_matrix'
__table_args__ = {'extend_existing': True}
revision_id = Column(
'fld_revision_id',
Integer,
ForeignKey('rtk_revision.fld_revision_id'),
primary_key=True,
nullable=False)
matrix_id = Column('fld_matrix_id', Integer, primary_key=True, default=0)
column_id = Column('fld_column_id', Integer, default=0)
column_item_id = Column(
'fld_column_item_id', Integer, primary_key=True, default=0)
matrix_type = Column('fld_matrix_type', String(128), default='')
parent_id = Column('fld_parent_id', Integer, default=0)
row_id = Column('fld_row_id', Integer, default=0)
row_item_id = Column(
'fld_row_item_id', Integer, primary_key=True, default=0)
value = Column('fld_value', Integer, default=0)
# Define the relationships to other tables in the RTK Program database.
revision = relationship('RTKRevision', back_populates='matrix')
def get_attributes(self):
"""
Retrieve the current values of the RTKMatrix data model attributes.
        :return: {revision_id, matrix_id, column_id, column_item_id,
                  matrix_type, parent_id, row_id, row_item_id, value} pairs.
        :rtype: dict
"""
_attributes = {
'revision_id': self.revision_id,
'matrix_id': self.matrix_id,
'column_id': self.column_id,
'column_item_id': self.column_item_id,
'matrix_type': self.matrix_type,
'parent_id': self.parent_id,
'row_id': self.row_id,
'row_item_id': self.row_item_id,
'value': self.value
}
return _attributes
def set_attributes(self, values):
"""
Set the RTKMatrix data model attributes.
:param tuple values: tuple of values to assign to the instance
attributes.
:return: (_code, _msg); the error code and error message.
:rtype: tuple
"""
_error_code = 0
_msg = "RTK SUCCESS: Updating RTKMatrix {0:d} attributes.". \
format(self.matrix_id)
try:
self.column_id = int(none_to_default(values['column_id'], 0))
self.column_item_id = int(
none_to_default(values['column_item_id'], 0))
self.matrix_type = str(none_to_default(values['matrix_type'], ''))
self.parent_id = int(none_to_default(values['parent_id'], 0))
self.row_id = int(none_to_default(values['row_id'], 0))
self.row_item_id = int(none_to_default(values['row_item_id'], 0))
self.value = float(none_to_default(values['value'], 0.0))
except KeyError as _err:
_error_code = 40
_msg = "RTK ERROR: Missing attribute {0:s} in attribute " \
"dictionary passed to " \
"RTKMatrix.set_attributes().".format(_err)
return _error_code, _msg
| 40.261538 | 79 | 0.526175 | 4,555 | 0.870271 | 0 | 0 | 0 | 0 | 0 | 0 | 3,188 | 0.609094 |
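The `get_attributes`/`set_attributes` pair above is the model's whole attribute-facing API: `set_attributes` expects a dictionary containing every mutable key and reports a `(code, message)` tuple instead of raising on a missing key. A minimal usage sketch (hypothetical values; no database session is involved):

```python
# Hedged sketch: assumes only the RTKMatrix class above; values are made up.
matrix = RTKMatrix()
matrix.matrix_id = 3  # normally assigned by the DAO layer

_code, _msg = matrix.set_attributes({
    'column_id': 2,
    'column_item_id': 7,
    'matrix_type': 'fnctn_hrdwr',  # one of the types listed in the class docstring
    'parent_id': 0,
    'row_id': 5,
    'row_item_id': 11,
    'value': 1,
})
assert _code == 0  # a missing key would instead yield (40, "RTK ERROR: ...")

attrs = matrix.get_attributes()
assert attrs['matrix_type'] == 'fnctn_hrdwr'
```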
93ff19152094c70f894a1b56b790e173ed1c2638 | 614 | py | Python | tool/gitautopull.py | chaosannals/trial-python | 740b91fa4b1b1b9839b7524515995a6d417612ca | ["MIT"] | null | null | null | tool/gitautopull.py | chaosannals/trial-python | 740b91fa4b1b1b9839b7524515995a6d417612ca | ["MIT"] | 8 | 2020-12-26T07:48:15.000Z | 2022-03-12T00:25:14.000Z | tool/gitautopull.py | chaosannals/trial-python | 740b91fa4b1b1b9839b7524515995a6d417612ca | ["MIT"] | null | null | null

import os
import shutil
def pull_default(folder=None):
cwd = os.getcwd()
if None == folder:
folder = cwd
for path in os.listdir(folder):
project_path = os.path.join(folder, path)
if os.path.isdir(project_path):
dot_git_folder = os.path.join(project_path, '.git')
if os.path.isdir(dot_git_folder):
print('[git pull start] {}'.format(project_path))
os.chdir(project_path)
os.system('git pull')
print('[git pull end] {}'.format(project_path))
os.chdir(cwd)
pull_default()
input('Press Enter to exit')
| 29.238095 | 65 | 0.583062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.116987 |
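Usage is implicit in the module-level call at the bottom: the script pulls every immediate subdirectory of the working directory that contains a `.git` folder. A hedged invocation sketch (the paths are hypothetical):

```python
# Hypothetical workspace layout: /home/me/workspace/{projA,projB}/.git
pull_default('/home/me/workspace')  # or simply run the script from inside the workspace
```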
93ff6193a2b94edc476d54fd31667524f6fc80f3 | 815 | py | Python | pyogp/apps/web/django/pyogp_webbot/urls.py | grobertson/PyOGP.Apps | 03583baa8d3a2438b0d0a5452ee8c9e56aace9fd | ["Apache-2.0"] | null | null | null | pyogp/apps/web/django/pyogp_webbot/urls.py | grobertson/PyOGP.Apps | 03583baa8d3a2438b0d0a5452ee8c9e56aace9fd | ["Apache-2.0"] | null | null | null | pyogp/apps/web/django/pyogp_webbot/urls.py | grobertson/PyOGP.Apps | 03583baa8d3a2438b0d0a5452ee8c9e56aace9fd | ["Apache-2.0"] | null | null | null

from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^pyogp_webbot/', include('pyogp_webbot.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
(r'^$', 'pyogp_webbot.login.views.index'),
(r'^pyogp_webbot/$', 'pyogp_webbot.login.views.index'),
(r'^pyogp_webbot/login/$', 'pyogp_webbot.login.views.login'),
(r'^pyogp_webbot/login/login_request/$', 'pyogp_webbot.login.views.login_request'),
)
| 37.045455 | 87 | 0.692025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.817178 |
93ffdac053f4b224bf9ac1f85bcc5aea184dd502 | 9,300 | py | Python | emit.py | richardbenson91477/simile | aa1faa8902d24e57133cd2c9982e5d4eef6f913f | ["Unlicense"] | null | null | null | emit.py | richardbenson91477/simile | aa1faa8902d24e57133cd2c9982e5d4eef6f913f | ["Unlicense"] | null | null | null | emit.py | richardbenson91477/simile | aa1faa8902d24e57133cd2c9982e5d4eef6f913f | ["Unlicense"] | null | null | null

''' code emitters '''
import out, enums as e
class s:
''' state '''
# long_len
# arg_regs, arg_regs_n
# regs
# stack_regs
pass
def init (long_len):
s.long_len = long_len
if long_len == 8:
        s.arg_regs = ['%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9']
s.arg_regs_n = len(s.arg_regs)
s.regs = ['%rax', '%rbx', '%r10']
s.stack_regs = ['%rsp', '%rbp']
elif long_len == 4:
s.arg_regs = []
s.arg_regs_n = 0
s.regs = ['%eax', '%ebx', '%ecx']
s.stack_regs = ['%esp', '%ebp']
else:
out.error ('what year is this???')
return False
return True
def emit (fn_cur, et, val, val2 = None):
if et == e.EMIT_DEF:
out.put ('.section .text', i_n = 0)
out.put ('.globl ' + val, i_n = 0)
out.put (val + ':', i_n = 0)
out.put ('push ' + s.stack_regs [1])
out.put ('mov ' + s.stack_regs [0] + ', ' + s.stack_regs [1])
out.put ('xor ' + s.regs [0] + ', ' + s.regs [0])
elif et == e.EMIT_RET:
if val:
if not get_val (fn_cur, val, s.regs [0]):
return False
        out.put ('pop ' + s.stack_regs [1])  # restore the frame pointer saved in EMIT_DEF
out.put ('ret')
elif et == e.EMIT_END:
if not fn_cur.flow_ret_t:
out.put ('pop ' + s.stack_regs [1])
out.put ('ret')
if fn_cur.data_n:
out.put ('.section .data', i_n = 0)
for datum in fn_cur.data:
if datum._type == e.DATA_LONG:
out.put (datum.name_s + ': .zero ' + str(datum._len), i_n = 0)
elif datum._type == e.DATA_LARRAY:
out.put (datum.name_s + ': .zero ' + str(datum._len), i_n = 0)
elif datum._type == e.DATA_STR:
out.put (datum.name_s + ': .string ' + datum.val, i_n = 0)
elif et == e.EMIT_CALL:
arg_n = len (val2)
for arg_i, arg in enumerate (val2):
if arg_i < s.arg_regs_n:
if not get_val (fn_cur, arg, s.arg_regs [arg_i]):
return False
else:
if not get_val (fn_cur, arg, s.regs [0]):
return False
out.put ('push ' + s.regs [0])
out.put ('call ' + val)
if arg_n > s.arg_regs_n:
out.put ('add $' + str((arg_n - s.arg_regs_n) * s.long_len) +\
', ' + s.stack_regs [0])
elif et == e.EMIT_PUSH:
if not get_val (fn_cur, val, s.regs [0]):
return False
out.put ('push ' + s.regs [0])
elif et == e.EMIT_IF:
if not get_val (fn_cur, val, s.regs [0]):
return False
out.put ('test ' + s.regs [0] + ', ' + s.regs [0])
out.put ('jz ' + fn_cur.name_s + '.else.' +\
str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]))
elif et == e.EMIT_ELSE:
out.put ('jmp ' + fn_cur.name_s + '.endif.' +\
str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]))
out.put (fn_cur.name_s + '.else.' +\
str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]) + ':', i_n = 0)
elif et == e.EMIT_ENDIF:
out.put (fn_cur.name_s + '.endif.' +\
str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]) + ':', i_n = 0)
elif et == e.EMIT_WHILE:
out.put (fn_cur.name_s + '.while.' +\
str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]) + ':', i_n = 0)
if not get_val (fn_cur, val, s.regs [0]):
return False
out.put ('test ' + s.regs [0] + ', ' + s.regs [0])
out.put ('jz ' + fn_cur.name_s + '.wend.' +\
str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]))
elif et == e.EMIT_WEND:
out.put ('jmp ' + fn_cur.name_s + '.while.' +\
str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]))
out.put (fn_cur.name_s + '.wend.' +\
str(fn_cur.flow_cur [fn_cur.flow_n - 1][1]) + ':', i_n = 0)
elif et == e.EMIT_ADD:
if not get_val (fn_cur, val, s.regs [1]):
return False
out.put ('add ' + s.regs [1] + ', ' + s.regs [0])
elif et == e.EMIT_SUB:
if not get_val (fn_cur, val, s.regs [1]):
return False
out.put ('sub ' + s.regs [1] + ', ' + s.regs [0])
elif et == e.EMIT_MUL:
if not get_val (fn_cur, val, s.regs [1]):
return False
out.put ('imul ' + s.regs [1] + ', ' + s.regs [0])
elif et == e.EMIT_DIV:
if not get_val (fn_cur, val, s.regs [1]):
return False
out.put ('cltd')
out.put ('idiv ' + s.regs [1])
elif et == e.EMIT_RES:
if not set_val (fn_cur, val):
return False
elif et == e.EMIT_SET:
if not get_val (fn_cur, val2, s.regs [0]):
return False
if not set_val (fn_cur, val):
return False
elif et == e.EMIT_ADDTO:
if not get_val (fn_cur, val2, s.regs [1]):
return False
if not get_val (fn_cur, val, s.regs [0]):
return False
out.put ('add ' + s.regs [1] + ', ' + s.regs [0])
if not set_val (fn_cur, val):
return False
elif et == e.EMIT_SUBFROM:
if not get_val (fn_cur, val2, s.regs [1]):
return False
if not get_val (fn_cur, val, s.regs [0]):
return False
out.put ('sub ' + s.regs [1] + ', ' + s.regs [0])
if not set_val (fn_cur, val):
return False
elif et == e.EMIT_MULTO:
if not get_val (fn_cur, val2, s.regs [1]):
return False
if not get_val (fn_cur, val, s.regs [0]):
return False
out.put ('imul ' + s.regs [1] + ', ' + s.regs [0])
if not set_val (fn_cur, val):
return False
elif et == e.EMIT_DIVFROM:
if not get_val (fn_cur, val2, s.regs [1]):
return False
if not get_val (fn_cur, val, s.regs [0]):
return False
out.put ('cltd')
out.put ('idiv ' + s.regs [1])
if not set_val (fn_cur, val):
return False
else:
out.put ('uknown emit type')
return False
return True
def get_val (fn_cur, val, reg):
val_type = get_val_type (val)
if not val_type:
out.error ('unknown val type "' + val + '"')
return False
elif val_type == e.VAL_LARRAY:
datum = fn_cur.def_data ('.l' + str(fn_cur.data_larray_n),\
e.DATA_LARRAY, val)
out.put ('mov ' + '$' + datum.name_s + ', ' + reg)
elif val_type == e.VAL_STR:
datum = fn_cur.def_data ('.s' + str(fn_cur.data_str_n),\
e.DATA_STR, val)
out.put ('mov ' + '$' + datum.name_s + ', ' + reg)
elif val_type == e.VAL_LONG:
out.put ('mov $' + val + ', ' + reg)
elif val_type == e.VAL_VAR:
arg_i = fn_cur.get_arg (val)
if arg_i:
arg_i -= 1
if arg_i < s.arg_regs_n:
_s = s.arg_regs [arg_i]
else:
_s = str((arg_i + 1) * s.long_len) + '(' + s.stack_regs [1] +\
')'
out.put ('mov ' + _s + ', ' + reg)
else:
var = fn_cur.get_or_def_var (val)
if not var:
return False
out.put ('mov ' + var.datum.name_s + ', ' + reg)
elif val_type == e.VAL_VAR_DEREF:
_n = val [1:]
arg_i = fn_cur.get_arg (_n)
if arg_i:
# TODO support this
out.error ('dereferencing arg')
return False
else:
var = fn_cur.get_or_def_var (_n)
if not var:
return False
out.put ('mov ' + var.datum.name_s + ', ' + reg)
out.put ('mov (' + reg + '), ' + reg)
return True
def set_val (fn_cur, val):
reg0 = s.regs [0]
reg2 = s.regs [2]
val_type = get_val_type (val)
if \
val_type == e.VAL_STR or\
val_type == e.VAL_LARRAY or\
val_type == e.VAL_LONG:
out.error ('can\'t assign to this type')
return False
elif val_type == e.VAL_VAR:
arg_i = fn_cur.get_arg (val)
if arg_i:
arg_i -= 1
if arg_i < s.arg_regs_n:
_s = s.arg_regs [arg_i]
else:
_s = str((arg_i + 1) * s.long_len) + '(' + s.stack_regs [1] +\
')'
out.put ('mov ' + reg0 + ', ' + _s)
else:
var = fn_cur.get_or_def_var (val)
if not var:
return False
out.put ('mov ' + reg0 + ', ' + var.datum.name_s)
elif val_type == e.VAL_VAR_DEREF:
_n = val [1:]
arg_i = fn_cur.get_arg (_n)
if arg_i:
out.error ('can\'t modify function arg')
return False
else:
var = fn_cur.get_or_def_var (_n)
if not var:
return False
out.put ('mov ' + var.datum.name_s + ', ' + reg2)
out.put ('mov ' + reg0 + ', (' + reg2 + ')')
return True
def get_val_type (_s):
if not _s:
return e.VAL_NONE
elif _s [0] == '-' or _s.isdigit () or _s [0] == "'":
return e.VAL_LONG
elif _s [0] == '[':
return e.VAL_LARRAY
elif _s [0] == '"':
return e.VAL_STR
elif _s [0] == '@':
return e.VAL_VAR_DEREF
else:
return e.VAL_VAR
| 30.693069 | 78 | 0.475484 | 105 | 0.01129 | 0 | 0 | 0 | 0 | 0 | 0 | 860 | 0.092473 |
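To make the emitter concrete, here is a hedged driver sketch of the prologue/epilogue that the `EMIT_DEF`/`EMIT_RET` branches above produce for a trivial function. The `FnStub` class and the claim that `out.put` prints one instruction per call are assumptions, not part of the original module:

```python
class FnStub:  # hypothetical stand-in for the real fn_cur object
    name_s = 'f'
    flow_ret_t = True
    data_n = 0

init(8)                           # select the x86-64 register set
emit(FnStub(), e.EMIT_DEF, 'f')   # .globl f / f: / push %rbp / mov %rsp, %rbp / xor %rax, %rax
emit(FnStub(), e.EMIT_RET, '7')   # mov $7, %rax / pop %rbp / ret
```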
9e00237613a99687f5e6e25a05d24aa9f51580f2 | 2,291 | py | Python | _static/cookbook/gravmag_euler_classic_expanding_window.py | fatiando/v0.1 | 1ab9876b247c67834b8e1c874d5b1d86f82802e2 | ["BSD-3-Clause"] | null | null | null | _static/cookbook/gravmag_euler_classic_expanding_window.py | fatiando/v0.1 | 1ab9876b247c67834b8e1c874d5b1d86f82802e2 | ["BSD-3-Clause"] | null | null | null | _static/cookbook/gravmag_euler_classic_expanding_window.py | fatiando/v0.1 | 1ab9876b247c67834b8e1c874d5b1d86f82802e2 | ["BSD-3-Clause"] | null | null | null

"""
GravMag: Classic 3D Euler deconvolution of magnetic data using an
expanding window
"""
from fatiando import logger, mesher, gridder, utils, gravmag
from fatiando.vis import mpl, myv
log = logger.get()
log.info(logger.header())
# Make a model
bounds = [-5000, 5000, -5000, 5000, 0, 5000]
model = [
mesher.Prism(-1500, -500, -1500, -500, 1000, 2000, {'magnetization':2}),
mesher.Prism(500, 1500, 500, 2000, 1000, 2000, {'magnetization':2})]
# Generate some data from the model
shape = (100, 100)
area = bounds[0:4]
xp, yp, zp = gridder.regular(area, shape, z=-1)
# Add a constant baselevel
baselevel = 10
# Convert from nanoTesla to Tesla because euler and derivatives require things
# in SI
tf = (utils.nt2si(gravmag.prism.tf(xp, yp, zp, model, inc=-45, dec=0))
+ baselevel)
# Calculate the derivatives using FFT
xderiv = gravmag.fourier.derivx(xp, yp, tf, shape)
yderiv = gravmag.fourier.derivy(xp, yp, tf, shape)
zderiv = gravmag.fourier.derivz(xp, yp, tf, shape)
mpl.figure()
titles = ['Total field', 'x derivative', 'y derivative', 'z derivative']
for i, f in enumerate([tf, xderiv, yderiv, zderiv]):
mpl.subplot(2, 2, i + 1)
mpl.title(titles[i])
mpl.axis('scaled')
mpl.contourf(yp, xp, f, shape, 50)
mpl.colorbar()
mpl.m2km()
mpl.show()
# Pick the centers of the expanding windows
# The number of final solutions will be the number of points picked
mpl.figure()
mpl.suptitle('Pick the centers of the expanding windows')
mpl.axis('scaled')
mpl.contourf(yp, xp, tf, shape, 50)
mpl.colorbar()
centers = mpl.pick_points(area, mpl.gca(), xy2ne=True)
# Run the euler deconvolution on an expanding window
# Structural index is 3
index = 3
results = []
for center in centers:
results.append(
gravmag.euler.expanding_window(xp, yp, zp, tf, xderiv, yderiv, zderiv,
index, gravmag.euler.classic, center, 500, 5000))
print "Base level used: %g" % (baselevel)
print "Estimated base level: %g" % (results[-1]['baselevel'])
print "Estimated source location: %s" % (str(results[-1]['point']))
myv.figure()
myv.points([r['point'] for r in results], size=300.)
myv.prisms(model, opacity=0.5)
axes = myv.axes(myv.outline(bounds), ranges=[b*0.001 for b in bounds])
myv.wall_bottom(bounds)
myv.wall_north(bounds)
myv.show()
| 32.728571 | 78 | 0.690965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 719 | 0.313837 |
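For reference, the classic solver invoked here fits Euler's homogeneity equation over each window. A hedged statement of the standard form (from the geophysics literature, not extracted from this script):

```latex
(x - x_0)\,\frac{\partial T}{\partial x}
  + (y - y_0)\,\frac{\partial T}{\partial y}
  + (z - z_0)\,\frac{\partial T}{\partial z}
  = \eta\,(b - T)
```

where \((x_0, y_0, z_0)\) is the estimated source position, \(T\) the total-field anomaly, \(b\) the base level, and \(\eta\) the structural index (set to 3 in the script above).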
9e00350a4a2fb7dd0ecbdf440b5912df33e77fb3 | 324 | py | Python | popmemes/backend/popmemes/migrations/0002_auto_20190122_0939.py | hangulu/twitter | e4461652d35fc6d547e93f364b56c1a1637c5547 | ["MIT"] | null | null | null | popmemes/backend/popmemes/migrations/0002_auto_20190122_0939.py | hangulu/twitter | e4461652d35fc6d547e93f364b56c1a1637c5547 | ["MIT"] | 7 | 2019-12-29T08:23:25.000Z | 2022-02-26T14:04:51.000Z | popmemes/backend/popmemes/migrations/0002_auto_20190122_0939.py | hangulu/twitter | e4461652d35fc6d547e93f364b56c1a1637c5547 | ["MIT"] | null | null | null

# Generated by Django 2.1.5 on 2019-01-22 09:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('popmemes', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Popmemes',
new_name='PopImage',
),
]
| 18 | 47 | 0.58642 | 239 | 0.737654 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.280864 |
9e06be00254eeed4fa569ee854389c69ab61f743 | 6,434 | py | Python | tests/ampligraph/datasets/test_datasets.py | ojasviyadav/AmpliGraph | 07ce70ff9e30812ac8f4a34d245d1d5decec27f7 | ["Apache-2.0"] | null | null | null | tests/ampligraph/datasets/test_datasets.py | ojasviyadav/AmpliGraph | 07ce70ff9e30812ac8f4a34d245d1d5decec27f7 | ["Apache-2.0"] | null | null | null | tests/ampligraph/datasets/test_datasets.py | ojasviyadav/AmpliGraph | 07ce70ff9e30812ac8f4a34d245d1d5decec27f7 | ["Apache-2.0"] | null | null | null

# Copyright 2019 The AmpliGraph Authors. All Rights Reserved.
#
# This file is Licensed under the Apache License, Version 2.0.
# A copy of the Licence is available in LICENCE, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
from ampligraph.datasets import load_wn18, load_fb15k, load_fb15k_237, load_yago3_10, load_wn18rr, load_wn11, load_fb13
from ampligraph.datasets.datasets import _clean_data
import numpy as np
def test_clean_data():
X = {
'train': np.array([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i'], ['j', 'k', 'l']]),
'valid': np.array([['a', 'b', 'c'], ['x', 'e', 'f'], ['g', 'a', 'i'], ['j', 'k', 'y']]),
'test': np.array([['a', 'b', 'c'], ['d', 'e', 'x'], ['g', 'b', 'i'], ['y', 'k', 'l']]),
}
clean_X, valid_idx, test_idx = _clean_data(X, return_idx=True)
np.testing.assert_array_equal(clean_X['train'], X['train'])
np.testing.assert_array_equal(clean_X['valid'], np.array([['a', 'b', 'c']]))
np.testing.assert_array_equal(clean_X['test'], np.array([['a', 'b', 'c'], ['g', 'b', 'i']]))
np.testing.assert_array_equal(valid_idx, np.array([True, False, False, False]))
np.testing.assert_array_equal(test_idx, np.array([True, False, True, False]))
def test_load_wn18():
wn18 = load_wn18()
assert len(wn18['train']) == 141442
assert len(wn18['valid']) == 5000
assert len(wn18['test']) == 5000
ent_train = np.union1d(np.unique(wn18["train"][:, 0]), np.unique(wn18["train"][:, 2]))
ent_valid = np.union1d(np.unique(wn18["valid"][:, 0]), np.unique(wn18["valid"][:, 2]))
ent_test = np.union1d(np.unique(wn18["test"][:, 0]), np.unique(wn18["test"][:, 2]))
distinct_ent = np.union1d(np.union1d(ent_train, ent_valid), ent_test)
distinct_rel = np.union1d(np.union1d(np.unique(wn18["train"][:, 1]), np.unique(wn18["train"][:, 1])),
np.unique(wn18["train"][:, 1]))
assert len(distinct_ent) == 40943
assert len(distinct_rel) == 18
def test_load_fb15k():
fb15k = load_fb15k()
assert len(fb15k['train']) == 483142
assert len(fb15k['valid']) == 50000
assert len(fb15k['test']) == 59071
# ent_train = np.union1d(np.unique(fb15k["train"][:,0]), np.unique(fb15k["train"][:,2]))
# ent_valid = np.union1d(np.unique(fb15k["valid"][:,0]), np.unique(fb15k["valid"][:,2]))
# ent_test = np.union1d(np.unique(fb15k["test"][:,0]), np.unique(fb15k["test"][:,2]))
# distinct_ent = np.union1d(np.union1d(ent_train, ent_valid), ent_test)
# distinct_rel = np.union1d(np.union1d(np.unique(fb15k["train"][:,1]), np.unique(fb15k["train"][:,1])),
# np.unique(fb15k["train"][:,1]))
# assert len(distinct_ent) == 14951
# assert len(distinct_rel) == 1345
def test_load_fb15k_237():
fb15k_237 = load_fb15k_237()
assert len(fb15k_237['train']) == 272115
# - 9 because 9 triples containing unseen entities are removed
assert len(fb15k_237['valid']) == 17535 - 9
# - 28 because 28 triples containing unseen entities are removed
assert len(fb15k_237['test']) == 20466 - 28
def test_yago_3_10():
yago_3_10 = load_yago3_10()
assert len(yago_3_10['train']) == 1079040
assert len(yago_3_10['valid']) == 5000 - 22
assert len(yago_3_10['test']) == 5000 - 18
# ent_train = np.union1d(np.unique(yago_3_10["train"][:,0]), np.unique(yago_3_10["train"][:,2]))
# ent_valid = np.union1d(np.unique(yago_3_10["valid"][:,0]), np.unique(yago_3_10["valid"][:,2]))
# ent_test = np.union1d(np.unique(yago_3_10["test"][:,0]), np.unique(yago_3_10["test"][:,2]))
# assert len(set(ent_valid) - set(ent_train)) == 22
# assert len (set(ent_test) - ((set(ent_valid) & set(ent_train)) | set(ent_train))) == 18
# distinct_ent = np.union1d(np.union1d(ent_train, ent_valid), ent_test)
# distinct_rel = np.union1d(np.union1d(np.unique(yago_3_10["train"][:,1]), np.unique(yago_3_10["train"][:,1])),
# np.unique(yago_3_10["train"][:,1]))
# assert len(distinct_ent) == 123182
# assert len(distinct_rel) == 37
def test_wn18rr():
wn18rr = load_wn18rr()
ent_train = np.union1d(np.unique(wn18rr["train"][:, 0]), np.unique(wn18rr["train"][:, 2]))
ent_valid = np.union1d(np.unique(wn18rr["valid"][:, 0]), np.unique(wn18rr["valid"][:, 2]))
ent_test = np.union1d(np.unique(wn18rr["test"][:, 0]), np.unique(wn18rr["test"][:, 2]))
distinct_ent = np.union1d(np.union1d(ent_train, ent_valid), ent_test)
distinct_rel = np.union1d(np.union1d(np.unique(wn18rr["train"][:, 1]), np.unique(wn18rr["train"][:, 1])),
np.unique(wn18rr["train"][:, 1]))
assert len(wn18rr['train']) == 86835
# - 210 because 210 triples containing unseen entities are removed
assert len(wn18rr['valid']) == 3034 - 210
# - 210 because 210 triples containing unseen entities are removed
assert len(wn18rr['test']) == 3134 - 210
def test_wn11():
wn11 = load_wn11(clean_unseen=False)
assert len(wn11['train']) == 110361
assert len(wn11['valid']) == 5215
assert len(wn11['test']) == 21035
assert len(wn11['valid_labels']) == 5215
assert len(wn11['test_labels']) == 21035
assert sum(wn11['valid_labels']) == 2606
assert sum(wn11['test_labels']) == 10493
wn11 = load_wn11(clean_unseen=True)
assert len(wn11['train']) == 110361
assert len(wn11['valid']) == 5215 - 338
assert len(wn11['test']) == 21035 - 1329
assert len(wn11['valid_labels']) == 5215 - 338
assert len(wn11['test_labels']) == 21035 - 1329
assert sum(wn11['valid_labels']) == 2409
assert sum(wn11['test_labels']) == 9706
def test_fb13():
fb13 = load_fb13(clean_unseen=False)
assert len(fb13['train']) == 316232
assert len(fb13['valid']) == 5908 + 5908
assert len(fb13['test']) == 23733 + 23731
assert len(fb13['valid_labels']) == 5908 + 5908
assert len(fb13['test_labels']) == 23733 + 23731
assert sum(fb13['valid_labels']) == 5908
assert sum(fb13['test_labels']) == 23733
fb13 = load_fb13(clean_unseen=True)
assert len(fb13['train']) == 316232
assert len(fb13['valid']) == 5908 + 5908
assert len(fb13['test']) == 23733 + 23731
assert len(fb13['valid_labels']) == 5908 + 5908
assert len(fb13['test_labels']) == 23733 + 23731
assert sum(fb13['valid_labels']) == 5908
assert sum(fb13['test_labels']) == 23733
| 42.328947 | 119 | 0.615636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,496 | 0.387939 |
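The behavior that `test_clean_data` pins down is simple to state: a validation/test triple survives only if its subject, predicate, and object all occur in the training split. A hedged re-implementation sketch (not the library's actual `_clean_data` code, which also handles both splits and the returned index arrays):

```python
import numpy as np

def clean_split(train, other):
    # Keep only triples of `other` whose entities and relation appear in `train`.
    ents = set(train[:, 0]) | set(train[:, 2])
    rels = set(train[:, 1])
    idx = np.array([s in ents and p in rels and o in ents for s, p, o in other])
    return other[idx], idx
```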
9e08eda6cab019bc0097ad8470c08bcc09a74c92 | 5,959 | py | Python | code/geometry/sector.py | Prometheus3375/inno-thesis | 72245706fa25b49f333e08d6074d421b5becfdb5 | ["BSD-3-Clause"] | null | null | null | code/geometry/sector.py | Prometheus3375/inno-thesis | 72245706fa25b49f333e08d6074d421b5becfdb5 | ["BSD-3-Clause"] | null | null | null | code/geometry/sector.py | Prometheus3375/inno-thesis | 72245706fa25b49f333e08d6074d421b5becfdb5 | ["BSD-3-Clause"] | null | null | null

from io import StringIO
from math import atan2, ceil
from typing import Literal, Union, overload
from common import PI, Real, TWOPI, deg, real, reduce_angle
from functions import qbezeir_svg_given_middle
from .circle import CircleBase, FixedCircle
from .point import PointBase, Polar
def check_arc(value: float, /):
if not (0 < value < TWOPI):
raise ValueError(f'arc should be in range (0°, 360°), got {deg(value):.0g}°')
class SectorBase:
__slots__ = '_circle', '_arc', '_arm'
def __init__(self, circle: FixedCircle, arc: float, arm: float, /):
self._circle = circle
self._arc = arc
self._arm = arm
@property
def circle(self, /):
return self._circle
@property
def arc(self, /):
return self._arc
@property
def start_arm(self, /):
return self._arm
@property
def end_arm(self, /):
return self._arm - self._arc
@property
def end_arm_reduced(self, /):
return reduce_angle(self.end_arm)
def __repr__(self, /):
return (
f'{self.__class__.__name__}('
f'{self.circle}, '
f'arc={deg(self.arc):.0f}°, '
f'start_arm={deg(self.start_arm):.0f}°'
f')'
)
def copy(self, /):
return self.__class__(self.circle.copy(), self.arc, self.start_arm)
def __getnewargs__(self, /):
return self._circle, self._arc, self._arm
def fix(self, /) -> 'FixedSector':
raise NotImplementedError
def unfix(self, /) -> 'MutableSector':
raise NotImplementedError
def __eq__(self, other, /):
if isinstance(other, SectorBase):
return self.arc == other.arc and self.start_arm == self.start_arm and self.circle == other.circle
return NotImplemented
def __ne__(self, other, /):
if isinstance(other, SectorBase):
return self.arc != other.arc or self.start_arm != self.start_arm or self.circle != other.circle
return NotImplemented
def is_angle_inside(self, fi: Real, /) -> bool:
fi = reduce_angle(fi)
start = self.start_arm
end = self.end_arm_reduced
if end > start:
return end <= fi <= PI or -PI < fi <= start
return end <= fi <= start
def is_point_inside(self, p: PointBase, /) -> bool:
# Another way https://stackoverflow.com/a/13675772
x = p.x - self.circle.center.x
y = p.y - self.circle.center.y
r2 = x * x + y * y
if r2 == 0:
return True
if r2 > self.circle.r2:
return False
return self.is_angle_inside(atan2(y, x))
def __contains__(self, item, /):
if isinstance(item, real):
return self.is_angle_inside(item)
if isinstance(item, PointBase):
return self.is_point_inside(item)
return False
def as_plotly_shape(self, step_angle: Real = PI / 6, /) -> dict:
# Simulate circle arc with quadratic Bezier curves
center = self.circle.center
r = self.circle.radius
n = ceil(self.arc / step_angle) - 1
p0 = Polar(r, self.start_arm) + center
path = StringIO()
path.write(
f'M {center.x} {center.y} '
f'L {p0.x} {p0.y} '
)
arm = self.start_arm
for _ in range(n):
pm = Polar(r, arm - step_angle / 2) + center
arm -= step_angle
p2 = Polar(r, arm) + center
path.write(f'{qbezeir_svg_given_middle(p0, p2, pm)} ')
p0 = p2
p2 = Polar(r, self.end_arm) + center
pm = Polar(r, (arm + self.end_arm) / 2) + center
path.write(f'{qbezeir_svg_given_middle(p0, p2, pm)} Z')
return dict(
type='path',
path=path.getvalue()
)
class FixedSector(SectorBase):
__slots__ = '_hash',
def __init__(self, circle: FixedCircle, arc: float, arm: float, /):
super().__init__(circle, arc, arm)
self._hash = hash(frozenset((circle, arc, arm)))
def fix(self, /):
return self
def unfix(self, /):
return MutableSector(self.circle, self.arc, self.start_arm)
def __hash__(self, /):
return self._hash
class MutableSector(SectorBase):
__slots__ = ()
# TODO: add circle changing
@property
def arc(self, /):
return self._arc
@arc.setter
def arc(self, value: Real, /):
check_arc(value)
self._arc = float(value)
@property
def start_arm(self, /):
return self._arm
@start_arm.setter
def start_arm(self, value: Real, /):
self._arm = reduce_angle(float(value))
@property
def end_arm(self, /):
return self._arm - self._arc
@end_arm.setter
def end_arm(self, value: Real, /):
self._arm = reduce_angle(value + self._arc)
def fix(self, /):
return FixedSector(self.circle, self.arc, self.start_arm)
def unfix(self, /):
return self
def rotate(self, angle: Real, /):
"""
Rotates the sector by the given angle clockwise
"""
self.start_arm -= angle
@overload
def Sector(circle: CircleBase, arc: Real, start_arm: Real = PI, /) -> FixedSector: ...
@overload
def Sector(circle: CircleBase, arc: Real, start_arm: Real = PI, /, *, fix: Literal[True]) -> FixedSector: ...
@overload
def Sector(circle: CircleBase, arc: Real, start_arm: Real = PI, /, *, fix: Literal[False]) -> MutableSector: ...
@overload
def Sector(circle: CircleBase, arc: Real, start_arm: Real = PI, /, *,
fix: bool) -> Union[FixedSector, MutableSector]: ...
def Sector(circle: CircleBase, arc: Real, start_arm: Real = PI, /, *, fix: bool = True) -> SectorBase:
check_arc(arc)
arc = float(arc)
start_arm = float(start_arm)
if fix:
return FixedSector(circle.fix(), arc, start_arm)
return MutableSector(circle.fix(), arc, start_arm)
| 26.721973 | 112 | 0.587515 | 4,719 | 0.791247 | 0 | 0 | 1,322 | 0.221663 | 0 | 0 | 574 | 0.096244 |
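The `Sector` factory at the bottom takes its three leading arguments positionally (note the `/` in the signature) and returns a hashable `FixedSector` unless `fix=False` is passed. A hedged usage sketch; the `Circle` and `Point` constructor signatures are assumptions based on the sibling modules imported at the top:

```python
from math import pi

c = Circle(Point(0, 0), 10)           # hypothetical constructor signatures
s = Sector(c, pi / 2, pi, fix=False)  # mutable quarter-circle sector

print(Point(-5, -1) in s)    # __contains__ dispatches to is_point_inside
s.rotate(pi / 4)             # clockwise rotation: start_arm -= angle
shape = s.as_plotly_shape()  # SVG-path dict; the arc is approximated by
                             # quadratic Béziers through on-curve midpoints
```

Presumably `qbezeir_svg_given_middle(p0, p2, pm)` recovers the Bézier control point from the on-curve midpoint as \(c = 2p_m - (p_0 + p_2)/2\), the standard identity at \(t = 1/2\); this is an inference, not something stated in the module.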
9e097d9b8021dd21364c747eb7b2f08352fe9ba6 | 1,155 | py | Python | scripts/gen_mh.py | bplank/DaNplus | 220428109c9ae5abc60e8968a7fe7a4aa6ad92e3 | ["MIT"] | 5 | 2020-12-11T17:11:03.000Z | 2022-01-01T12:14:04.000Z | scripts/gen_mh.py | bplank/DaNplus | 220428109c9ae5abc60e8968a7fe7a4aa6ad92e3 | ["MIT"] | null | null | null | scripts/gen_mh.py | bplank/DaNplus | 220428109c9ae5abc60e8968a7fe7a4aa6ad92e3 | ["MIT"] | null | null | null

import os
COL_SEPARATOR = "\t"
MULTI_SEPARATOR = "$"
for neFile in os.listdir('data/'):
neFile = 'data/' + neFile
out_filename = neFile.replace('.tsv', "_mh.tsv")
if os.path.isfile(out_filename) or '_mh' in neFile or os.path.isdir(neFile):
continue
out_f = open(out_filename, "w")
with open(neFile, "r") as in_f:
for line in in_f:
if len(line) > 2:
token_attrs = line.rstrip().split(COL_SEPARATOR)
if (token_attrs[1] == "O") and (token_attrs[2] == "O"):
new_label = "O"
elif (token_attrs[1] != "O") and (token_attrs[2] == "O"):
new_label = token_attrs[1]
elif (token_attrs[1] == "O") and (token_attrs[2] != "O"):
new_label = token_attrs[2]
else:
labels = [token_attrs[1], token_attrs[2]]
labels.sort()
new_label = labels[0] + MULTI_SEPARATOR + labels[1]
out_f.write(token_attrs[0] + COL_SEPARATOR + new_label + "\n")
else:
out_f.write(line)
out_f.close()
| 38.5 | 80 | 0.507359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.062338 |
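Concretely, the merge rule turns the two label columns of each three-column TSV line into one: a single non-`O` label wins outright, and two non-`O` labels are sorted and joined with `$`. A hypothetical input/output pair:

```python
# input line  (token \t first-label \t second-label):
#   'Aarhus\tB-LOC\tB-ORG'
# output line (token \t merged-label):
#   'Aarhus\tB-LOC$B-ORG'
# while 'byen\tO\tO' simply becomes 'byen\tO'
```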
9e0bbeb93835b36e23fb310038a044e9818c4553 | 13,451 | py | Python | kecpkg/commands/sign.py | jberends/kecpkg-tools | 3c288c5b91b619fe76cd3622615f3ffe43509725 | ["Apache-2.0"] | null | null | null | kecpkg/commands/sign.py | jberends/kecpkg-tools | 3c288c5b91b619fe76cd3622615f3ffe43509725 | ["Apache-2.0"] | 7 | 2017-12-07T11:16:07.000Z | 2019-12-11T15:25:07.000Z | kecpkg/commands/sign.py | KE-works/kecpkg-tools | 3c288c5b91b619fe76cd3622615f3ffe43509725 | ["Apache-2.0"] | null | null | null

import os
import sys
from pprint import pprint
import click
from pykechain.utils import temp_chdir
from kecpkg.commands.utils import CONTEXT_SETTINGS
from kecpkg.gpg import get_gpg, list_keys, hash_of_file
from kecpkg.settings import SETTINGS_FILENAME, GNUPG_KECPKG_HOME, load_settings, DEFAULT_SETTINGS, ARTIFACTS_FILENAME, \
ARTIFACTS_SIG_FILENAME
from kecpkg.utils import remove_path, echo_info, echo_success, echo_failure, get_package_dir, unzip_package
@click.command(context_settings=CONTEXT_SETTINGS,
short_help="Perform package signing and key management.")
@click.argument('package', required=False)
@click.option('--settings', '--config', '-s', 'settings_filename',
help="path to the setting file (default `{}`".format(SETTINGS_FILENAME),
type=click.Path(), default=SETTINGS_FILENAME)
@click.option('--keyid', '--key-id', '-k', 'keyid',
help="ID (name, email, KeyID) of the cryptographic key to do the operation with. ")
# @click.option('--passphrase', '-p', 'sign_passphrase', hide_input=True,
# help="Passphrase of the cryptographic key to sign the contents of the package. "
# "Use in combination with `--sign` and `--keyid`")
@click.option('--import-key', '--import', '-i', 'do_import', type=click.Path(exists=True),
help="Import secret keyfile (in .asc) to the KECPKG keyring which will be used for signing. "
"You can export a created key in gpg with `gpg -a --export-secret-key [keyID] > secret_key.asc`.")
@click.option('--delete-key', '-d', 'do_delete_key',
help="Delete key by its fingerprint permanently from the KECPKG keyring. To retrieve the full "
"fingerprint of the key, use the `--list` option and look at the 'fingerprint' section.")
@click.option('--create-key', '-c', 'do_create_key', is_flag=True,
help="Create secret key and add it to the KECPKG keyring.")
@click.option('--export-key', '--export', '-e', 'do_export_key', type=click.Path(),
help="Export public key to filename with `--keyid KeyID` in .ASC format for public distribution.")
@click.option('--clear-keyring', 'do_clear', is_flag=True, default=False,
help="Clear all keys from the KECPKG keyring")
@click.option('--list', '-l', 'do_list', is_flag=True,
help="List all available keys in the KECPKG keyring")
@click.option('--verify-kecpkg', 'do_verify_kecpkg', type=click.Path(exists=True),
help="Verify contents and signature of an existing kecpkg.")
@click.option('--yes', '-y', 'do_yes', is_flag=True,
help="Don't ask questions, just do it.")
@click.option('-v', '--verbose', help="Be more verbose", is_flag=True)
def sign(package=None, **options):
"""Sign the package."""
# noinspection PyShadowingNames
def _do_clear(options):
echo_info("Clearing all keys from the KECPKG keyring")
if not options.get('do_yes'):
options['do_yes'] = click.confirm("Are you sure you want to clear the KECPKG keyring?", default=False)
if options.get('do_yes'):
remove_path(GNUPG_KECPKG_HOME)
echo_success("Completed")
sys.exit(0)
else:
echo_failure("Not removing the KECPKG keyring")
sys.exit(1)
def _do_list(gpg, explain=False):
if explain:
echo_info("Listing all keys from the KECPKG keyring")
result = gpg.list_keys(secret=True)
if len(result):
from tabulate import tabulate
print(tabulate(list_keys(gpg=gpg), headers=("Name", "Comment", "E-mail", "Expires", "Fingerprint")))
else:
if explain:
echo_info("No keys found in KECPKG keyring. Use `--import-key` or `--create-key` to add a "
"secret key to the KECPKG keyring in order to sign KECPKG's.")
sys.exit(1)
# noinspection PyShadowingNames
def _do_import(gpg, options):
echo_info("Importing secret key into KECPKG keyring from '{}'".format(options.get('do_import')))
result = gpg.import_keys(open(os.path.abspath(options.get('do_import')), 'rb').read())
# pprint(result.__dict__)
if result and result.sec_imported:
            echo_success("Successfully imported secret key into the KECPKG keystore")
_do_list(gpg=gpg)
sys.exit(0)
elif result and result.unchanged:
echo_failure("Did not import the secret key into the KECPKG keystore. The key was already "
"in place and was unchanged")
_do_list(gpg=gpg)
sys.exit(1)
echo_failure("Did not import a secret key into the KECPKG keystore. Is something wrong "
"with the file: '{}'? Are you sure it is a ASCII file containing a "
"private key block?".format(options.get('do_import')))
sys.exit(1)
# noinspection PyShadowingNames
def _do_delete_key(gpg, options):
echo_info("Deleting private key with ID '{}' from the KECPKG keyring".format(options.get('do_delete_key')))
# custom call to gpg using --delete-secret-and-public-key
result = gpg.result_map['delete'](gpg)
# noinspection PyProtectedMember
p = gpg._open_subprocess(['--yes', '--delete-secret-and-public-key', options.get('do_delete_key')])
# noinspection PyProtectedMember
gpg._collect_output(p, result, stdin=p.stdin)
# result = gpg.delete_keys(fingerprints=options.get('do_delete_key'),
# secret=True,
# passphrase=options.get('sign_passphrase'))
# pprint(result.__dict__)
if result and result.stderr.find("failed") < 0:
            echo_success("Successfully deleted key")
_do_list(gpg=gpg)
sys.exit(0)
echo_failure("Could not delete key.")
sys.exit(1)
# noinspection PyShadowingNames
def _do_create_key(gpg, options):
echo_info("Will create a secret key and store it into the KECPKG keyring.")
package_dir = get_package_dir(package_name=package, fail=False)
settings = DEFAULT_SETTINGS
if package_dir is not None:
package_name = os.path.basename(package_dir)
echo_info('Package `{}` has been selected'.format(package_name))
settings = load_settings(package_dir=package_dir, settings_filename=options.get('settings_filename'))
key_info = {'name_real': click.prompt("Name", default=settings.get('name')),
'name_comment': click.prompt("Comment", default="KECPKG SIGNING KEY"),
'name_email': click.prompt("Email", default=settings.get('email')),
'expire_date': click.prompt("Expiration in months", default=12,
value_proc=lambda i: "{}m".format(i)), 'key_type': 'RSA',
'key_length': 4096,
'key_usage': '',
'subkey_type': 'RSA',
'subkey_length': 4096,
'subkey_usage': 'encrypt,sign,auth',
'passphrase': ''}
passphrase = click.prompt("Passphrase", hide_input=True)
passphrase_confirmed = click.prompt("Confirm passphrase", hide_input=True)
if passphrase == passphrase_confirmed:
key_info['passphrase'] = passphrase
else:
raise ValueError("The passphrases did not match.")
echo_info("Creating the secret key '{name_real} ({name_comment}) <{name_email}>'".format(**key_info))
echo_info("Please move around mouse or generate other activity to introduce sufficient entropy. "
"This might take a minute...")
result = gpg.gen_key(gpg.gen_key_input(**key_info))
pprint(result.__dict__)
if result and result.stderr.find('KEY_CREATED'):
            echo_success("The key was successfully created")
_do_list(gpg=gpg)
sys.exit(0)
echo_failure("Could not generate the key due to an error: '{}'".format(result.stderr))
sys.exit(1)
# noinspection PyShadowingNames
def _do_export_key(gpg, options):
"""Export public key."""
echo_info("Exporting public key")
if options.get('keyid') is None:
_do_list(gpg=gpg)
options['keyid'] = click.prompt("Provide KeyId (name, comment, email, fingerprint) of the key to export")
result = gpg.export_keys(keyids=[options.get('keyid')], secret=False, armor=True)
if result is not None:
with open(options.get('do_export_key'), 'w') as fd:
fd.write(result)
            echo_success("Successfully wrote public key to '{}'".format(options.get('do_export_key')))
sys.exit(0)
echo_failure("Could not export key")
sys.exit(1)
# noinspection PyShadowingNames
def _do_verify_kecpkg(gpg, options):
"""Verify the kecpkg."""
echo_info("Verify the contents of the KECPKG and if the KECPKG is signed with a valid signature.")
current_working_directory = os.getcwd()
with temp_chdir() as d:
unzip_package(package_path=os.path.join(current_working_directory, options.get('do_verify_kecpkg')),
target_path=d)
verify_signature(d, artifacts_filename=ARTIFACTS_FILENAME, artifacts_sig_filename=ARTIFACTS_SIG_FILENAME)
verify_artifacts_hashes(d, artifacts_filename=ARTIFACTS_FILENAME)
sys.exit(0)
#
# Dispatcher to subfunctions
#
if options.get('do_clear'):
_do_clear(options=options)
elif options.get('do_list'):
_do_list(gpg=get_gpg(), explain=True)
elif options.get('do_import'):
_do_import(gpg=get_gpg(), options=options)
elif options.get('do_delete_key'):
_do_delete_key(gpg=get_gpg(), options=options)
elif options.get('do_create_key'):
_do_create_key(gpg=get_gpg(), options=options)
elif options.get('do_export_key'):
_do_export_key(gpg=get_gpg(), options=options)
elif options.get('do_verify_kecpkg'):
_do_verify_kecpkg(gpg=get_gpg(), options=options)
else:
sys.exit(500)
sys.exit(0)
def verify_signature(package_dir, artifacts_filename, artifacts_sig_filename):
"""
Check signature of the package.
:param package_dir: directory fullpath of the package
:param artifacts_filename: path of the artifacts file
:param artifacts_sig_filename: path of the artifacts signature file
:return: None
"""
gpg = get_gpg()
artifacts_fp = os.path.join(package_dir, artifacts_filename)
artifacts_sig_fp = os.path.join(package_dir, artifacts_sig_filename)
if not os.path.exists(artifacts_fp):
echo_failure("Artifacts file does not exist: '{}'".format(artifacts_filename))
sys.exit(1)
if not os.path.exists(artifacts_sig_fp):
echo_failure("Artifacts signature file does not exist: '{}'. Is the package signed?".
format(artifacts_filename))
sys.exit(1)
with open(artifacts_sig_fp, 'rb') as sig_fd:
results = gpg.verify_file(sig_fd, data_filename=artifacts_fp)
if results.valid:
echo_info("Verified the signature and the signature is valid")
echo_info("Signed with: '{}'".format(results.username))
elif not results.valid:
echo_failure("Signature of the package is invalid")
echo_failure(pprint(results.__dict__))
sys.exit(1)
def verify_artifacts_hashes(package_dir, artifacts_filename):
"""
Check the hashes of the artifacts in the package.
:param package_dir: directory fullpath of the package
:param artifacts_filename: filename of the artifacts file
:return:
"""
artifacts_fp = os.path.join(package_dir, artifacts_filename)
if not os.path.exists(artifacts_fp):
echo_failure("Artifacts file does not exist: '{}'".format(artifacts_filename))
sys.exit(1)
with open(artifacts_fp, 'r') as fd:
artifacts = fd.readlines()
# process the file contents
# A line is "README.md,sha256=d831....ccf79a,336"
# ^filename ^algo ^hash ^size in bytes
fails = []
for af in artifacts:
# noinspection PyShadowingBuiltins,PyShadowingBuiltins
filename, hash, orig_size = af.split(',')
algorithm, orig_hash = hash.split('=')
fp = os.path.join(package_dir, filename)
if os.path.exists(fp):
found_hash = hash_of_file(fp, algorithm)
found_size = os.stat(fp).st_size
if found_hash != orig_hash.strip() or found_size != int(orig_size.strip()):
fails.append("File '{}' is changed in the package.".format(filename))
fails.append("File '{}' original checksum: '{}', found: '{}'".format(filename, orig_hash, found_hash))
fails.append("File '{}' original size: {}, found: {}".format(filename, orig_size, found_size))
else:
fails.append("File '{}' does not exist".format(filename))
if fails:
echo_failure('The package has been changed after building the package.')
for fail in fails:
print(fail)
sys.exit(1)
else:
        echo_info("Package contents successfully verified.")
| 46.867596 | 120 | 0.63564 | 0 | 0 | 0 | 0 | 9,886 | 0.734964 | 0 | 0 | 5,375 | 0.399599 |
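The verification helpers above lean on `hash_of_file` from `kecpkg.gpg`. A hedged local stand-in showing the behavior the artifacts check implies (streamed hashing; this is an assumption about the real helper, not its source):

```python
import hashlib, os

def sha256_of(path, chunk=8192):
    # Local stand-in for kecpkg's hash_of_file(fp, 'sha256') (assumed behavior).
    h = hashlib.sha256()
    with open(path, 'rb') as fd:
        for block in iter(lambda: fd.read(chunk), b''):
            h.update(block)
    return h.hexdigest()

# An ARTIFACTS line like 'README.md,sha256=d831...,336' is then checked by
# comparing sha256_of('README.md') and os.stat('README.md').st_size.
```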
9e0cb81c07d1ab8a7ecca4e8e8464e12bdf7bef9 | 954 | py | Python | benchmark/generate_data.py | etra0/sapeaob | 3e21bd66f8530f983130c52e37d612cc53181acd | ["MIT"] | 3 | 2021-08-04T13:00:25.000Z | 2021-12-21T21:07:40.000Z | benchmark/generate_data.py | etra0/sapeaob | 3e21bd66f8530f983130c52e37d612cc53181acd | ["MIT"] | null | null | null | benchmark/generate_data.py | etra0/sapeaob | 3e21bd66f8530f983130c52e37d612cc53181acd | ["MIT"] | null | null | null

import argparse
import random
SIGNATURE = b'\xDE\xAD\xBE\xEF\xC0\xFF\xEE\xAA\xBB\xCC\xDD\xEE\xFF\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xAA'
# Generate data in chunks of 1024 bytes.
def generate_random_data(size):
data = bytearray()
while len(data) <= size:
data += random.randbytes(1024)
return data
def sign_data(data, signature):
pos = random.randrange(0, len(data) - len(signature))
for (i, p) in enumerate(range(pos, pos + len(signature))):
data[p] = signature[i]
def write_data(data):
with open("output.bin", 'wb') as f:
f.write(data)
def main():
parser = argparse.ArgumentParser(description="Generate some random data")
parser.add_argument("--size", metavar="S", type=int, default=1024 * 1024 * 10, required=False)
args = parser.parse_args()
data = generate_random_data(args.size)
sign_data(data, SIGNATURE)
write_data(data)
if __name__ == "__main__":
main()
| 25.783784 | 111 | 0.669811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.212788 |
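A quick way to confirm the pattern was planted (assumes the script has just written `output.bin` to the current directory; note `random.randbytes` requires Python 3.9+):

```python
with open("output.bin", "rb") as f:
    blob = f.read()
print(blob.find(SIGNATURE))  # byte offset of the planted signature, or -1 if absent
```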
9e0dd95d1aaf80cae2655fcee6b6427ac437b94c | 10,563 | py | Python | doctor/lib/utils.py | freelawproject/doctor | 3858b6f5de7903353f4376303329a986db5b7983 | ["BSD-2-Clause"] | null | null | null | doctor/lib/utils.py | freelawproject/doctor | 3858b6f5de7903353f4376303329a986db5b7983 | ["BSD-2-Clause"] | null | null | null | doctor/lib/utils.py | freelawproject/doctor | 3858b6f5de7903353f4376303329a986db5b7983 | ["BSD-2-Clause"] | null | null | null

import datetime
import io
import os
import re
import subprocess
import warnings
from collections import namedtuple
from decimal import Decimal
from pathlib import Path
import six
from PyPDF2 import PdfFileMerger
from reportlab.pdfgen import canvas
class DoctorUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return f"{original}. You passed in {self.obj!r} ({type(self.obj)})"
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == "utf-8":
return s
else:
return s.decode("utf-8", errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, six.memoryview):
return bytes(s)
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b" ".join(
force_bytes(arg, encoding, strings_only, errors) for arg in s
)
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not issubclass(type(s), six.string_types):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, "__unicode__"):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DoctorUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = " ".join(force_text(arg, encoding, strings_only, errors) for arg in s)
return s
def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
_PROTECTED_TYPES = six.integer_types + (
type(None),
float,
Decimal,
datetime.datetime,
datetime.date,
datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def audio_encoder(data):
return namedtuple("AudioFile", data.keys())(*data.values())
def ignore_warnings(test_func):
def do_test(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ResourceWarning)
warnings.simplefilter("ignore", DeprecationWarning)
test_func(self, *args, **kwargs)
return do_test
def make_png_thumbnail_for_instance(filepath, max_dimension):
"""Abstract function for making a thumbnail for a PDF
See helper functions below for how to use this in a simple way.
    :param filepath: The path of the PDF on disk
    :param max_dimension: The longest you want any edge to be
    :return: (stdout, stderr, returncode) from the pdftoppm subprocess
"""
command = [
"pdftoppm",
"-singlefile",
"-f",
"1",
"-scale-to",
str(max_dimension),
filepath,
"-png",
]
p = subprocess.Popen(
command, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return stdout, stderr.decode("utf-8"), str(p.returncode)
def make_png_thumbnails(filepath, max_dimension, pages, directory):
    """Make PNG thumbnails for the given pages of a PDF

    :param filepath: The path of the PDF on disk
    :param max_dimension: The longest you want any edge to be
    :param pages: The page numbers to render
    :param directory: Temporary directory object the thumbnails are written into
    """
for page in pages:
command = [
"pdftoppm",
"-singlefile",
"-f",
str(page),
"-scale-to",
str(max_dimension),
filepath,
"-png",
f"{directory.name}/thumb-{page}",
]
p = subprocess.Popen(
command, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
p.communicate()
def pdf_bytes_from_image_array(image_list, output_path) -> None:
"""Make a pdf given an array of Image files
:param image_list: List of images
:type image_list: list
    :param output_path: Destination path the PDF is written to
    :return: None (the PDF is written to output_path)
"""
image_list[0].save(
output_path,
"PDF",
resolution=100.0,
save_all=True,
append_images=image_list[1:],
)
del image_list
def strip_metadata_from_path(file_path):
"""Convert PDF file into PDF and remove metadata from it
Stripping the metadata allows us to hash the PDFs
:param pdf_bytes: PDF as binary content
:return: PDF bytes with metadata removed.
"""
with open(file_path, "rb") as f:
pdf_merger = PdfFileMerger()
pdf_merger.append(io.BytesIO(f.read()))
pdf_merger.addMetadata({"/CreationDate": "", "/ModDate": ""})
byte_writer = io.BytesIO()
pdf_merger.write(byte_writer)
return force_bytes(byte_writer.getvalue())
def strip_metadata_from_bytes(pdf_bytes):
"""Convert PDF bytes into PDF and remove metadata from it
Stripping the metadata allows us to hash the PDFs
:param pdf_bytes: PDF as binary content
:return: PDF bytes with metadata removed.
"""
pdf_merger = PdfFileMerger()
pdf_merger.append(io.BytesIO(pdf_bytes))
pdf_merger.addMetadata({"/CreationDate": "", "/ModDate": ""})
byte_writer = io.BytesIO()
pdf_merger.write(byte_writer)
return force_bytes(byte_writer.getvalue())
def cleanup_form(form):
"""Clean up a form object"""
os.remove(form.cleaned_data["fp"])
def make_file(filename, dir=None):
filepath = f"{Path.cwd()}/doctor/test_assets/{filename}"
with open(filepath, "rb") as f:
return {"file": (filename, f.read())}
def make_buffer(filename, dir=None):
filepath = f"{Path.cwd()}/doctor/test_assets/{filename}"
with open(filepath, "rb") as f:
return {"file": ("filename", f.read())}
def pdf_has_images(path: str) -> bool:
"""Check raw PDF for embedded images.
We need to check if a PDF contains any images. If a PDF contains images it
likely has content that needs to be scanned.
:param path: Location of PDF to process.
:return: Does the PDF contain images?
:type: bool
"""
with open(path, "rb") as pdf_file:
pdf_bytes = pdf_file.read()
return True if re.search(rb"/Image ?/", pdf_bytes) else False
def ocr_needed(path: str, content: str) -> bool:
"""Check if OCR is needed on a PDF
Check if images are in PDF or content is empty.
:param path: The path to the PDF
:param content: The content extracted from the PDF.
:return: Whether OCR should be run on the document.
"""
if content.strip() == "" or pdf_has_images(path):
return True
return False
def make_page_with_text(page, data, h, w):
"""Make a page with text
:param page:
:param data:
:param h:
:param w:
:return:
"""
packet = io.BytesIO()
can = canvas.Canvas(packet, pagesize=(w, h))
# Set to a standard size and font for now.
can.setFont("Helvetica", 9)
# Make the text transparent
can.setFillAlpha(0)
for i in range(len(data["level"])):
try:
letter, (x, y, ww, hh), pg = (
data["text"][i],
(data["left"][i], data["top"][i], data["width"][i], data["height"][i]),
data["page_num"][i],
)
        except Exception:  # skip malformed OCR rows rather than crashing
continue
# Adjust the text to an 8.5 by 11 inch page
sub = ((11 * 72) / h) * int(hh)
x = ((8.5 * 72) / w) * int(x)
y = ((11 * 72) / h) * int(y)
yy = (11 * 72) - y
if int(page) == int(pg):
can.drawString(x, yy - sub, letter)
can.showPage()
can.save()
packet.seek(0)
return packet
| 30.528902 | 87 | 0.619142 | 508 | 0.048092 | 0 | 0 | 0 | 0 | 0 | 0 | 4,204 | 0.397993 |
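The two coercion helpers above mirror Django's `force_bytes`/`force_text`. A round-trip sketch of the common cases:

```python
assert force_bytes("café") == b"caf\xc3\xa9"               # str -> UTF-8 bytes
assert force_bytes(b"raw") == b"raw"                       # already-encoded bytes pass through
assert force_text(b"caf\xc3\xa9") == "café"                # bytes -> str
assert force_text(b"\xff", errors="replace") == "\ufffd"   # lenient decoding
```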
9e0e1c62ee116428b55cffa380260139fb9ea5d8 | 906 | py | Python | src/xrl/env_tester.py | k4ntz/XmodRL | dffb416bcd91010d8075ee1ac00cc4b9a3021967 | ["MIT"] | null | null | null | src/xrl/env_tester.py | k4ntz/XmodRL | dffb416bcd91010d8075ee1ac00cc4b9a3021967 | ["MIT"] | null | null | null | src/xrl/env_tester.py | k4ntz/XmodRL | dffb416bcd91010d8075ee1ac00cc4b9a3021967 | ["MIT"] | 1 | 2021-11-10T18:09:27.000Z | 2021-11-10T18:09:27.000Z

import gym
import numpy as np
import os
import random
import matplotlib.pyplot as plt
from atariari.benchmark.wrapper import AtariARIWrapper
# YarsRevenge
#
env_name = "DemonAttackDeterministic-v4"
def print_labels(env_info):
# extract raw features
labels = env_info["labels"]
print(labels)
env = AtariARIWrapper(gym.make(env_name))
name = env.unwrapped.spec.id
#ballgame = any(game in name for game in ["Pong", "Tennis"])
print(np.int16(3))
n_actions = env.action_space.n
_ = env.reset()
obs, _, done, info = env.step(0)
r = 0
for t in range(50000):
plt.imshow(env.render(mode='rgb_array'), interpolation='none')
plt.plot()
plt.pause(0.0001) # pause a bit so that plots are updated
action = random.randint(0, n_actions - 1)
obs, reward, done, info = env.step(action)
r += reward
print(reward)
print_labels(info)
if(done):
break
print(r)
| 22.65 | 66 | 0.695364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.209482 |
9e0ef102e2826e6b9febd80bed5d0193a3687555 | 2711 | py | Python | packages/lrn/model/question.py | genropy/learn | 019286c1fa1548482f64ccbd91082e069ec62a56 | ["MIT"] | 3 | 2019-11-16T12:38:20.000Z | 2019-11-17T08:44:41.000Z | packages/lrn/model/question.py | genropy/learn | 019286c1fa1548482f64ccbd91082e069ec62a56 | ["MIT"] | null | null | null | packages/lrn/model/question.py | genropy/learn | 019286c1fa1548482f64ccbd91082e069ec62a56 | ["MIT"] | 5 | 2019-11-16T16:22:10.000Z | 2019-11-18T21:46:50.000Z |
# encoding: utf-8
from datetime import datetime
class Table(object):
def config_db(self,pkg):
tbl=pkg.table('question', pkey='id', name_long='!![en]Question', name_plural='!![en]Questions',caption_field='question')
self.sysFields(tbl, draftField=True)
tbl.column('question',name_long='!![en]Question', validate_notnull=True)
tbl.column('description', name_long='!![en]Description')
tbl.column('details', name_long='!![en]Details')
tbl.column('user_id',size='22', group='_', name_long='!![en]Inserted by'
).relation('adm.user.id', relation_name='myquestions',
mode='foreignkey', onDelete='raise')
tbl.column('approval_ts', dtype='DH', name_long='!![en]Approval TS')
tbl.column('approved_by_user_id', size='22', group='_', name_long='!![en]Approved by'
).relation('adm.user.id',
relation_name='approved_questions',
mode='foreignkey', onDelete='raise')
tbl.column('main_topic_id',size='22', group='_', name_long='!![en]Main topic'
).relation('topic.id',
relation_name='questions',
mode='foreignkey',
onDelete='setnull')
tbl.column('main_answer_id',size='22', group='_', name_long='!![en]Main answer'
).relation('answer.id',
relation_name='questions',
mode='foreignkey',
onDelete='setnull')
#tbl.formulaColumn('__protected_by_approval_ts',"""($approval_ts IS NOT NULL AND $approved_by_user_id!=:env_user_id)""",dtype='B')
def defaultValues(self):
user_id = self.db.currentEnv.get('user_id')
        # If the user has the right permissions, their questions and answers are not created as drafts
        if 'admin' in self.db.currentEnv['userTags']:  # a better, more refined condition could be used here
return dict( __is_draft = False,
approval_ts = datetime.now(),
approved_by_user_id = user_id,
user_id=user_id)
return dict(__is_draft=True, user_id = user_id)
def trigger_onUpdating(self, record, old_record):
        # When a record goes from draft to approved, set the approving user and the approval timestamp
if old_record['__is_draft'] and not record['__is_draft']:
record['approval_ts'] = datetime.now()
record['approved_by_user_id'] = self.db.currentEnv.get('user_id')
| 54.22 | 138 | 0.570638 | 2,652 | 0.977876 | 0 | 0 | 0 | 0 | 0 | 0 | 1,009 | 0.37205 |
9e11d3f12bcf35bac083ace9a1b7490250555694 | 3087 | py | Python | core/api/OxfordAPI.py | vimarind/Complete-GRE-Vocab | 6dc8bb8ed0506ed572edd1a01a456d9a27238c94 | ["MIT"] | null | null | null | core/api/OxfordAPI.py | vimarind/Complete-GRE-Vocab | 6dc8bb8ed0506ed572edd1a01a456d9a27238c94 | ["MIT"] | null | null | null | core/api/OxfordAPI.py | vimarind/Complete-GRE-Vocab | 6dc8bb8ed0506ed572edd1a01a456d9a27238c94 | ["MIT"] | null | null | null |
import json
import requests
from os import path
class OxfordAPI:
def __init__(self, app_id, app_key, cache_path):
self.app_id = app_id
self.app_key = app_key
self.cache_path = cache_path
def __parse_sense(self, word, sense):
for definition in sense.get('definitions', list()):
word.definitions.append(definition)
for example in sense.get('examples', list()):
word.examples.append(example.get('text', None))
for synonym in sense.get('synonyms', list()):
word.synonyms.append(synonym.get('text', None))
for subsense in sense.get('subsenses', list()):
self.__parse_sense(word, subsense)
def __parse_pronunciation(self, word, pronunciation):
audioFile = pronunciation.get('audioFile', None)
if audioFile is not None:
word.audio_file = audioFile
def __parse_entry(self, word, entry):
for pronunciation in entry.get('pronunciations', list()):
self.__parse_pronunciation(word, pronunciation)
for sense in entry.get('senses', list()):
self.__parse_sense(word, sense)
def __parse_lexical_entry(self, word, lexical_entry):
for entry in lexical_entry.get('entries', list()):
self.__parse_entry(word, entry)
def __parse_result(self, word, result):
for lexical_entry in result.get('lexicalEntries', list()):
self.__parse_lexical_entry(word, lexical_entry)
def __parse_word(self, word, data):
success = False
if data.get('error') is None:
for result in data.get('results', list()):
self.__parse_result(word, result)
success = True
return success
def __get_word_data(self, word):
filepath = self.cache_path + word.text + '.json'
with open(filepath, 'w') as file:
url = "https://od-api.oxforddictionaries.com/api/v2/words/en-us?q=" + word.text
r = requests.get(url, headers={"app_id": self.app_id, "app_key": self.app_key})
file.write(r.text)
return r.json()
def get_word(self, word):
"""
Populates the given word object with the relevant information from the Oxford Dictionary API. First, the word
is looked for in the cache folder, if it exists, load that data. Otherwise, the information is requested from
the OxfordAPI and stored in the cache folder.
:param word: The word object to be populated.
:return: A boolean indicating if the operation has been successful or not.
"""
success = False
if path.exists(self.cache_path):
filepath = self.cache_path + word.text + '.json'
if path.exists(filepath):
with open(filepath, 'r') as file:
data = json.load(file)
else:
data = self.__get_word_data(word)
success = self.__parse_word(word, data)
else:
print('OxfordAPI: Please provide a valid cache path.')
return success
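# Usage sketch (assumption: the class only needs a word object exposing
# `text`, the list attributes `definitions`/`examples`/`synonyms`, and an
# `audio_file` slot, matching the parsers above; note that `cache_path` is
# concatenated directly, so it should end with a path separator):
#
#     class Word:
#         def __init__(self, text):
#             self.text = text
#             self.definitions, self.examples, self.synonyms = [], [], []
#             self.audio_file = None
#
#     api = OxfordAPI(app_id='...', app_key='...', cache_path='cache/')
#     word = Word('laconic')
#     if api.get_word(word):
#         print(word.definitions[:1])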
| 38.111111 | 117 | 0.618724 | 3,033 | 0.982507 | 0 | 0 | 0 | 0 | 0 | 0 | 719 | 0.232912 |
9e1202ada111dfedf3e1239998ddc9e7e0c2bac2 | 2568 | py | Python | linked_list.py | bentsi/data-structures | ce4a3a49ec131550ec0b77875b8f0367addcca05 | ["Apache-2.0"] | null | null | null | linked_list.py | bentsi/data-structures | ce4a3a49ec131550ec0b77875b8f0367addcca05 | ["Apache-2.0"] | null | null | null | linked_list.py | bentsi/data-structures | ce4a3a49ec131550ec0b77875b8f0367addcca05 | ["Apache-2.0"] | 1 | 2021-01-10T15:41:50.000Z | 2021-01-10T15:41:50.000Z |
class Node:
def __init__(self, data=None):
self.data = data
self.next = None
class LinkedListIndexError(IndexError):
pass
class LinkedList:
def __init__(self):
self.head = Node()
def _get_last_node(self):
pointer = self.head
while pointer.next is not None:
pointer = pointer.next
return pointer
def get_last_node(self):
return self._get_last_node().data
def append(self, data):
new_node = Node(data=data)
last = self._get_last_node()
last.next = new_node
def print(self):
print(self.__str__())
def __str__(self):
pointer = self.head
idx = 0
ll_str = ""
while pointer.next is not None:
pointer = pointer.next
ll_str += f"{idx}: {pointer.data}\n"
idx += 1
return ll_str
def length(self):
pointer = self.head
counter = 0
while pointer.next is not None:
pointer = pointer.next
counter += 1
return counter
def _get(self, index):
pointer = self.head
counter = 0
if not(0 <= index < self.length()):
raise LinkedListIndexError(f"Index '{index}' does not exist")
while pointer.next is not None:
pointer = pointer.next
if counter == index:
return pointer
counter += 1
def get(self, index):
return self._get(index=index).data
def __getitem__(self, item):
return self.get(index=item)
def erase(self, index):
if index == 0:
prev = self.head
else:
prev = self._get(index=index - 1)
to_del = prev.next
prev.next = to_del.next
data = to_del.data
del to_del
return data
def set(self, index, new_data):
node = self._get(index=index)
node.data = new_data
def __del__(self):
length = self.length()
while length != 0:
self.erase(index=length - 1)
length -= 1
del self.head
if __name__ == '__main__':
ll = LinkedList()
ll.append(data="Fedor")
ll.append(data="Julia")
ll.append(data="Bentsi")
ll.print()
print("Length of the Linked list is: ", ll.length())
idx = 1
print(ll.get(index=idx))
print(f"Data at index {idx} is {ll[idx]}")
print("Deleted: ", ll.erase(index=0))
ll.append(data="Fedor")
ll.append(data="Bentsi")
ll.set(index=3, new_data="Tim Peters")
print(ll)
| 24.457143 | 73 | 0.550234 | 2,109 | 0.821262 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.077103 |
9e13f559b1f0c5960c9398a871cf6613d7ce918c | 5442 | py | Python | apps/login/views.py | kwarodom/bemoss_web_ui-1 | 6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92 | ["Unlicense"] | null | null | null | apps/login/views.py | kwarodom/bemoss_web_ui-1 | 6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92 | ["Unlicense"] | null | null | null | apps/login/views.py | kwarodom/bemoss_web_ui-1 | 6c65c49b8f52bc7d189c9f2391f9098ec0f2dd92 | ["Unlicense"] | null | null | null |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2016, Virginia Tech
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the authors and should not be
interpreted as representing official policies, either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the
United States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their employees,
nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty,
express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe
privately owned rights.
Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or
otherwise does not necessarily constitute or imply its endorsement, recommendation, favoring by the United States
Government or any agency thereof, or Virginia Tech - Advanced Research Institute. The views and opinions of authors
expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.
VIRGINIA TECH – ADVANCED RESEARCH INSTITUTE
under Contract DE-EE0006352
#__author__ = "BEMOSS Team"
#__credits__ = ""
#__version__ = "2.0"
#__maintainer__ = "BEMOSS Team"
#__email__ = "[email protected]"
#__website__ = "www.bemoss.org"
#__created__ = "2014-09-12 12:04:50"
#__lastUpdated__ = "2016-03-14 11:23:33"
'''
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.contrib import messages
import logging
logger = logging.getLogger("views")
def login_user(request):
print "inside login_user() method"
# Like before, obtain the context for the user's request.
context = RequestContext(request)
if request.method == 'POST':
# Gather the username and password provided by the user.
# This information is obtained from the login form.
username = request.POST['username']
password = request.POST['password']
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
user = authenticate(username=username, password=password)
# If we have a User object, the details are correct.
# If None (Python's way of representing the absence of a value), no user
# with matching credentials was found.
if user is not None:
# Is the account active? It could have been disabled.
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
request.session['zipcode'] = '22204'
logger.info("Login of user : %s", user.username)
return HttpResponseRedirect('/home/')
else:
# An inactive account was used - no logging in!
messages.error(request, 'This account has been disabled by the administrator.')
else:
# Bad login details were provided. So we can't log the user in.
print "Invalid login details: {0}, {1}".format(username, password)
messages.error(request, 'Your username/password information is incorrect.')
#return HttpResponse("Invalid login details supplied.")
return HttpResponseRedirect('/login/')
#render_to_response('login/login.html', {}, context)
else:
# The request is not a HTTP POST, so display the login form.
# This scenario would most likely be a HTTP GET.
return render_to_response('login/login.html', {}, context)
| 52.326923 | 118 | 0.717567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,126 | 0.757899 |
9e14d6737904e50f196708249c8435de6151b062 | 2768 | py | Python | custom_html_validator/custom_html_validator.py | koan-u/custom_html_validator | 1a6735146e64d3c346201d10eddfd9ebfe1377c2 | ["MIT"] | null | null | null | custom_html_validator/custom_html_validator.py | koan-u/custom_html_validator | 1a6735146e64d3c346201d10eddfd9ebfe1377c2 | ["MIT"] | null | null | null | custom_html_validator/custom_html_validator.py | koan-u/custom_html_validator | 1a6735146e64d3c346201d10eddfd9ebfe1377c2 | ["MIT"] | null | null | null |
from html.parser import HTMLParser
class CustomHTMLValidater(HTMLParser):
__SINGLE_TAGS = [
'area','base','br','col','embed',
'hr','img','input','keygen','link',
'meta','param','source','track','wbr'
]
def __init__(self):
HTMLParser.__init__(self)
self.reset(True)
def reset(self, tag_reset = False):
HTMLParser.reset(self)
self.__core = {
'status': 0,
'detail':'',
'detected_list':[]
}
if tag_reset:
self.__allowed_tags = []
return
def set_allowed_tags(self, __allowed_tags):
self.__allowed_tags = __allowed_tags
return
def handle_starttag(self,tag,attrs):
if self.__core['status'] == 0:
if not tag in self.__allowed_tags:
self.__core['status'] = -1
self.__core['detail'] = 'not_allowed_tag'
else:
for attr in attrs:
if not attr[0] in self.__allowed_tags[tag]:
self.__core['status'] = -1
self.__core['detail'] = 'not_allowed_attr'
return
detected = {
'tag': tag,
'attr': attrs,
'complete': False
}
self.__core['detected_list'].append(detected)
return
def handle_endtag(self,tag):
if self.__core['status'] == 0:
last_index = len(self.__core['detected_list']) - 1
for index in range(last_index, -1, -1):
data = self.__core['detected_list'][index]
if not data['complete']:
if data['tag'] == tag:
data['complete'] = True
return
elif data['tag'] in self.__SINGLE_TAGS:
data['complete'] = True
else:
break
self.__core['status'] = -1
self.__core['detail'] = 'Construction Error'
return
def close(self):
HTMLParser.close(self)
if self.__core['status'] == 0:
errored = False
for data in self.__core['detected_list']:
if not data['complete']:
if data['tag'] in self.__SINGLE_TAGS:
data['complete'] = True
continue
self.__core['status'] = -1
self.__core['detail'] = 'Construction Error'
errored = True
break
if not errored:
self.__core['status'] = 1
self.__core['detail'] = 'ok'
return self.__core
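# Usage sketch: allowed tags map a tag name to the list of attributes it
# may carry, matching the lookups in handle_starttag above.
#
#     v = CustomHTMLValidater()
#     v.set_allowed_tags({'p': ['class'], 'a': ['href'], 'br': []})
#     v.feed('<p class="intro">hi<br><a href="#">ok</a></p>')
#     print(v.close())   # -> {'status': 1, 'detail': 'ok', ...}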
| 33.756098 | 66 | 0.462789 | 2,731 | 0.986633 | 0 | 0 | 0 | 0 | 0 | 0 | 453 | 0.163656 |
9e14d840b0d68fa20db94e8f512ad11ba709e64f | 1841 | py | Python | boltfile.py | arahmanhamdy/bolt | 8f5d9b8149db833b54a7b353162b2c28a53c8aff | ["MIT"] | 15 | 2016-10-21T14:30:38.000Z | 2021-10-12T04:50:48.000Z | boltfile.py | arahmanhamdy/bolt | 8f5d9b8149db833b54a7b353162b2c28a53c8aff | ["MIT"] | 51 | 2016-02-05T01:24:32.000Z | 2019-12-09T16:52:20.000Z | boltfile.py | arahmanhamdy/bolt | 8f5d9b8149db833b54a7b353162b2c28a53c8aff | ["MIT"] | 6 | 2016-10-17T13:48:16.000Z | 2021-03-28T20:40:14.000Z |
import logging
import os.path
import bolt
import bolt.about
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
_src_dir = os.path.join(PROJECT_ROOT, 'bolt')
_test_dir = os.path.join(PROJECT_ROOT, 'test')
_output_dir = os.path.join(PROJECT_ROOT, 'output')
_coverage_dir = os.path.join(_output_dir, 'coverage')
config = {
'pip': {
'command': 'install',
'options': {
'r': './requirements.txt'
}
},
'delete-pyc': {
'sourcedir': _src_dir,
'recursive': True,
'test-pyc': {
'sourcedir': _test_dir,
}
},
'conttest' : {
'task': 'ut'
},
'mkdir': {
'directory': _output_dir,
},
'nose': {
'directory': _test_dir,
'ci': {
'options': {
'with-xunit': True,
'xunit-file': os.path.join(_output_dir, 'unit_tests_log.xml'),
'with-coverage': True,
'cover-erase': True,
'cover-package': 'bolt',
'cover-html': True,
'cover-html-dir': _coverage_dir,
'cover-branches': True,
}
}
},
'setup': {
'command': 'bdist_wheel',
'egg-info': {
'command': 'egg_info'
}
},
'coverage': {
'task': 'nose',
'include': ['bolt'],
'output': os.path.join(_output_dir, 'ut_coverage')
}
}
# Development tasks
bolt.register_task('clear-pyc', ['delete-pyc', 'delete-pyc.test-pyc'])
bolt.register_task('ut', ['clear-pyc', 'nose'])
bolt.register_task('ct', ['conttest'])
bolt.register_task('pack', ['setup', 'setup.egg-info'])
# CI/CD tasks
bolt.register_task('run-unit-tests', ['clear-pyc', 'mkdir', 'nose.ci'])
# Default task (not final).
bolt.register_task('default', ['pip', 'ut'])
| 25.569444 | 78 | 0.523628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 689 | 0.374253 |
9e15279f716f2f3dcaf04f7c355aee058fe22e12 | 1120 | py | Python | pyxedit/xedit/object_classes/HDPT.py | leontristain/pyxedit | 4100506930ab6d62d6e2c4a89fee024dbbf87c7b | ["MIT"] | null | null | null | pyxedit/xedit/object_classes/HDPT.py | leontristain/pyxedit | 4100506930ab6d62d6e2c4a89fee024dbbf87c7b | ["MIT"] | 13 | 2019-04-09T06:14:22.000Z | 2020-07-03T07:35:30.000Z | pyxedit/xedit/object_classes/HDPT.py | leontristain/pyxedit | 4100506930ab6d62d6e2c4a89fee024dbbf87c7b | ["MIT"] | null | null | null |
from enum import Enum
from pyxedit.xedit.attribute import XEditAttribute
from pyxedit.xedit.generic import XEditGenericObject
class HeadPartTypes(Enum):
Misc = 'Misc'
Face = 'Face'
Eyes = 'Eyes'
Hair = 'Hair'
FacialHair = 'Facial Hair'
Scar = 'Scar'
Eyebrows = 'Eyebrows'
class XEditHeadPart(XEditGenericObject):
SIGNATURE = 'HDPT'
HeadPartTypes = HeadPartTypes
full = full_name = XEditAttribute('FULL')
modl = model_filename = XEditAttribute('Model\\MODL')
data = flags = XEditAttribute('DATA')
pnam = headpart_type = XEditAttribute('PNAM', enum=HeadPartTypes)
hnam = extra_parts = XEditAttribute('HNAM')
nam0 = parts = XEditAttribute('Parts')
tnam = texture_set = base_texture = XEditAttribute('TNAM')
cnam = color = XEditAttribute('CNAM')
rnam = valid_races = resource_list = XEditAttribute('RNAM')
@property
def file_paths(self):
files = ([f'Meshes\\{self.model_filename}'] +
[f'Meshes\\{part["NAM1"].value}' for part in self.parts or []])
return sorted(set([file_ for file_ in files if file_]))
| 31.111111 | 80 | 0.669643 | 987 | 0.88125 | 0 | 0 | 234 | 0.208929 | 0 | 0 | 184 | 0.164286 |
9e158c914469c96413a23f9b7926f662ec188191 | 1309 | py | Python | assignments/04_head/head.py | emma-huffman/biosystems-analytics-2020 | eaf9c084407fa6d25b815b7d63077ed9aec53447 | ["MIT"] | null | null | null | assignments/04_head/head.py | emma-huffman/biosystems-analytics-2020 | eaf9c084407fa6d25b815b7d63077ed9aec53447 | ["MIT"] | null | null | null | assignments/04_head/head.py | emma-huffman/biosystems-analytics-2020 | eaf9c084407fa6d25b815b7d63077ed9aec53447 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
"""
Author : Me <[email protected]>
Date : today
Purpose: Rock the Casbah
"""
import argparse
import io
import os
import sys
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n',
'--num',
help='Number of lines',
metavar='int',
type=int,
default=10)
    parser.add_argument('file',
                        help='Input File(s)',
                        nargs='+',
                        type=argparse.FileType('r'))
args = parser.parse_args()
if not args.num > 0:
parser.error(f'--num "{args.num}" must be greater than 0')
return args
# --------------------------------------------------
def main():
"""Make a jazz noise here"""
args = get_args()
for fh in args.file:
print(fh.name)
num_line = 0
for line in fh:
num_line += 1
print(line, end='')
if num_line == args.num:
break
# --------------------------------------------------
if __name__ == '__main__':
main()
| 23.375 | 66 | 0.450726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 437 | 0.333843 |
9e16d5b26d929cfae2b8a212b8915e0500128d25 | 1040 | py | Python | tests/CornerCasesTest.py | dpep/py_pluckit | be7c1cd6e2555234f08dd0cb6239db2c249562a4 | ["MIT"] | null | null | null | tests/CornerCasesTest.py | dpep/py_pluckit | be7c1cd6e2555234f08dd0cb6239db2c249562a4 | ["MIT"] | null | null | null | tests/CornerCasesTest.py | dpep/py_pluckit | be7c1cd6e2555234f08dd0cb6239db2c249562a4 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import os
import sys
import unittest
from collections import namedtuple
sys.path = [ os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')) ] + sys.path
from pluckit import pluck
class CornerCasesTest(unittest.TestCase):
def test_null_handle(self):
data = [1, 2, 3]
self.assertEqual([ None, None, None ], pluck(data, None))
def test_empty(self):
self.assertEqual([], pluck([], 'k'))
self.assertEqual({}, pluck({}, 'k'))
self.assertEqual(set(), pluck(set(), 'k'))
def test_null(self):
self.assertEqual(None, pluck(None, None))
self.assertEqual(None, pluck(None, 123))
def test_null_values(self):
data = {
None : [1, 2],
'b' : [3, 4],
'c' : [None, 5]
}
self.assertEqual(
{
None : 1,
'b' : 3,
'c' : None,
},
pluck(data, 0)
)
if __name__ == '__main__':
unittest.main()
| 21.22449 | 91 | 0.516346 | 772 | 0.742308 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.056731 |
9e177263a0ebd8c89321b840a71c84edfd2ea746 | 1520 | py | Python | src/lib/transforms/cross_drop.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | ["MIT"] | 44 | 2020-12-09T06:15:15.000Z | 2022-03-31T02:37:47.000Z | src/lib/transforms/cross_drop.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | ["MIT"] | null | null | null | src/lib/transforms/cross_drop.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | ["MIT"] | 7 | 2020-12-09T10:08:32.000Z | 2021-08-17T01:53:51.000Z |
from albumentations.core.transforms_interface import ImageOnlyTransform
import albumentations.augmentations.functional as F
import random
class CrossDrop(ImageOnlyTransform):
def __init__(self, max_h_cut=0.2, max_w_cut=0.2, fill_value=0, always_apply=False, p=0.5):
super(CrossDrop, self).__init__(always_apply, p)
self.max_h_cut = max_h_cut
self.max_w_cut = max_w_cut
self.fill_value = fill_value
def apply(self, image, fill_value=0, holes=(), **params):
return F.cutout(image, holes, fill_value)
def get_params_dependent_on_targets(self, params):
img = params["image"]
height, width = img.shape[:2]
y1 = int(random.random() * self.max_h_cut * height)
x1 = int(random.random() * self.max_w_cut * width)
y2 = int(random.random() * self.max_h_cut * height)
x2 = int(random.random() * self.max_w_cut * width)
y3 = int(random.random() * self.max_h_cut * height)
x3 = int(random.random() * self.max_w_cut * width)
y4 = int(random.random() * self.max_h_cut * height)
x4 = int(random.random() * self.max_w_cut * width)
return {"holes": [
(0, 0, x1, y1),
(width-x2, 0, width, y2),
(0, height-y3, x3, height),
(width-x4, height-y4, width, height)
]}
@property
def targets_as_params(self):
return ["image"]
def get_transform_init_args_names(self):
return ("max_h_cut", "max_w_cut")
| 33.777778 | 94 | 0.620395 | 1,380 | 0.907895 | 0 | 0 | 67 | 0.044079 | 0 | 0 | 43 | 0.028289 |
9e1798f13a1e5958c9273e51efaff12141f4e76c | 9497 | py | Python | js_components/cms_plugins.py | compoundpartners/js-components | a58a944254354078a0a7b53a4c9a7df50790267a | ["BSD-3-Clause"] | null | null | null | js_components/cms_plugins.py | compoundpartners/js-components | a58a944254354078a0a7b53a4c9a7df50790267a | ["BSD-3-Clause"] | null | null | null | js_components/cms_plugins.py | compoundpartners/js-components | a58a944254354078a0a7b53a4c9a7df50790267a | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import functools
import six
from django.utils.translation import ugettext_lazy as _
from django.template import TemplateDoesNotExist
from django.template.loader import select_template
from cms.plugin_base import CMSPluginBase, CMSPluginBaseMetaclass
from cms.plugin_pool import plugin_pool
from . import models, forms
from .utils.urlmatch import urlmatch
from .constants import (
HIDE_PROMO,
HIDE_PROMO_ROLLOVER,
HIDE_PROMO_VIDEO,
HIDE_TWITTER,
HIDE_COUNTERS,
HIDE_RAWHTML,
HIDE_GATED_CONTENT,
HIDE_FLOAT,
HIDE_LIGHTBOX,
CUSTOM_PLUGINS,
PROMO_CHILD_CLASSES,
)
class LayoutMixin():
def get_layout(self, context, instance, placeholder):
return instance.layout
def get_render_template(self, context, instance, placeholder):
layout = self.get_layout(context, instance, placeholder)
if layout:
template = self.TEMPLATE_NAME % layout
try:
select_template([template])
return template
except TemplateDoesNotExist:
pass
return self.render_template
def render(self, context, instance, placeholder):
context.update({
'instance': instance,
'placeholder': placeholder,
})
return context
class PromoUnitPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
TEMPLATE_NAME = 'js_components/promo_%s.html'
name = _('Promo Unit')
model = models.PromoUnit
form = forms.PromoUnitForm
render_template = 'js_components/promo.html'
change_form_template = 'admin/js_components/float.html'
allow_children = True if PROMO_CHILD_CLASSES else False
child_classes = PROMO_CHILD_CLASSES
main_fields = [
'layout',
'alignment',
'title',
'subtitle',
'color',
'image',
'svg',
'icon',
'content',
'rollover_content',
'background_video',
'link_text',
'link_url',
('file_src', 'show_filesize'),
'open_in_new_window',
'full_height',
]
if HIDE_PROMO_ROLLOVER:
main_fields.remove('rollover_content')
if HIDE_PROMO_VIDEO:
main_fields.remove('background_video')
fieldsets = [
(None, {
'fields': main_fields
}),
(_('Advanced settings'), {
'classes': ('collapse',),
'fields': (
'modal_id',
'attributes',
)
}),
]
if not HIDE_PROMO:
plugin_pool.register_plugin(PromoUnitPlugin)
class TwitterFeedPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
TEMPLATE_NAME = 'js_components/twitter_%s.html'
name = _('Twitter Feed')
model = models.TwitterFeed
form = forms.TwitterFeedForm
render_template = 'js_components/twitter.html'
if not HIDE_TWITTER:
plugin_pool.register_plugin(TwitterFeedPlugin)
class CountersContainerPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
    TEMPLATE_NAME = 'js_components/counters_%s.html'
    name = _('Counters Container (DO NOT USE, TO BE REMOVED)')
model = models.CountersContainer
form = forms.CountersContainerForm
render_template = 'js_components/counters.html'
allow_children = True
child_classes = ['CounterPlugin']
parent_classes = ['Bootstrap4GridRowPlugin']
class CounterPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
TEMPLATE_NAME = 'js_components/counter_%s.html'
name = _('Counter')
model = models.Counter
form = forms.CounterForm
render_template = 'js_components/counter.html'
if not HIDE_COUNTERS:
plugin_pool.register_plugin(CountersContainerPlugin)
plugin_pool.register_plugin(CounterPlugin)
#if 'Bootstrap4GridRowPlugin' in plugin_pool.plugins:
#plugin_pool.plugins['Bootstrap4GridRowPlugin'].child_classes.append('CountersContainerPlugin')
class RawHTMLPlugin(CMSPluginBase):
    module = 'JumpSuite Components'
name = _('Raw HTML')
model = models.RawHTML
render_template = 'js_components/html.html'
def render(self, context, instance, placeholder):
context.update({
'instance': instance,
'placeholder': placeholder,
'html': instance.body,
})
return context
class RawHTMLWithIDPlugin(CMSPluginBase):
    module = 'JumpSuite Components'
name = _('Raw HTML with ID')
model = models.RawHTMLWithID
render_template = 'js_components/html.html'
def render(self, context, instance, placeholder):
request = context['request']
html = instance.body
for param in instance.parameters.split(','):
param = param.strip()
key = '[%s]' % param.upper()
html = html.replace(key, request.GET.get(param) or request.POST.get(param, ''))
context.update({
'instance': instance,
'placeholder': placeholder,
'html': html,
})
return context
if not HIDE_RAWHTML:
plugin_pool.register_plugin(RawHTMLPlugin)
plugin_pool.register_plugin(RawHTMLWithIDPlugin)
@plugin_pool.register_plugin
class CustomPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
TEMPLATE_NAME = 'js_components/custom_%s.html'
name = _('Custom')
model = models.Custom
form = forms.CustomForm
render_template = 'js_components/custom.html'
def get_form(self, request, obj=None, **kwargs):
Form = super().get_form(request, obj=None, **kwargs)
if self.name in CUSTOM_PLUGINS:
Form.plugin_name=self.name
return Form
for name, parameters in CUSTOM_PLUGINS.items():
p = type(
str(name.replace(' ', '') + 'Plugin'),
(CustomPlugin,),
{'name': name},
)
plugin_pool.register_plugin(p)
class GatedContentPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
TEMPLATE_NAME = 'js_components/gated_content_%s.html'
name = _('Gated Content')
model = models.GatedContent
form = forms.GatedContentForm
render_template = 'js_components/gated_content.html'
allow_children = True
if not HIDE_GATED_CONTENT:
plugin_pool.register_plugin(GatedContentPlugin)
@plugin_pool.register_plugin
class AnimatePlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
TEMPLATE_NAME = 'js_components/animate_%s.html'
name = _('Animate')
model = models.Animate
form = forms.AnimateForm
render_template = 'js_components/animate.html'
allow_children = True
@plugin_pool.register_plugin
class JSFolderPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
TEMPLATE_NAME = 'js_components/folder_%s.html'
name = _('Filer listing')
model = models.Folder
form = forms.FolderForm
render_template = 'js_components/folder.html'
def render(self, context, instance, placeholder):
request = context['request']
files = []
if instance.folder:
files = instance.folder.files.all()
if instance.order_by:
files = files.order_by(instance.order_by)
context.update({
'instance': instance,
'placeholder': placeholder,
'files': files,
})
return context
@plugin_pool.register_plugin
class IncludeExcludeContainer(CMSPluginBase):
    module = 'JumpSuite Components'
name = _('Include/Exclude Container')
model = models.IncludeExcludeContainer
render_template = 'js_components/container.html'
change_form_template = 'admin/js_components/change_form_container.html'
allow_children = True
cache = False
def render(self, context, instance, placeholder):
request = context['request']
url = '%s://%s%s' % (request.scheme, request.META['HTTP_HOST'], request.path)
is_shown = urlmatch(','.join(instance.include.split('\n')), url) and not urlmatch(','.join(instance.exclude.split('\n')), url)
context.update({
'instance': instance,
'placeholder': placeholder,
'is_shown': is_shown,
})
return context
class FloatPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
name = _('Float Container')
model = models.Float
form = forms.FloatForm
render_template = 'js_components/float.html'
TEMPLATE_NAME = 'js_components/float_%s.html'
#change_form_template = 'admin/js_components/float.html'
allow_children = True
def get_layout(self, context, instance, placeholder):
return '' if instance.alignment in ['left', 'right', 'center'] else instance.alignment
def render(self, context, instance, placeholder):
context.update({
'instance': instance,
'placeholder': placeholder,
'alignment': instance.alignment,
})
return context
if not HIDE_FLOAT:
plugin_pool.register_plugin(FloatPlugin)
class LightboxPlugin(LayoutMixin, CMSPluginBase):
    module = 'JumpSuite Components'
TEMPLATE_NAME = 'js_components/lightbox_%s.html'
name = _('Lightbox')
model = models.Lightbox
form = forms.LightboxForm
render_template = 'js_components/lightbox.html'
allow_children = True
child_classes = ['Bootstrap4PicturePlugin']
if not HIDE_LIGHTBOX:
plugin_pool.register_plugin(LightboxPlugin)
| 29.958991 | 134 | 0.666737 | 7,698 | 0.810572 | 0 | 0 | 2,409 | 0.253659 | 0 | 0 | 2,120 | 0.223228 |
9e18ddf285ec21f8d58dafd4142a06363020741a | 1232 | py | Python | src/julia/tests/test_juliaoptions.py | dpinol/pyjulia | cec4bf0b0eac7e39cecd8f3e7882563062903d0f | ["MIT"] | 649 | 2016-09-09T07:38:19.000Z | 2022-03-28T04:30:55.000Z | src/julia/tests/test_juliaoptions.py | dpinol/pyjulia | cec4bf0b0eac7e39cecd8f3e7882563062903d0f | ["MIT"] | 362 | 2016-09-08T16:25:30.000Z | 2022-03-05T23:15:05.000Z | src/julia/tests/test_juliaoptions.py | dpinol/pyjulia | cec4bf0b0eac7e39cecd8f3e7882563062903d0f | ["MIT"] | 85 | 2016-11-08T09:32:44.000Z | 2022-03-03T13:10:37.000Z |
import pytest
from julia.core import JuliaOptions
# fmt: off
@pytest.mark.parametrize("kwargs, args", [
({}, []),
(dict(compiled_modules=None), []),
(dict(compiled_modules=False), ["--compiled-modules", "no"]),
(dict(compiled_modules="no"), ["--compiled-modules", "no"]),
(dict(depwarn="error"), ["--depwarn", "error"]),
(dict(sysimage="PATH"), ["--sysimage", "PATH"]),
(dict(bindir="PATH"), ["--home", "PATH"]),
])
# fmt: on
def test_as_args(kwargs, args):
assert JuliaOptions(**kwargs).as_args() == args
@pytest.mark.parametrize("kwargs", [
dict(compiled_modules="invalid value"),
dict(bindir=123456789),
])
def test_valueerror(kwargs):
with pytest.raises(ValueError) as excinfo:
JuliaOptions(**kwargs)
assert "Option" in str(excinfo.value)
assert "accept" in str(excinfo.value)
# fmt: off
@pytest.mark.parametrize("kwargs", [
dict(invalid_option=None),
dict(invalid_option_1=None, invalid_option_2=None),
])
# fmt: on
def test_unsupported(kwargs):
with pytest.raises(TypeError) as excinfo:
JuliaOptions(**kwargs)
assert "Unsupported Julia option(s): " in str(excinfo.value)
for key in kwargs:
assert key in str(excinfo.value)
| 28 | 65 | 0.655844 | 0 | 0 | 0 | 0 | 1,150 | 0.933442 | 0 | 0 | 251 | 0.203734 |
9e19326d841517afadd3d42542cc9b11a5c4a5d7 | 1578 | py | Python | tests/common.py | Algomorph/ext_argparse | fbca26f8a551f84677475a11fb5415ddda78abd9 | ["Apache-2.0"] | 1 | 2021-09-06T23:22:07.000Z | 2021-09-06T23:22:07.000Z | tests/common.py | Algomorph/ext_argparse | fbca26f8a551f84677475a11fb5415ddda78abd9 | ["Apache-2.0"] | 11 | 2021-09-07T14:13:39.000Z | 2021-09-29T15:17:46.000Z | tests/common.py | Algomorph/ext_argparse | fbca26f8a551f84677475a11fb5415ddda78abd9 | ["Apache-2.0"] | null | null | null |
import os
import pathlib
import typing
import pytest
from ext_argparse.parameter import Parameter
from ext_argparse.param_enum import ParameterEnum
from enum import Enum
class RoofMaterial(Enum):
SLATE = 0
METAL = 1
CONCRETE = 2
COMPOSITE = 3
SOLAR = 4
CLAY = 5
SYNTHETIC_BARREL = 6
SYNTHETIC_SLATE = 7
SYNTHETIC_CEDAR = 8
class HouseStyle(Enum):
CRAFTSMAN_BUNGALO = 0
CAPE_COD = 1
RANCH = 2
CONTEMPORARY = 3
QUEEN_ANNE = 4
COLONIAL_REVIVAL = 5
TUDOR_REVIVAL = 6
TOWNHOUSE = 7
PRAIRIE = 8
MID_CENTURY_MODERN = 9
NEOCLASSICAL = 10
MEDITERRANEAN = 11
class HouseRoofSettings(ParameterEnum):
year_changed = Parameter(arg_type=int, default=2010, arg_help="The last year when the roof tiles were changed.")
roof_material: typing.Type[RoofMaterial] = Parameter(arg_type=RoofMaterial, default=RoofMaterial.SLATE,
arg_help="Material of the roof tiles.")
class HouseParameters(ParameterEnum):
sturdiness = Parameter(arg_type=float, default=5.0, arg_help="Sturdiness of the house.", shorthand="stu")
year_built = Parameter(arg_type=int, default=2000, arg_help="The year the house was built.")
roof: typing.Type[HouseRoofSettings] = HouseRoofSettings
style = Parameter(arg_type=HouseStyle, default=HouseStyle.CRAFTSMAN_BUNGALO, arg_help="Style of da house.",
shorthand="sty")
@pytest.fixture
def test_data_dir():
    return os.path.join(pathlib.Path(__file__).parent.resolve(), "test_data")
| 28.690909 | 116 | 0.693916 | 1,278 | 0.809886 | 0 | 0 | 114 | 0.072243 | 0 | 0 | 176 | 0.111534 |
9e1b5f4b3183d1482047160b015715a1f35d97f0 | 389 | py | Python | lambda/exercices/PhotoCollector/photo_uploader_from_csv.py | Mythridor/aws-scripting | 5f978ae7f2b05a40862cbe35d766534fcc40fef0 | ["MIT"] | null | null | null | lambda/exercices/PhotoCollector/photo_uploader_from_csv.py | Mythridor/aws-scripting | 5f978ae7f2b05a40862cbe35d766534fcc40fef0 | ["MIT"] | null | null | null | lambda/exercices/PhotoCollector/photo_uploader_from_csv.py | Mythridor/aws-scripting | 5f978ae7f2b05a40862cbe35d766534fcc40fef0 | ["MIT"] | null | null | null |
#! /usr/local/bin/Python3.5
import urllib.request
with open("images.csv", 'r') as csv:
i = 0
    for line in csv:
        line = line.strip().split(',')
        if len(line) > 1 and line[1] != '':
            # urlretrieve expects str arguments, not utf-8-encoded bytes
            urllib.request.urlretrieve(line[1], "img_" + str(i) + ".jpg")
            print("Image saved")
            i += 1
    if i == 0:
        print("No result")
| 25.933333 | 107 | 0.524422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.277635 |
9e1b7c970fedf5252f5d2635e9703e31344e54e5 | 1031 | py | Python | src/main/tools/api.py | NGnius/streamq | aa31085befc7da2e3f7461698b2638a246a73eef | ["MIT"] | null | null | null | src/main/tools/api.py | NGnius/streamq | aa31085befc7da2e3f7461698b2638a246a73eef | ["MIT"] | null | null | null | src/main/tools/api.py | NGnius/streamq | aa31085befc7da2e3f7461698b2638a246a73eef | ["MIT"] | null | null | null |
'''
API-related functions in one spot for convenience
Created by NGnius 2019-06-15
'''
from flask import jsonify, request
from threading import Semaphore, RLock
def get_param(param, silent=False):
if request.method == 'GET':
return request.args.get(param)
else:
try:
return request.get_json(force=True, silent=silent)[param]
except KeyError:
return None
def error(status=500, reason=None):
error_response = {'status':status}
if reason is not None:
error_response['reason'] = reason
return jsonify(error_response), status
single_semaphores = dict()
resource_lock = RLock()
def start_single(identifier):
    # create the per-identifier semaphore while still holding the lock;
    # releasing it before the insert would let two threads race on creation
    with resource_lock:
        if identifier not in single_semaphores:
            single_semaphores[identifier] = Semaphore(1)
    single_semaphores[identifier].acquire()
def end_single(identifier):
    # use a context manager so the lock is always released
    # (the original acquired it here and never freed it)
    with resource_lock:
        single_semaphores[identifier].release()
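# Usage sketch: serialize request handlers that touch the same named
# resource (the identifier string is arbitrary):
#
#     start_single('queue')
#     try:
#         pass  # critical section
#     finally:
#         end_single('queue')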
| 26.435897 | 69 | 0.693501 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.104753 |
9e1ba32a4a9094fb6f7a46a889dbdd7f780c6321 | 323 | py | Python | run.py | kamidox/weixin_producthunt | 24269da93e75374ee481b1b78257b18abda4d0c7 | ["BSD-3-Clause"] | 10 | 2015-01-07T06:01:13.000Z | 2021-02-14T09:11:10.000Z | run.py | kamidox/weixin_producthunt | 24269da93e75374ee481b1b78257b18abda4d0c7 | ["BSD-3-Clause"] | 3 | 2015-01-01T09:56:04.000Z | 2015-01-06T01:34:44.000Z | run.py | kamidox/weixin_producthunt | 24269da93e75374ee481b1b78257b18abda4d0c7 | ["BSD-3-Clause"] | 5 | 2015-01-01T10:31:50.000Z | 2018-03-09T05:22:16.000Z |
"""
productporter
~~~~~~~~~~~~~~~~~~~~
helper for uwsgi
:copyright: (c) 2014 by the ProductPorter Team.
:license: BSD, see LICENSE for more details.
"""
from productporter.app import create_app
from productporter.configs.production import ProductionConfig
app = create_app(config=ProductionConfig())
| 23.071429 | 61 | 0.693498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.5387 |
9e1bee51dd0ea1878f4a4736c40b34f0977aa174 | 3968 | py | Python | built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/PyTorch/Official/cv/image_classification/MobileNetV1_ID0094_for_PyTorch/benchmark.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | ["Apache-2.0"] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z |
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.models as models
from torch.autograd import Variable
class MobileNet(nn.Module):
def __init__(self):
super(MobileNet, self).__init__()
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
self.model = nn.Sequential(
conv_bn( 3, 32, 2),
conv_dw( 32, 64, 1),
conv_dw( 64, 128, 2),
conv_dw(128, 128, 1),
conv_dw(128, 256, 2),
conv_dw(256, 256, 1),
conv_dw(256, 512, 2),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 512, 1),
conv_dw(512, 1024, 2),
conv_dw(1024, 1024, 1),
nn.AvgPool2d(7),
)
self.fc = nn.Linear(1024, 1000)
def forward(self, x):
x = self.model(x)
x = x.view(-1, 1024)
x = self.fc(x)
return x
def speed(model, name):
t0 = time.time()
input = torch.rand(1,3,224,224).npu()
input = Variable(input, volatile = True)
t1 = time.time()
model(input)
t2 = time.time()
model(input)
t3 = time.time()
print('%10s : %f' % (name, t3 - t2))
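# Note (assumption about intent): `volatile` is ignored on PyTorch >= 0.4;
# an equivalent sketch today would time the forward passes inside
# `with torch.no_grad():` instead of wrapping the input in Variable.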
if __name__ == '__main__':
#cudnn.benchmark = True # This will make network slow ??
resnet18 = models.resnet18().npu()
alexnet = models.alexnet().npu()
vgg16 = models.vgg16().npu()
squeezenet = models.squeezenet1_0().npu()
mobilenet = MobileNet().npu()
speed(resnet18, 'resnet18')
speed(alexnet, 'alexnet')
speed(vgg16, 'vgg16')
speed(squeezenet, 'squeezenet')
speed(mobilenet, 'mobilenet')
| 35.115044 | 80 | 0.618952 | 1,415 | 0.356603 | 0 | 0 | 0 | 0 | 0 | 0 | 1,775 | 0.447329 |
9e1da62e19fe4f3008c5d21f24d0decbe6f6039d | 1012 | py | Python | client/setup.py | nnabeyang/tepra-lite-esp32 | 69cbbafce6a3f8b0214178cc80d2fea024ab8c07 | ["MIT"] | 33 | 2021-09-04T08:46:48.000Z | 2022-02-04T08:12:55.000Z | client/setup.py | nnabeyang/tepra-lite-esp32 | 69cbbafce6a3f8b0214178cc80d2fea024ab8c07 | ["MIT"] | 2 | 2021-09-28T12:05:21.000Z | 2021-12-11T04:08:04.000Z | client/setup.py | nnabeyang/tepra-lite-esp32 | 69cbbafce6a3f8b0214178cc80d2fea024ab8c07 | ["MIT"] | 2 | 2021-09-28T10:51:27.000Z | 2021-12-10T09:56:22.000Z |
from setuptools import setup, find_packages
__version__ = '1.0.0'
__author__ = 'Takumi Sueda'
__author_email__ = '[email protected]'
__license__ = 'MIT License'
__classifiers__ = (
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
)
with open('README.md', 'r') as f:
readme = f.read()
setup(
name='tepracli',
version=__version__,
license=__license__,
author=__author__,
author_email=__author_email__,
url='https://github.com/puhitaku/tepra-lite-esp32/tree/master/client',
description='An example of tepra-lite-esp32 client / CLI',
long_description=readme,
long_description_content_type='text/markdown',
classifiers=__classifiers__,
packages=find_packages(),
package_data={'': ['assets/ss3.ttf']},
include_package_data=True,
install_requires=['click', 'pillow', 'qrcode[pil]', 'requests'],
)
| 29.764706 | 74 | 0.6917 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.43083 |
9e1db727da394433477d2ebb717048f5a98a0ab1 | 962 | py | Python | 03.Trabalhando_com_Arquivos/003.path_arquivos.py | heliton1986/Descubra_Python | 66738b295b0c5f526529ce0588fa3189eff110a1 | ["MIT"] | null | null | null | 03.Trabalhando_com_Arquivos/003.path_arquivos.py | heliton1986/Descubra_Python | 66738b295b0c5f526529ce0588fa3189eff110a1 | ["MIT"] | null | null | null | 03.Trabalhando_com_Arquivos/003.path_arquivos.py | heliton1986/Descubra_Python | 66738b295b0c5f526529ce0588fa3189eff110a1 | ["MIT"] | null | null | null |
# How to work with paths
from os import path
import time
def dadosArquivo():
    # Check whether the file exists
    arquivoExiste = path.exists("./03.Trabalhando_com_Arquivos/NovoArquivo.txt")
    # Check whether it is a directory
    ehDiretorio = path.isdir("./03.Trabalhando_com_Arquivos/NovoArquivo.txt")
    # Get the real (absolute) path of the file
    pathArquivo = path.realpath("./03.Trabalhando_com_Arquivos/NovoArquivo.txt")
    # Get the relative path of the file
    pathRelativo = path.relpath("./03.Trabalhando_com_Arquivos/NovoArquivo.txt")
    # Get the file's creation date
    dataCriacao = time.ctime(path.getctime("./03.Trabalhando_com_Arquivos/NovoArquivo.txt"))
    # Get the modification date
    dataModificacao = time.ctime(path.getmtime("./03.Trabalhando_com_Arquivos/NovoArquivo.txt"))
print(arquivoExiste)
print(ehDiretorio)
print(pathArquivo)
print(pathRelativo)
print(dataCriacao)
print(dataModificacao)
dadosArquivo()
| 31.032258 | 96 | 0.745322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 489 | 0.505165 |
9e210cf9cae77591487ca0d70ca7341aca8bd44a | 16303 | py | Python | src/colorpredicate.py | petrusmassabki/color-predicate | 828f62b50985cb795aa5b5743e4f7e5c305d2175 | ["MIT"] | null | null | null | src/colorpredicate.py | petrusmassabki/color-predicate | 828f62b50985cb795aa5b5743e4f7e5c305d2175 | ["MIT"] | null | null | null | src/colorpredicate.py | petrusmassabki/color-predicate | 828f62b50985cb795aa5b5743e4f7e5c305d2175 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import os
import colorsys
import cv2
import numpy as np
from scipy.stats import multivariate_normal
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class ColorPredicate:
def __init__(self, name, images_path, n_max=10):
self.name = name
self._total_pixel_count = 0
self.images = self.load_images(images_path, n_max)
self.masks = [255 * np.ones(image.shape[:2], np.uint8)
for image in self.images]
self._histogram_channels = None
self._histogram_color_space = None
self._bins = None
self._grid = None
self._ch_indexes = None
self._target_histogram = None
self._background_histogram = None
self._gaussian_smoothed_histogram = None
self._color_predicate = None
self.color_spaces = {
'hsv': cv2.COLOR_BGR2HSV
}
self.ch_ranges = {
'b': (0, 256), 'g': (0, 256), 'r': (0, 256),
'h': (0, 180), 's': (0, 256), 'v': (0, 256)
}
def load_images(self, path, n_max):
"""Load and return a list of up to `n_max` images from `path`."""
images_list = []
n_max = min(n_max, len(os.listdir(path)))
for filename in sorted(os.listdir(path))[:n_max]:
image = cv2.imread(os.path.join(path, filename))
if image is not None:
images_list.append(image)
self._total_pixel_count += image.shape[0] * image.shape[1]
return images_list
def load_masks(self, path):
"""Load and return a list of image masks from path."""
masks_list = []
n_images = len(self.images)
n_masks = len(os.listdir(path))
if n_masks >= len(self.images):
for filename in sorted(os.listdir(path))[:n_images]:
mask_gray = cv2.imread(os.path.join(path, filename), 0)
ret, mask = cv2.threshold(mask_gray, 127, 255,
cv2.THRESH_BINARY)
if mask is not None:
masks_list.append(mask)
self.masks = masks_list
else:
print(f'Directory must contain at least {n_images} image files, '
f'but only {n_masks} were provided. Masks will be ignored.')
@staticmethod
def sample_pixels(target_pixels, bg_pixels, target_sr, bg_rate):
"""Take a random sample of target and background pixels.
Parameters
----------
target_pixels : numpy.ndarray
Array of pixels from target region.
bg_pixels : numpy.ndarray
Array of pixels from background region.
target_sr : int or float
Target pixels sample rate (percentage of total target pixels).
bg_rate : int or float
Ratio of background to target pixels.
A value of 1.0 means equivalent distribution.
Returns
-------
target_pixels_sample : numpy.ndarray
Array of random samples from target region.
bg_pixels_sample : numpy.ndarray
Array of random samples from background region.
"""
n_target_pixels, n_bg_pixels = len(target_pixels), len(bg_pixels)
target_samples = n_target_pixels * target_sr
if n_bg_pixels > 0:
n_bg_samples = target_samples * bg_rate
target_bg_ratio = n_target_pixels / n_bg_pixels
if n_bg_samples > n_bg_pixels:
target_sr = n_bg_pixels / (n_target_pixels * bg_rate)
bg_sr = target_bg_ratio * target_sr * bg_rate
indexes_bg_samples = np.random.choice([0, 1],
size=n_bg_pixels,
p=[(1 - bg_sr), bg_sr])
bg_pixels_sample = bg_pixels[indexes_bg_samples == 1]
else:
bg_pixels_sample = bg_pixels
indexes_target_samples = np.random.choice([0, 1],
size=n_target_pixels,
p=[1 - target_sr, target_sr])
target_pixels_sample = target_pixels[indexes_target_samples == 1]
return target_pixels_sample, bg_pixels_sample
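    # Worked example (sketch): with 10,000 target and 40,000 background
    # pixels, target_sr=0.1 and bg_rate=1.0 draw roughly 1,000 samples from
    # each region; if the request would exceed the available background
    # pixels, target_sr is scaled down so the requested ratio still holds.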
def create_multidimensional_histogram(self, color_space='bgr',
ch_indexes=(0, 1, 2),
bins=(8, 8, 8),
target_sr=1.0,
bg_rate=1.0):
"""Create a multidimensional histogram of instance's images.
Color space can be either RGB or HSV. Dimension is set according
to `ch_indexes` length. Sampling can be specified.
Parameters
----------
color_space : str, optional
Histogram color space. Accepts `bgr` (default) or `hsv`.
ch_indexes : tuple, optional
Sequence of histogram channel indexes. Values refer to
`color_space` string order. E.g, use (0, 2) to create a
2D histogram of channels b and r.
bins : tuple, optional
Sequence of histogram bins. Must be of same length of `ch_indexes`.
target_sr : int or float
Target pixels sample rate (percentage of total target pixels).
bg_rate : int or float
Ratio of background to target pixels. A value of 1.0 means
equivalent distribution.
Returns
-------
self._target_histogram : numpy.ndarray
2D or 3D histogram of sampled target pixels
        self._background_histogram : numpy.ndarray
            2D or 3D histogram of sampled background pixels
"""
print('Computing histogram...', end=' ')
target_pixels_per_image, bg_pixels_per_image = [], []
if sorted(ch_indexes) in ([0, 1], [0, 2], [1, 2], [0, 1, 2]):
self._histogram_channels = [color_space[i] for i in ch_indexes]
hist_range = [self.ch_ranges[ch] for ch in self._histogram_channels]
else:
raise ValueError('Parameter "ch_indexes" must be a sequence '
'of unique integers between 0 and 2')
for image, mask in zip(self.images, self.masks):
if color_space != 'bgr':
image = cv2.cvtColor(image, self.color_spaces[color_space])
target_pixels_per_image.append(image[mask > 0])
bg_pixels_per_image.append(image[~mask > 0])
target_pixels = np.concatenate(target_pixels_per_image)
bg_pixels = np.concatenate(bg_pixels_per_image)
target_samples, bg_samples = self.sample_pixels(target_pixels,
bg_pixels,
target_sr,
bg_rate)
self._target_histogram, _ = np.histogramdd(target_samples[:, ch_indexes],
bins=bins,
range=hist_range)
self._background_histogram, _ = np.histogramdd(bg_samples[:, ch_indexes],
bins=bins,
range=hist_range)
self._bins = bins
self._histogram_color_space = color_space
self._ch_indexes = ch_indexes
print('Done!')
return self._target_histogram, self._background_histogram
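    # Usage sketch (hypothetical paths): a 2D hue/saturation histogram built
    # from masked training images.
    #
    #     cp = ColorPredicate('skin', 'data/images/')
    #     cp.load_masks('data/masks/')
    #     target_h, bg_h = cp.create_multidimensional_histogram(
    #         color_space='hsv', ch_indexes=(0, 1), bins=(30, 32))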
def pdf(self, mean, cov, domain):
"""Multidimensional probability density function."""
pdf = multivariate_normal.pdf(domain, mean=mean, cov=cov)
pdf = pdf.reshape(self._bins)
return pdf
def create_gaussian_smoothed_histogram(self,
t_amp=1.0,
t_cov=0.05,
bg_amp=1.0,
bg_cov=0.025,
threshold=0.01,
norm=True):
"""Create a 2D or 3D gaussian-smoothed histogram.
A gaussian-smoothed histogram is built from target and background
pixels according to [1]: for each pixel in target region, a normal
distribution centered at its position is added to the histogram;
similarly, for each pixel at background, a normal distribution is
subtracted. Finally, thresholding is applied: color frequencies below
threshold times maximum frequency are set to zero.
[1] `Finding skin in color images`, R. Kjeldsen and J. Kender.
Proceedings of the Second International Conference on Automatic Face
and Gesture Recognition, 1996. DOI:10.1109/AFGR.1996.557283
Parameters
----------
t_amp : float, optional
Amplitude of target's normal distribution. Default is 1.0.
t_cov : float, optional
Covariance of target's normal distribution. Default is 0.05.
bg_amp : float, optional
Amplitude of background's normal distribution. Default is 1.0.
bg_cov : float, optional
Covariance of background's normal distribution. Default is 0.025.
threshold : float, optional
Color frequencies below threshold times maximum frequency are
set to zero. Default is 0.01.
norm : bool, optional
When True, histogram is normalized by maximum frequency. Default
is True.
Returns
-------
self._gaussian_smoothed_histogram : numpy.ndarray
2D or 3D gaussian-smoothed histogram.
"""
print('Generating gaussian-smoothed histogram...', end=' ')
self._grid = np.mgrid[tuple([slice(0, b) for b in self._bins])]
domain = np.column_stack([axis.flat for axis in self._grid])
gauss_sum = np.zeros(self._bins, dtype=np.float32)
t_cov = t_cov * min(self._bins)
bg_cov = bg_cov * min(self._bins)
t_hist = self._target_histogram
bg_hist = self._background_histogram
for pos in np.argwhere(t_hist):
pdf = self.pdf(pos, t_cov, domain) * t_amp
gauss_sum += pdf * t_hist[tuple(pos)]
for pos in np.argwhere(bg_hist):
pdf = - self.pdf(pos, bg_cov, domain) * bg_amp
gauss_sum += pdf * bg_hist[tuple(pos)]
gauss_sum[gauss_sum < threshold * np.max(gauss_sum)] = 0
if norm:
gauss_sum = gauss_sum / np.max(gauss_sum)
self._gaussian_smoothed_histogram = gauss_sum
print('Done!')
return self._gaussian_smoothed_histogram
def create_color_predicate(self, threshold=0, save=False, filename='color_predicate'):
"""Create a color predicate from gaussian-smoothed histogram.
Parameters
----------
threshold : int or float, optional
            Histogram frequencies above threshold are set to one; frequencies
            at or below threshold are set to zero. Default is 0.
save : bool, optional
If true, color predicate is saved as a numpy array. Default is
False.
filename : str, optional
Color predicate file name. Default is `color_predicate`
Returns
-------
color_predicate : numpy.ndarray
Color predicate with the same dimension as the histogram.
"""
color_predicate = self._gaussian_smoothed_histogram.copy()
color_predicate[color_predicate > threshold] = 1
color_predicate[color_predicate <= threshold] = 0
if save:
np.save(filename, color_predicate)
return color_predicate
def plot_gaussian_smoothed_histogram(self, figsize=(8, 8), dpi=75, save=False):
"""Plot a 2D or 3D gaussian-smoothed histogram.
When 2D, creates a pseudocolor histogram; when 3D, each bin is
represented by a circle with size proportional to its frequency.
Parameters
----------
figsize : tuple, optional
Matplotlib's `figsize` parameter. Default is (8, 8).
dpi : int, optional
Matplotlib's `dpi` parameter. Default is 75.
save : bool, optional
When true, saves the plot as a png file.
"""
print('Plotting gaussian smoothed histogram...', end=' ')
grid = self._grid
ranges = self.ch_ranges
bins = self._bins
channels = self._histogram_channels
histogram = self._gaussian_smoothed_histogram
color_space = self._histogram_color_space
axis = [(ranges[ch][1] / bins[i]) * grid[i] + (ranges[ch][1] / bins[i]) / 2
for i, ch in enumerate(channels)]
if histogram.ndim == 3:
colors = np.vstack((axis[0].flatten() / ranges[channels[0]][1],
axis[1].flatten() / ranges[channels[1]][1],
axis[2].flatten() / ranges[channels[2]][1])).T
colors = colors[:, tuple([channels.index(ch) for ch in color_space])]
if color_space == 'hsv':
colors = np.array([colorsys.hsv_to_rgb(color[0], color[1], color[2])
for color in colors])
elif color_space == 'bgr':
colors = colors[:, ::-1]
fig = plt.figure(figsize=figsize, dpi=dpi)
ax = fig.add_subplot(111, projection='3d')
ax.title.set_position([0.5, 1.1])
ax.set_title(f'3D Color Histogram - '
f'{channels[0].title()} x '
f'{channels[1].title()} x '
f'{channels[2].title()}', fontsize=16)
ax.xaxis.set_tick_params(labelsize=8)
ax.yaxis.set_tick_params(labelsize=8)
ax.zaxis.set_tick_params(labelsize=8)
ax.set_xlim(ranges[channels[0]][0], ranges[channels[0]][1])
ax.set_ylim(ranges[channels[1]][0], ranges[channels[1]][1])
ax.set_zlim(ranges[channels[2]][0], ranges[channels[2]][1])
ax.set_xlabel(channels[0].title(), fontsize=12)
ax.set_ylabel(channels[1].title(), fontsize=12)
ax.set_zlabel(channels[2].title(), fontsize=12)
ax.view_init(azim=45)
ax.scatter(axis[0], axis[1], axis[2],
s=histogram * 1000,
c=colors)
if save:
ch_str = channels[0] + channels[1] + channels[2]
plt.savefig(f'{self.name}_3d_{ch_str}_histogram.png')
plt.show()
if self._gaussian_smoothed_histogram.ndim == 2:
fig = plt.figure(figsize=figsize, dpi=dpi)
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.set_title(f'2D Color Histogram - '
f'{channels[0].title()} x '
f'{channels[1].title()}')
ax.set_xlabel(channels[0].title(), fontsize=12)
ax.set_ylabel(channels[1].title(), fontsize=12, rotation=0)
h = ax.pcolormesh(axis[0], axis[1], histogram)
fig.colorbar(h, ax=ax)
if save:
ch_str = channels[0] + channels[1]
plt.savefig(f'{self.name}_2d_{ch_str}_histogram.png')
plt.show()
print('Done!')
@property
def total_pixel_count(self):
return self._total_pixel_count
@property
def gaussian_smoothed_histogram(self):
return self._gaussian_smoothed_histogram
@property
def true_pixels_histogram(self):
return self._target_histogram
@property
def false_pixels_histogram(self):
return self._background_histogram
@property
def color_predicate(self):
return self._color_predicate
def __str__(self):
description = f'''
{self.name.title()} color predicate.
Images: {len(self.images)}
Bins: {self._bins}
Color Space: {self._histogram_color_space}
Channels: {self._ch_indexes}
'''
return description
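# --- Added usage sketch (not part of the original file) ---
# Minimal, self-contained numpy/scipy illustration of the smoothing and
# thresholding steps implemented above; bin counts, positions and the
# threshold value are illustrative only.
def _demo_gaussian_smoothed_predicate():
    import numpy as np
    from scipy.stats import multivariate_normal
    bins = (16, 16)
    grid = np.mgrid[tuple(slice(0, b) for b in bins)]
    domain = np.column_stack([axis.flat for axis in grid])
    smoothed = np.zeros(bins, dtype=np.float32)
    for pos in [(4, 4), (10, 12)]:  # pretend target colors were observed here
        smoothed += multivariate_normal.pdf(domain, mean=pos, cov=2.0).reshape(bins)
    smoothed[smoothed < 0.01 * smoothed.max()] = 0  # thresholding step
    return (smoothed > 0).astype(np.uint8)          # binary color predicate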
| 37.825986 | 90 | 0.56235 | 16,098 | 0.987426 | 0 | 0 | 2,407 | 0.147642 | 0 | 0 | 5,784 | 0.354781 |
9e2177e43f0a318b03307485c7498c4b6cef36fa | 2,127 | py | Python | src/environments/migrations/0001_initial.py | nixplay/bullet-train-api | 608422d174443a4d9178d875ccaeb756a771e908 | [
"BSD-3-Clause"
]
| 1,259 | 2021-06-10T11:24:09.000Z | 2022-03-31T10:30:44.000Z | src/environments/migrations/0001_initial.py | nixplay/bullet-train-api | 608422d174443a4d9178d875ccaeb756a771e908 | [
"BSD-3-Clause"
]
| 392 | 2021-06-10T11:12:29.000Z | 2022-03-31T10:13:53.000Z | src/environments/migrations/0001_initial.py | nixplay/bullet-train-api | 608422d174443a4d9178d875ccaeb756a771e908 | [
"BSD-3-Clause"
]
| 58 | 2021-06-11T03:18:07.000Z | 2022-03-31T14:39:10.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-25 15:41
from __future__ import unicode_literals
import app.utils
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('projects', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Environment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')),
('name', models.CharField(max_length=2000)),
('created_date', models.DateTimeField(auto_now_add=True,
verbose_name='DateCreated')),
('api_key', models.CharField(default=app.utils.create_hash, max_length=100,
unique=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='environments', to='projects.Project')),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Identity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')),
('identifier', models.CharField(max_length=2000)),
('created_date', models.DateTimeField(auto_now_add=True,
verbose_name='DateCreated')),
('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='identities',
to='environments.Environment')),
],
options={
'ordering': ['id'],
'verbose_name_plural': 'Identities',
},
),
]
| 39.388889 | 99 | 0.487541 | 1,918 | 0.90174 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.173484 |
9e23d085a14f192cef141c0732be27df361cf10b | 4,456 | py | Python | tests/test_basic_train.py | maxwellmckinnon/fastai | b67bf7184ac2be1825697709051c5bcba058a40d | [
"Apache-2.0"
]
| 1 | 2019-04-08T09:52:28.000Z | 2019-04-08T09:52:28.000Z | tests/test_basic_train.py | maxwellmckinnon/fastai | b67bf7184ac2be1825697709051c5bcba058a40d | [
"Apache-2.0"
]
| null | null | null | tests/test_basic_train.py | maxwellmckinnon/fastai | b67bf7184ac2be1825697709051c5bcba058a40d | [
"Apache-2.0"
]
| 1 | 2020-05-19T12:56:20.000Z | 2020-05-19T12:56:20.000Z | """
module: basic_train.py - Model fitting methods
docs : https://docs.fast.ai/train.html
"""
import pytest, fastai
from fastai.vision import *
from utils.fakes import *
from utils.text import *
from utils.mem import *
from fastai.utils.mem import *
from math import isclose
torch_preload_mem()
@pytest.fixture(scope="module")
def data():
path = untar_data(URLs.MNIST_TINY)
data = ImageDataBunch.from_folder(path, ds_tfms=([], []), bs=2)
return data
# this is not a fixture on purpose - the memory measurement tests are very sensitive, so
# they need to be able to get a fresh learn object and not one modified by other tests.
def learn_large_unfit(data):
learn = create_cnn(data, models.resnet18, metrics=accuracy)
return learn
@pytest.fixture(scope="module")
def learn(data): return learn_large_unfit(data)
def test_get_preds():
learn = fake_learner()
with CaptureStdout() as cs:
a = learn.get_preds()
assert learn.data.batch_size == len(a[1])
def test_save_load(learn):
name = 'mnist-tiny-test-save-load'
# testing that all these various sequences don't break each other
model_path = learn.save(name, return_path=True)
learn.load(name, purge=True)
learn.data.sanity_check()
assert 709 == len(learn.data.train_ds)
learn.purge()
learn.load(name)
learn.load(name)
model_path = learn.save(name, return_path=True)
learn.load(name, purge=True)
# basic checks
#assert learn.recorder
assert learn.opt
assert 709 == len(learn.data.train_ds)
# XXX: could use more sanity checks
if os.path.exists(model_path): os.remove(model_path)
def check_mem_expected(used_exp, peaked_exp, mtrace, abs_tol=2, ctx=None):
used_received, peaked_received = mtrace.data()
ctx = f" ({ctx})" if ctx is not None else ""
assert isclose(used_exp, used_received, abs_tol=abs_tol), f"used mem: expected={used_exp} received={used_received}{ctx}"
assert isclose(peaked_exp, peaked_received, abs_tol=abs_tol), f"peaked mem: expected={peaked_exp} received={peaked_received}{ctx}"
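# Added illustration (hedged, not from the original tests): the mtrace
# argument only needs a .data() -> (used, peaked) method, so the check can
# be exercised without a GPU, e.g.:
#   class _FakeTrace:
#       def data(self): return (42, 8)
#   check_mem_expected(used_exp=42, peaked_exp=8, mtrace=_FakeTrace(), ctx="demo")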
def report_mem_real(used_exp, peaked_exp, mtrace, abs_tol=2, ctx=None):
ctx = f" ({ctx})" if ctx is not None else ""
print(f"{mtrace}{ctx}")
#check_mem_expected = report_mem_real
#@pytest.mark.skip(reason="WIP")
@pytest.mark.cuda
def test_save_load_mem_leak(data):
learn = learn_large_unfit(data)
name = 'mnist-tiny-test-save-load'
#learn.fit_one_cycle(1)
# A big difficulty with measuring memory consumption is that it varies quite
# wildly from one GPU model to another.
#
# Perhaps we need sets of different expected numbers per developer's GPUs?
# override check_mem_expected above with report_mem_real to acquire a new set
#
# So for now just testing the specific card I have until a better way is found.
dev_name = torch.cuda.get_device_name(None)
if dev_name != 'GeForce GTX 1070 Ti':
pytest.skip(f"currently only matched for mem usage on specific GPU models, {dev_name} is not one of them")
# save should consume no extra used or peaked memory
with GPUMemTrace() as mtrace:
model_path = learn.save(name, return_path=True)
check_mem_expected(used_exp=0, peaked_exp=0, mtrace=mtrace, abs_tol=10, ctx="save")
    # load w/ purge still leaks some memory the first time it's run
with GPUMemTrace() as mtrace:
learn.load(name, purge=True)
# XXX: very different numbers if done w/o fit first 42 8, w/ fit 24 16
check_mem_expected(used_exp=42, peaked_exp=8, mtrace=mtrace, abs_tol=10, ctx="load")
# subsequent multiple load w/o purge should consume no extra used memory
with GPUMemTrace() as mtrace:
learn.load(name, purge=False)
learn.load(name, purge=False)
check_mem_expected(used_exp=0, peaked_exp=20, mtrace=mtrace, abs_tol=10, ctx="load x 2")
# subsequent multiple load w/ purge should consume no extra used memory
with GPUMemTrace() as mtrace:
learn.load(name, purge=True)
learn.load(name, purge=True)
check_mem_expected(used_exp=0, peaked_exp=20, mtrace=mtrace, abs_tol=10, ctx="load x 2 2nd time")
# purge + load w/ default purge should consume no extra used memory
with GPUMemTrace() as mtrace:
learn.purge()
learn.load(name)
check_mem_expected(used_exp=0, peaked_exp=20, mtrace=mtrace, abs_tol=10, ctx="purge+load")
if os.path.exists(model_path): os.remove(model_path)
| 38.08547 | 134 | 0.710727 | 0 | 0 | 0 | 0 | 2,396 | 0.537702 | 0 | 0 | 1,641 | 0.368268 |
9e246f8197a11b73a278225e329244b30642e5a1 | 196 | py | Python | ch04/return.none.py | kxen42/Learn-Python-Programming-Third-Edition | 851ddc5e6094fadd44f31a9ad1d3876456b04372 | [
"MIT"
]
| 19 | 2021-11-05T22:54:09.000Z | 2022-03-29T15:03:47.000Z | ch04/return.none.py | kxen42/Learn-Python-Programming-Third-Edition | 851ddc5e6094fadd44f31a9ad1d3876456b04372 | [
"MIT"
]
| null | null | null | ch04/return.none.py | kxen42/Learn-Python-Programming-Third-Edition | 851ddc5e6094fadd44f31a9ad1d3876456b04372 | [
"MIT"
]
| 26 | 2021-11-12T17:04:50.000Z | 2022-03-29T01:10:35.000Z | # return.none.py
def func():
pass
func() # the return of this call won't be collected. It's lost.
a = func() # the return of this one instead is collected into `a`
print(a) # prints: None
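# Added note: a bare function body is equivalent to an explicit `return None`.
def func_explicit():
    return None  # what `func` above does implicitly

b = func_explicit()
print(b)  # prints: None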
| 24.5 | 66 | 0.663265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.714286 |
9e24c2357a655395c364e4237fd2c11303d74334 | 204 | py | Python | pyperi/__init__.py | takeontom/PyPeri | 181b6c60bf5ec5c57cd24418ee4524ed81c9a998 | [
"MIT"
]
| 5 | 2017-03-10T10:43:07.000Z | 2021-04-01T06:28:29.000Z | pyperi/__init__.py | takeontom/PyPeri | 181b6c60bf5ec5c57cd24418ee4524ed81c9a998 | [
"MIT"
]
| 359 | 2016-12-12T20:19:16.000Z | 2022-03-28T09:04:19.000Z | pyperi/__init__.py | takeontom/PyPeri | 181b6c60bf5ec5c57cd24418ee4524ed81c9a998 | [
"MIT"
]
| 3 | 2018-08-12T13:38:30.000Z | 2020-07-10T14:36:31.000Z | # -*- coding: utf-8 -*-
__author__ = """Tom Smith"""
__email__ = '[email protected]'
__version__ = '0.2.0'
from pyperi.pyperi import Peri # noqa
from pyperi.pyperi import PyPeriConnectionError # noqa
| 22.666667 | 55 | 0.70098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.372549 |
f508079561b7a2a57df3ea9bb24da6c3cf24ed29 | 13,454 | py | Python | examples/cartpole_example/test/cartpole_PID_MPC_sim.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
]
| 17 | 2019-11-15T06:27:05.000Z | 2021-10-02T14:24:25.000Z | examples/cartpole_example/test/cartpole_PID_MPC_sim.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
]
| null | null | null | examples/cartpole_example/test/cartpole_PID_MPC_sim.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
]
| 4 | 2020-09-03T17:01:34.000Z | 2021-11-05T04:09:24.000Z | import numpy as np
import scipy.sparse as sparse
from scipy.integrate import ode
from scipy.interpolate import interp1d
import time
import control
import control.matlab
import numpy.random
import pandas as pd
from ltisim import LinearStateSpaceSystem
from pendulum_model import *
from pyMPC.mpc import MPCController
# Reference model default parameters
k_def = 5.0
tau_def = 120e-3
Acl_c_def = np.array([[0,1,0], [0, 0, k_def], [0, 0, -1/tau_def]])
Bcl_c_def = np.array([[0],
[k_def],
[1/tau_def]
])
# PID default parameters
Ts_PID = 1e-3
# Reference trajectory
t_ref_vec = np.array([0.0, 5.0, 10.0, 20.0, 25.0, 30.0, 40.0, 100.0])
p_ref_vec = np.array([0.0, 0.0, 0.8, 0.8, 0.0, 0.0, 0.8, 0.8])
rp_fun = interp1d(t_ref_vec, p_ref_vec, kind='linear')
def xref_cl_fun_def(t):
return np.array([rp_fun(t), 0.0, 0.0])
# MPC parameters
Ts_MPC_def = 10e-3
Qx_def = 1.0 * sparse.diags([1.0, 0, 10.0]) # Quadratic cost for states x0, x1, ..., x_N-1
QxN_def = Qx_def
Qr_def = 0.0 * sparse.eye(1) # Quadratic cost for u0, u1, ...., u_N-1
QDr_def = 1e-1 / (Ts_MPC_def ** 2) * sparse.eye(1) # Quadratic cost for Du0, Du1, ...., Du_N-1
# Defaults
DEFAULTS_PENDULUM_MPC = {
'xref_cl_fun': xref_cl_fun_def,
'uref': np.array([0.0]), # N
'std_npos': 0*0.001, # m
'std_nphi': 0*0.00005, # rad
'std_dF': 0.05, # N
'w_F':20, # rad
'len_sim': 40, #s
'Acl_c': Acl_c_def,
'Bcl_c': Bcl_c_def,
'Ts_MPC': Ts_MPC_def,
'Np': 100,
'Nc': 50,
'Qx': Qx_def,
'QxN': QxN_def,
'Qr': Qr_def,
'QDr': QDr_def,
'Q_kal': np.diag([0.1, 10, 0.1, 10]),
'R_kal': 1*np.eye(2),
'QP_eps_abs': 1e-3,
'QP_eps_rel': 1e-3,
'seed_val': None
}
def get_parameter(sim_options, par_name):
return sim_options.get(par_name, DEFAULTS_PENDULUM_MPC[par_name])
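# Added demo (hedged): get_parameter falls back to DEFAULTS_PENDULUM_MPC
# when a key is absent from sim_options, e.g.:
#   get_parameter({}, 'len_sim')              -> 40 (default)
#   get_parameter({'len_sim': 5}, 'len_sim')  -> 5  (override)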
def get_default_parameters(sim_options):
""" Which parameters are left to default ??"""
default_keys = [key for key in DEFAULTS_PENDULUM_MPC if key not in sim_options]
return default_keys
def simulate_pendulum_MPC(sim_options):
seed_val = get_parameter(sim_options,'seed_val')
if seed_val is not None:
np.random.seed(seed_val)
# In[Sample times]
Ts_MPC = get_parameter(sim_options, 'Ts_MPC')
ratio_Ts = int(Ts_MPC // Ts_PID)
# In[Real System]
Cc = np.array([[1., 0., 0., 0.],
[0., 0., 1., 0.]])
Cd = np.copy(Cc)
nx, nu = 4,1
ny = 2
# In[initialize simulation system]
t0 = 0
phi0 = -0.0 * 2 * np.pi / 360 # initial angle
x0 = np.array([0, 0, phi0, 0]) # initial state
#system_dyn = ode(f_ODE_wrapped).set_integrator('vode', method='bdf') # dopri5
system_dyn = ode(f_ODE_wrapped).set_integrator('dopri5') # dopri5
# system_dyn = ode(f_ODE_wrapped).set_integrator('dopri5')
system_dyn.set_initial_value(x0, t0)
system_dyn.set_f_params(0.0)
#In[MPC params --model]
Acl_c = get_parameter(sim_options, 'Acl_c')
Bcl_c = get_parameter(sim_options, 'Bcl_c')
Ccl_c = np.array([[1., 0., 0],
[0., 0., 1]])
Dcl_c = np.zeros((2, 1))
ncl_x, ncl_u = Bcl_c.shape # number of states and number or inputs
#ncl_y = np.shape(Ccl_c)[0]
#In[MPC matrices discretization]
Acl_d = np.eye(ncl_x) + Acl_c*Ts_MPC
Bcl_d = Bcl_c*Ts_MPC
Ccl_d = Ccl_c
Dcl_d = Dcl_c
x0_cl = np.array([0,0,phi0])
M_cl = LinearStateSpaceSystem(A=Acl_d, B=Bcl_d, C=Ccl_d, D=Dcl_d, x0=x0_cl)
# MPC parameters
Np = get_parameter(sim_options, 'Np')
Nc = get_parameter(sim_options, 'Nc')
Qx = get_parameter(sim_options, 'Qx')
QxN = get_parameter(sim_options, 'QxN')
Qr = get_parameter(sim_options, 'Qr')
QDr = get_parameter(sim_options, 'QDr')
# Constraints
#xmin = np.array([-1.5, -100, -100])
#xmax = np.array([1.5, 100.0, 100])
#umin = np.array([-10])
#umax = np.array([10])
#Dumin = np.array([-100 * Ts_MPC_def])
#Dumax = np.array([100 * Ts_MPC_def])
QP_eps_rel = get_parameter(sim_options, 'QP_eps_rel')
QP_eps_abs = get_parameter(sim_options, 'QP_eps_abs')
# Emergency exit conditions
EMERGENCY_STOP = False
EMERGENCY_POS = 2.0
EMERGENCY_ANGLE = 30 * DEG_TO_RAD
# Reference input and states
xref_cl_fun = get_parameter(sim_options, 'xref_cl_fun') # reference state
xref_cl_fun_v = np.vectorize(xref_cl_fun, signature='()->(n)')
t0 = 0
xref_MPC = xref_cl_fun(t0)
uref = get_parameter(sim_options, 'uref')
uminus1 = np.array([0.0]) # input at time step negative one - used to penalize the first delta u at time instant 0. Could be the same as uref.
kMPC = MPCController(Acl_d, Bcl_d, Np=Np, Nc=Nc, x0=x0_cl, xref=xref_MPC, uminus1=uminus1,
Qx=Qx, QxN=QxN, Qu=Qr, QDu=QDr,
eps_feas=1e3, eps_rel=QP_eps_rel, eps_abs=QP_eps_abs)
try:
kMPC.setup(solve=True) # setup initial problem and also solve it
except:
EMERGENCY_STOP = True
if not EMERGENCY_STOP:
if kMPC.res.info.status != 'solved':
EMERGENCY_STOP = True
# In[initialize PID]
# Default controller parameters -
P = -100.0
I = -1
D = -20
N = 100.0
kP = control.tf(P,1, Ts_PID)
kI = I*Ts_PID*control.tf([0, 1], [1,-1], Ts_PID)
kD = D*control.tf([N, -N], [1.0, Ts_PID*N - 1], Ts_PID)
PID_tf = kP + kD + kI
PID_ss = control.ss(PID_tf)
k_PID = LinearStateSpaceSystem(A=PID_ss.A, B=PID_ss.B, C=PID_ss.C, D=PID_ss.D)
# In[initialize noise]
# Standard deviation of the measurement noise on position and angle
std_npos = get_parameter(sim_options, 'std_npos')
std_nphi = get_parameter(sim_options, 'std_nphi')
# Force disturbance
std_dF = get_parameter(sim_options, 'std_dF')
# Disturbance power spectrum
w_F = get_parameter(sim_options, 'w_F') # bandwidth of the force disturbance
tau_F = 1 / w_F
Hu = control.TransferFunction([1], [1 / w_F, 1])
Hu = Hu * Hu
Hud = control.matlab.c2d(Hu, Ts_PID)
N_sim_imp = tau_F / Ts_PID * 20
t_imp = np.arange(N_sim_imp) * Ts_PID
t, y = control.impulse_response(Hud, t_imp)
y = y[0]
std_tmp = np.sqrt(np.sum(y ** 2)) # np.sqrt(trapz(y**2,t))
Hu = Hu / (std_tmp) * std_dF
N_skip = int(20 * tau_F // Ts_PID) # skip initial samples to get a regime sample of d
t_sim_d = get_parameter(sim_options, 'len_sim') # simulation length (s)
N_sim_d = int(t_sim_d // Ts_PID)
N_sim_d = N_sim_d + N_skip + 1
e = np.random.randn(N_sim_d)
te = np.arange(N_sim_d) * Ts_PID
_, d, _ = control.forced_response(Hu, te, e)
d = d.ravel()
# Simulate in closed loop
len_sim = get_parameter(sim_options, 'len_sim') # simulation length (s)
nsim = int(len_sim // Ts_MPC) #int(np.ceil(len_sim / Ts_MPC)) # simulation length(timesteps) # watch out! +1 added, is it correct?
t_vec = np.zeros((nsim, 1))
status_vec = np.zeros((nsim,1))
x_vec = np.zeros((nsim, nx))
x_ref_vec = np.zeros((nsim, ncl_x))
y_vec = np.zeros((nsim, ny))
y_meas_vec = np.zeros((nsim, ny))
u_vec = np.zeros((nsim, nu))
x_model_vec = np.zeros((nsim,3))
nsim_fast = int(len_sim // Ts_PID)
t_vec_fast = np.zeros((nsim_fast, 1))
x_vec_fast = np.zeros((nsim_fast, nx)) # finer integration grid for performance evaluation
ref_phi_vec_fast = np.zeros((nsim_fast, 1))
y_meas_vec_fast = np.zeros((nsim_fast, ny))
    x_ref_vec_fast = np.zeros((nsim_fast, nx)) # finer integration grid for performance evaluation
    u_vec_fast = np.zeros((nsim_fast, nu)) # finer integration grid for performance evaluation
Fd_vec_fast = np.zeros((nsim_fast, nu)) #
t_int_vec_fast = np.zeros((nsim_fast, 1))
emergency_vec_fast = np.zeros((nsim_fast, 1)) #
t_step = t0
x_step = x0
u_PID = None
t_pred_all = t0 + np.arange(nsim + Np + 1) * Ts_MPC
Xref_MPC_all = xref_cl_fun_v(t_pred_all)
for idx_fast in range(nsim_fast):
## Determine step type: fast simulation only or MPC step
idx_MPC = idx_fast // ratio_Ts
run_MPC_controller = (idx_fast % ratio_Ts) == 0
y_step = Cd.dot(x_step) # y[i] from the system
ymeas_step = np.copy(y_step)
ymeas_step[0] += std_npos * np.random.randn()
ymeas_step[1] += std_nphi * np.random.randn()
y_meas_vec_fast[idx_fast,:] = ymeas_step
# Output for step i
# Ts_MPC outputs
if run_MPC_controller: # it is also a step of the simulation at rate Ts_MPC
if idx_MPC < nsim:
t_vec[idx_MPC, :] = t_step
y_vec[idx_MPC,:] = y_step
y_meas_vec[idx_MPC,:] = ymeas_step
u_vec[idx_MPC, :] = u_PID
x_model_vec[idx_MPC, :] = M_cl.x.ravel()
xref_MPC = xref_cl_fun(t_step)
x_ref_vec[idx_MPC,:] = xref_MPC.ravel()
if not EMERGENCY_STOP:
phi_ref_MPC, info_MPC = kMPC.output(return_status=True) # u[i] = k(\hat x[i]) possibly computed at time instant -1
else:
phi_ref_MPC = np.zeros(nu)
# PID angle CONTROLLER
ref_phi = phi_ref_MPC.ravel()
error_phi = ref_phi - ymeas_step[1]
u_PID = k_PID.output(error_phi)
u_PID[u_PID > 10.0] = 10.0
u_PID[u_PID < -10.0] = -10.0
u_TOT = u_PID
# Ts_fast outputs
t_vec_fast[idx_fast,:] = t_step
x_vec_fast[idx_fast, :] = x_step #system_dyn.y
u_vec_fast[idx_fast,:] = u_TOT
Fd_vec_fast[idx_fast,:] = 0.0
ref_phi_vec_fast[idx_fast,:] = ref_phi
## Update to step i+1
k_PID.update(error_phi)
# Controller simulation step at rate Ts_MPC
if run_MPC_controller:
M_cl.update(ref_phi)
if not EMERGENCY_STOP:
x_cl = np.array([x_step[0], x_step[1], x_step[2]])
Xref_MPC = Xref_MPC_all[idx_MPC:idx_MPC + Np + 1]
xref_MPC = Xref_MPC_all[idx_MPC]
kMPC.update(x_cl, phi_ref_MPC, xref=xref_MPC) # update with measurement and reference
# System simulation step at rate Ts_fast
time_integrate_start = time.perf_counter()
system_dyn.set_f_params(u_TOT)
system_dyn.integrate(t_step + Ts_PID)
x_step = system_dyn.y
t_int_vec_fast[idx_fast,:] = time.perf_counter() - time_integrate_start
# Time update
t_step += Ts_PID
simout = {'t': t_vec, 'x': x_vec, 'u': u_vec, 'y': y_vec, 'y_meas': y_meas_vec, 'x_ref': x_ref_vec, 'status': status_vec, 'Fd_fast': Fd_vec_fast,
't_fast': t_vec_fast, 'x_fast': x_vec_fast, 'x_ref_fast': x_ref_vec_fast, 'u_fast': u_vec_fast, 'y_meas_fast': y_meas_vec_fast, 'emergency_fast': emergency_vec_fast,
'PID_tf': PID_tf, 'Ts_MPC': Ts_MPC, 'ref_phi_fast': ref_phi_vec_fast, 'x_model': x_model_vec,
't_int_fast': t_int_vec_fast
}
return simout
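# Added usage sketch (hedged): any subset of DEFAULTS_PENDULUM_MPC keys may be
# overridden; the rest fall back via get_parameter(). Values are illustrative:
#   short_opts = {'len_sim': 5, 'seed_val': 42}
#   simout_short = simulate_pendulum_MPC(short_opts)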
if __name__ == '__main__':
import matplotlib.pyplot as plt
import matplotlib
plt.close('all')
simopt = DEFAULTS_PENDULUM_MPC
time_sim_start = time.perf_counter()
simout = simulate_pendulum_MPC(simopt)
time_sim = time.perf_counter() - time_sim_start
t = simout['t']
x = simout['x']
u = simout['u']
y = simout['y']
y_meas = simout['y_meas']
x_ref = simout['x_ref']
x_fast = simout['x_fast']
y_meas_fast = simout['y_meas_fast']
u_fast = simout['u_fast']
x_model = simout['x_model']
t_fast = simout['t_fast']
x_ref_fast = simout['x_ref_fast']
F_input = simout['Fd_fast']
status = simout['status']
ref_phi_fast = simout['ref_phi_fast']
uref = get_parameter(simopt, 'uref')
nsim = len(t)
nx = x.shape[1]
ny = y.shape[1]
y_ref = x_ref[:, [0, 2]]
fig,axes = plt.subplots(4,1, figsize=(10,10), sharex=True)
axes[0].plot(t, y_meas[:, 0], "b", label='p_meas')
axes[0].plot(t_fast, x_fast[:, 0], "k", label='p')
axes[0].plot(t, x_model[:, 0], "r", label='p model')
axes[0].plot(t, x_ref[:, 0], "k--", label='p reference')
axes[0].set_ylim(-2.0,2.0)
axes[0].set_title("Position (m)")
axes[1].plot(t_fast, x_fast[:, 1], "k", label='v')
axes[1].plot(t, x_model[:, 1], "r", label='v model')
axes[1].set_ylim(-3,3.0)
axes[1].set_title("Speed (m/s)")
axes[2].plot(t, y_meas[:, 1]*RAD_TO_DEG, "b", label='phi_meas')
axes[2].plot(t_fast, x_fast[:, 2]*RAD_TO_DEG, 'k', label="phi")
axes[2].plot(t, x_model[:, 2]*RAD_TO_DEG, "r", label='phi model')
axes[2].plot(t_fast, ref_phi_fast[:,0]*RAD_TO_DEG, "k--", label="phi_ref")
axes[2].set_ylim(-20,20)
axes[2].set_title("Angle (deg)")
axes[3].plot(t, u[:,0], label="F")
axes[3].plot(t_fast, F_input, "k", label="Fd")
axes[3].plot(t, uref*np.ones(np.shape(t)), "r--", label="F_ref")
axes[3].set_ylim(-20,20)
axes[3].set_title("Force (N)")
for ax in axes:
ax.grid(True)
ax.legend()
X = np.hstack((t_fast, x_fast, u_fast, y_meas_fast, F_input))
COL_T = ['time']
COL_X = ['p', 'v', 'theta', 'omega']
COL_U = ['u']
COL_D = ['d']
COL_Y = ['p_meas', 'theta_meas']
COL = COL_T + COL_X + COL_U + COL_Y + COL_D
df_X = pd.DataFrame(X, columns=COL)
df_X.to_csv("pendulum_data_PID.csv", index=False)
| 33.219753 | 179 | 0.612829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,084 | 0.229226 |
f508880d35de4e5c61d56d0000d4001cccea0293 | 163 | py | Python | server/controllers/main.py | eruixma/trading-app | 3db3bbd27fdc405b9ecb41d9e68330899a3dcc6a | [
"BSD-2-Clause"
]
| 2 | 2019-03-27T04:46:29.000Z | 2019-10-10T13:03:24.000Z | server/controllers/main.py | eruixma/trading-app | 3db3bbd27fdc405b9ecb41d9e68330899a3dcc6a | [
"BSD-2-Clause"
]
| 1 | 2021-06-01T23:31:21.000Z | 2021-06-01T23:31:21.000Z | server/controllers/main.py | eruixma/trading-app | 3db3bbd27fdc405b9ecb41d9e68330899a3dcc6a | [
"BSD-2-Clause"
]
| 1 | 2021-08-19T03:09:05.000Z | 2021-08-19T03:09:05.000Z | from flask import Blueprint, current_app
main = Blueprint('main', __name__)
@main.route('/')
def home():
return current_app.send_static_file('index.html')
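# Added usage sketch (hedged): minimal wiring of this blueprint into an app
# for local testing; the real project presumably registers it elsewhere.
if __name__ == '__main__':
    from flask import Flask
    _app = Flask(__name__)
    _app.register_blueprint(main)
    _app.run(debug=True)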
| 16.3 | 53 | 0.723926 | 0 | 0 | 0 | 0 | 82 | 0.503067 | 0 | 0 | 21 | 0.128834 |
f508dd3ad06395335728ce6e7db17f8e899fd2f6 | 2,221 | py | Python | pypad/collab.py | candyninja001/pypad | 82bfc104c2524ca54cc415d37d2c21fec471838f | [
"MIT"
]
| null | null | null | pypad/collab.py | candyninja001/pypad | 82bfc104c2524ca54cc415d37d2c21fec471838f | [
"MIT"
]
| null | null | null | pypad/collab.py | candyninja001/pypad | 82bfc104c2524ca54cc415d37d2c21fec471838f | [
"MIT"
]
| null | null | null | from enum import Enum
from .dev import Dev
class Collab(Enum):
UNKNOWN = -1
NONE = 0
RAGNAROK_ONLINE = 1
TAIKO_NO_TATSUJIN = 2
EMIL_CHRONICLE_ONLINE = 3
GUNMA_NO_YABOU = 5
CRYSTAL_DEFENDER = 6
FAMITSU = 7
PRINCESS_PUNT_SWEET = 8
ANDROID = 9
SHINRABANSHO_CHOCO = 10
CAPYBARA_SAN = 11
FREAK_TOWER = 12
SENGOKU_TENKA_TRIGGER = 13
EVANGELION = 14
SEVEN_ELEVEN = 15
CLASH_OF_CLANS = 16
GROOVE_COASTER = 17
RAGNAROK_ODYSSEY_ACE = 18
DRAGONS_DOGMA_QUEST = 19
TAKAOKA_CITY = 20
MONSTER_HUNTER_4G = 21
BATMAN = 22
THIRTY_ONE_ICECREAM = 23
ANGRY_BIRDS = 24
PUZZLE_AND_DRAGONS_Z = 25
HUNTER_X_HUNTER = 26
SANRIO_CHARACTERS = 27
PAD_BATTLE_TOURNAMENT = 28
BEAMS = 29
DRAGON_BALL = 30
SAINT_SEIYA = 31
ROAD_TO_DRAGON = 32
DIVINE_GATE = 33
SUMMONS_BOARD = 34
PICOTTO_KINGDOM = 35
BIKKURIMAN = 36
ANGRY_BIRDS_EPIC = 37
DC_UNIVERSE = 38
CHIBI_1 = 39 # first round chibis - three kingdoms series
FIST_OF_THE_NORTH_STAR = 40
CHIBI_2 = 41 # second round chibis
CHIBI_3 = 44 # third round chibis
FINAL_FANTASY = 45
GHOST_IN_THE_SHELL = 46
DUEL_MASTERS = 47
ATTACK_ON_TITAN = 48
NINJA_HATTORI_KUN = 49
SHONEN_SUNDAY = 50
CROWS_X_WORST = 51 # TODO VERIFY NO OVERLAP WITH VOLTRON
BLEACH = 52
ACE_ATTORNEY = 55
RUROUNI_KENSHIN = 56
PEPPER = 57
KINNIKUMAN = 58
HIRUNE_HIME = 59
MAGAZINE = 60
MONSTER_HUNTER = 61
KAIBUTSU_KUN = 62
VOLTRON = 63 # TODO VERIFY NO OVERLAP WITH CROW X WORST
FULLMETAL_ALCHEMIST = 65
KING_OF_FIGHTERS = 66
YU_YU_HAKUSHO = 67
PERSONA = 68
COCA_COLA = 69
MAGIC_THE_GATHERING = 70
CHRONO_MAGIA = 71
SEVENTH_REBIRTH = 72
CALCIO_FANTASISTA = 73
POWER_PROS = 74
GINTAMA = 75
SWORD_ART_ONLINE = 76
KAMEN_RIDER = 77
YOKAI_WATCH_W = 78
FATE_STAY_NIGHT = 79
STREET_FIGHTER_V = 80
UMAIBOU = 81
MC_DONALDS = 82
SHAMAN_KING = 83
ERROR_999 = 999
DRAGONBOUNDS_AND_DRAGON_CALLERS = 10001
@classmethod
def _missing_(cls, value):
Dev.log(f'Unknown collab: {value}')
        return Collab.UNKNOWN
| 24.677778 | 61 | 0.662765 | 2,177 | 0.980189 | 0 | 0 | 117 | 0.052679 | 0 | 0 | 190 | 0.085547 |
f50910b14f5b09655a9e1eaecc696a5cfe950b0f | 4,923 | py | Python | settings.py | msetzu/data-mining | 9e01d00964004dea4a2aea88dfe855f785302ef1 | [
"MIT"
]
| 1 | 2018-10-09T14:41:59.000Z | 2018-10-09T14:41:59.000Z | settings.py | msetzu/data-mining | 9e01d00964004dea4a2aea88dfe855f785302ef1 | [
"MIT"
]
| null | null | null | settings.py | msetzu/data-mining | 9e01d00964004dea4a2aea88dfe855f785302ef1 | [
"MIT"
]
| null | null | null | import pandas as pd
from matplotlib.colors import LinearSegmentedColormap
# Dataset
data = pd.read_csv("./hr.csv")
entries = len(data)
bins = 10
# Data analysis
analysis = {
"bins": 10,
"balance_threshold": 0.1
}
# Plot labels
labels = ["satisfaction_level",
"average_montly_hours",
"last_evaluation",
"time_spend_company",
"number_project",
"Work_accident",
"left",
"promotion_last_5years",
"sales",
"salary"]
pretty_prints = ["Self-reported satisfaction",
"AVG Monthly hours",
"Time since last valuation, in years",
"Time in company, in years",
"Projects",
"Accidents",
"Left",
"Promoted (last 5 years)",
"Department",
"Salary"]
short_pretty_prints = ["Injuries",
"Work hours",
"Last evaluation",
"Left",
"Projects",
"Promotion",
"Wage",
"Satisfaction",
"Years in company",
"Dpt."]
departments_pretty_prints = ["Information Technology",
"R&D",
"Accounting",
"Human Resources",
"Management",
"Marketing",
"Product Management",
"Sales",
"Support",
"Technical"]
labels_pretty_print = {k: v for k, v in zip(labels, pretty_prints)}
short_labels_pretty_print = {k: v for k, v in zip(labels, short_pretty_prints)}
labels_pretty_print["salary_int"] = "Salary"
continuous_labels = labels[0:2]
discrete_labels = labels[2:5]
categorical_labels = labels[5:-1]
ordinal_labels = labels[-1:]
correlated_labels = continuous_labels + discrete_labels + ["salary_int"]
categorical_labels_pretty_prints = {
"Work_accident": ("Not Injured", "Injured"),
"left": ("Stayed", "Left"),
"promotion_last_5years": ("Not promoted", "Promoted"),
"sales": tuple(departments_pretty_prints)
}
ordinal_labels_pretty_prints = {
"salary": ("Low", "Medium", "High"),
}
ordered_ordinal_vars = {
"salary": ["low", "medium", "high"]
}
departments = set(data["sales"])
# Scatter plot
scatter = {
"sampling_size": 100, # size of each sample
"samples": 5, # number of samples to extract
"edge_bins": 1, # edge bins possibly containing outliers
"bins": 10,
"replace": True
}
clusetering_types = ["normal", "discrete", "raw"]
# Graphs
palette = {
"main": "#FE4365",
"complementary": "#FC9D9A",
"pr_complementary": "#F9CDAD",
"sc_complementary": "#C8C8A9",
"secondary": "#83AF9B"
}
round_palette = {
"main": palette["secondary"],
"secondary": palette["complementary"],
"pr_complementary": palette["sc_complementary"],
"sc_complementary": palette["secondary"]
}
large_palette = {
"navy": "#001f3f",
"blue": "#0074D9",
"green": "#2ECC40",
"olive": "#3D9970",
"orange": "#FF851B",
"yellow": "#FFDC00",
"red": "#FF4136",
"maroon": "#85144b",
"black": "#111111",
"grey": "#AAAAAA"
}
large_palette_full = {
"navy": "#001f3f",
"blue": "#0074D9",
"aqua": "#7FDBFF",
"teal": "#39CCCC",
"olive": "#3D9970",
"green": "#2ECC40",
"lime": "#01FF70",
"yellow": "#FFDC00",
"orange": "#FF851B",
"red": "#FF4136",
"maroon": "#85144b",
"fuchsia": "#F012BE",
"purple": "#B10DC9",
"black": "#111111",
"grey": "#AAAAAA",
"silver": "#DDDDDD"
}
large_palette_stacked = {
"navy": "#001f3f",
"blue": "#0074D9",
"olive": "#3D9970",
"orange": "#FF851B",
"green": "#2ECC40",
"yellow": "#FFDC00",
"red": "#FF4136",
"maroon": "#85144b",
"black": "#111111",
"grey": "#AAAAAA",
"stack": large_palette["orange"]
}
cmap_pale_pink = LinearSegmentedColormap.from_list("Pale pink",
[palette["pr_complementary"], palette["main"]],
N=1000000)
cmap_pale_pink_and_green = LinearSegmentedColormap.from_list("Pale pink&green",
[palette["main"],
palette["complementary"],
palette["pr_complementary"],
palette["sc_complementary"],
palette["secondary"]],
N=1000000)
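# Added demo (hedged): quick visual check of the custom colormaps defined
# above; the random data and figure contents are illustrative only.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    plt.imshow(np.random.rand(10, 10), cmap=cmap_pale_pink_and_green)
    plt.colorbar()
    plt.show()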
| 30.018293 | 98 | 0.487508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,964 | 0.398944 |
f509ca15e0e12b426c5e187595364f7eea92a920 | 397 | py | Python | GCD - Euclidean (Basic)/Python3/gcdEuclid.py | i-vishi/ds-and-algo | 90a8635db9570eb17539201be29ec1cfd4b5ae18 | [
"MIT"
]
| 1 | 2021-03-01T04:15:08.000Z | 2021-03-01T04:15:08.000Z | GCD - Euclidean (Basic)/Python3/gcdEuclid.py | i-vishi/ds-and-algo | 90a8635db9570eb17539201be29ec1cfd4b5ae18 | [
"MIT"
]
| null | null | null | GCD - Euclidean (Basic)/Python3/gcdEuclid.py | i-vishi/ds-and-algo | 90a8635db9570eb17539201be29ec1cfd4b5ae18 | [
"MIT"
]
| null | null | null | # Author: Vishal Gaur
# Created: 17-01-2021 20:31:34
# function to find GCD using Basic Euclidean Algorithm
def gcdEuclid(a, b):
if a == 0:
return b
else:
return gcdEuclid(b % a, a)
# Driver Code to test above function
a = 14
b = 35
g = gcdEuclid(a, b)
print("GCD of", a, "&", b, "is: ", g)
a = 56
b = 125
g = gcdEuclid(a, b)
print("GCD of", a, "&", b, "is: ", g)
| 17.26087 | 54 | 0.566751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.450882 |
f50a95d4fbb66571658a68aa0a66854f9c5c4220 | 437 | py | Python | src/my_package/todelete/modules/MotionSymmetryModule.py | laomao0/AIM_DAIN | 8322569498d675d3b2c1f35475c1299cad580bde | [
"MIT"
]
| 3 | 2020-05-08T20:45:57.000Z | 2021-01-18T11:32:38.000Z | src/my_package/todelete/modules/MotionSymmetryModule.py | laomao0/AIM_DAIN | 8322569498d675d3b2c1f35475c1299cad580bde | [
"MIT"
]
| null | null | null | src/my_package/todelete/modules/MotionSymmetryModule.py | laomao0/AIM_DAIN | 8322569498d675d3b2c1f35475c1299cad580bde | [
"MIT"
]
| null | null | null | # modules/InterpolationLayer.py
from torch.nn import Module
from functions.MotionSymmetryLayer import MotionSymmetryLayer
class MotionSymmetryModule(Module):
def __init__(self):
super(MotionSymmetryModule, self).__init__()
self.f = MotionSymmetryLayer()
def forward(self, input1, input2):
return self.f(input1, input2)
#we actually dont need to write the backward code for a module, since we have
| 29.133333 | 81 | 0.741419 | 312 | 0.713959 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.24714 |
f50adc78c47a350acb70a96ee1ecc3c1259e77a6 | 2,997 | py | Python | steer.py | nlw0/pyPyrTools | 91fc0932906054f6d43a32a205069aa25d884545 | [
"MIT"
]
| 1 | 2020-10-13T14:40:39.000Z | 2020-10-13T14:40:39.000Z | steer.py | umeshraj/pyPyrTools | 91fc0932906054f6d43a32a205069aa25d884545 | [
"MIT"
]
| null | null | null | steer.py | umeshraj/pyPyrTools | 91fc0932906054f6d43a32a205069aa25d884545 | [
"MIT"
]
| 1 | 2018-04-26T10:06:47.000Z | 2018-04-26T10:06:47.000Z | import numpy
from steer2HarmMtx import steer2HarmMtx
def steer(*args):
''' Steer BASIS to the specfied ANGLE.
function res = steer(basis,angle,harmonics,steermtx)
BASIS should be a matrix whose columns are vectorized rotated copies
of a steerable function, or the responses of a set of steerable filters.
ANGLE can be a scalar, or a column vector the size of the basis.
HARMONICS (optional, default is N even or odd low frequencies, as for
derivative filters) should be a list of harmonic numbers indicating
the angular harmonic content of the basis.
STEERMTX (optional, default assumes cosine phase harmonic components,
and filter positions at 2pi*n/N) should be a matrix which maps
the filters onto Fourier series components (ordered [cos0 cos1 sin1
cos2 sin2 ... sinN]). See steer2HarmMtx.m
Eero Simoncelli, 7/96. Ported to Python by Rob Young, 5/14. '''
if len(args) < 2:
print 'Error: input parameters basis and angle are required!'
return
basis = args[0]
num = basis.shape[1]
angle = args[1]
if isinstance(angle, (int, long, float)):
angle = numpy.array([angle])
else:
if angle.shape[0] != basis.shape[0] or angle.shape[1] != 1:
print 'ANGLE must be a scalar, or a column vector the size of the basis elements'
return
# If HARMONICS are not passed, assume derivatives.
if len(args) < 3:
if num%2 == 0:
harmonics = numpy.array(range(num/2))*2+1
else:
            harmonics = numpy.array(range((num+1)/2))*2  # odd num: even harmonics 0,2,...
else:
harmonics = args[2]
if len(harmonics.shape) == 1 or harmonics.shape[0] == 1:
# reshape to column matrix
harmonics = harmonics.reshape(harmonics.shape[0], 1)
elif harmonics.shape[0] != 1 and harmonics.shape[1] != 1:
print 'Error: input parameter HARMONICS must be 1D!'
return
if 2*harmonics.shape[0] - (harmonics == 0).sum() != num:
print 'harmonics list is incompatible with basis size!'
return
# If STEERMTX not passed, assume evenly distributed cosine-phase filters:
if len(args) < 4:
steermtx = steer2HarmMtx(harmonics,
numpy.pi*numpy.array(range(num))/num,
'even')
else:
steermtx = args[3]
steervect = numpy.zeros((angle.shape[0], num))
arg = angle * harmonics[numpy.nonzero(harmonics)[0]].T
if all(harmonics):
steervect[:, range(0,num,2)] = numpy.cos(arg)
steervect[:, range(1,num,2)] = numpy.sin(arg)
else:
        steervect[:, 0] = numpy.ones(arg.shape[0])
        steervect[:, range(1,num,2)] = numpy.cos(arg)
        steervect[:, range(2,num,2)] = numpy.sin(arg)
steervect = numpy.dot(steervect,steermtx)
if steervect.shape[0] > 1:
tmp = numpy.dot(basis, steervect)
res = sum(tmp).T
else:
res = numpy.dot(basis, steervect.T)
return res
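# Added usage sketch (hedged): steering a two-filter basis; `basis` columns
# hold vectorized responses of rotated copies of one steerable filter.
#   basis = numpy.random.rand(64, 2)   # illustrative data only
#   res = steer(basis, numpy.pi / 4)   # steered response at 45 degrees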
| 34.448276 | 93 | 0.621288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,277 | 0.426093 |
f50baf297616723fd430fcda467e665dcd88c479 | 2,057 | py | Python | pydest/pydest.py | henworth/pydest | 9537696c39f36f8250082891ddcc0198142d22eb | [
"MIT"
]
| null | null | null | pydest/pydest.py | henworth/pydest | 9537696c39f36f8250082891ddcc0198142d22eb | [
"MIT"
]
| null | null | null | pydest/pydest.py | henworth/pydest | 9537696c39f36f8250082891ddcc0198142d22eb | [
"MIT"
]
| null | null | null | import aiohttp
import asyncio
import os
import zipfile
from pydest.api import API
from pydest.manifest import Manifest
class Pydest:
def __init__(self, api_key, loop=None, client_id=None, client_secret=None):
"""Base class for Pydest
Args:
api_key (str):
Bungie.net API key
loop [optional]:
AsyncIO event loop, if not passed one will be created
client_id (str) [optional]:
Bungie.net application client id
client_secret (str) [optional]:
Bungie.net application client id
"""
headers = {'X-API-KEY': api_key}
self._loop = asyncio.get_event_loop() if loop is None else loop
self._session = aiohttp.ClientSession(loop=self._loop, headers=headers)
self.api = API(self._session, client_id, client_secret)
self._manifest = Manifest(self.api)
async def decode_hash(self, hash_id, definition, language='en'):
"""Get the corresponding static info for an item given it's hash value from the Manifest
Args:
hash_id (str):
The unique identifier of the entity to decode
definition (str):
The type of entity to be decoded (ex. 'DestinyClassDefinition')
language (str):
The language to use when retrieving results from the Manifest
Returns:
json (dict)
Raises:
PydestException
"""
return await self._manifest.decode_hash(hash_id, definition, language)
async def update_manifest(self, language='en'):
"""Update the manifest if there is a newer version available
Args:
language (str) [optional]:
The language corresponding to the manifest to update
"""
await self._manifest.update_manifest(language)
async def close(self):
await self._session.close()
class PydestException(Exception):
pass
class PydestTokenException(Exception):
pass
| 29.811594 | 96 | 0.619349 | 1,928 | 0.937287 | 0 | 0 | 0 | 0 | 1,023 | 0.497326 | 1,115 | 0.542052 |
f50ea0f4e8fc7432c99f5e054323909249a31983 | 784 | py | Python | vaccinate/core/migrations/0120_geography_fields.py | MoralCode/vial | cdaaab053a9cf1cef40104a2cdf480b7932d58f7 | [
"MIT"
]
| 7 | 2021-06-28T17:33:47.000Z | 2022-02-12T21:54:59.000Z | vaccinate/core/migrations/0120_geography_fields.py | MoralCode/vial | cdaaab053a9cf1cef40104a2cdf480b7932d58f7 | [
"MIT"
]
| 104 | 2021-06-17T21:25:30.000Z | 2022-03-28T14:21:57.000Z | vaccinate/core/migrations/0120_geography_fields.py | MoralCode/vial | cdaaab053a9cf1cef40104a2cdf480b7932d58f7 | [
"MIT"
]
| 1 | 2021-06-25T17:52:23.000Z | 2021-06-25T17:52:23.000Z | # Generated by Django 3.2.1 on 2021-05-06 22:30
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("core", "0119_populate_source_location_point"),
]
operations = [
migrations.AlterField(
model_name="location",
name="point",
field=django.contrib.gis.db.models.fields.PointField(
blank=True, geography=True, null=True, srid=4326
),
),
migrations.AlterField(
model_name="sourcelocation",
name="point",
field=django.contrib.gis.db.models.fields.GeometryField(
blank=True, geography=True, null=True, srid=4326
),
),
]
| 27.034483 | 68 | 0.58801 | 656 | 0.836735 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.165816 |
f50f1a90c240661a8974cc7923b38f46dce70bae | 29,856 | py | Python | views.py | milos-korenciak/2018.ossconf.sk | f121dde4f313a207e39c2f2e187bdad046b86592 | [
"MIT"
]
| 7 | 2017-07-16T05:59:07.000Z | 2018-01-22T09:35:21.000Z | views.py | milos-korenciak/2018.ossconf.sk | f121dde4f313a207e39c2f2e187bdad046b86592 | [
"MIT"
]
| 17 | 2017-07-31T20:35:24.000Z | 2018-02-26T22:00:12.000Z | views.py | milos-korenciak/2018.ossconf.sk | f121dde4f313a207e39c2f2e187bdad046b86592 | [
"MIT"
]
| 13 | 2017-08-01T17:03:40.000Z | 2021-11-02T13:24:30.000Z | #!/usr/bin/python
# -*- coding: utf8 -*-
import os
import re
import textwrap
import requests
import unicodedata
from datetime import datetime, timedelta
from flask import Flask, g, request, render_template, abort, make_response
from flask_babel import Babel, gettext
from jinja2 import evalcontextfilter, Markup
app = Flask(__name__, static_url_path='/static')
app.config['BABEL_DEFAULT_LOCALE'] = 'sk'
app.jinja_options = {'extensions': ['jinja2.ext.with_', 'jinja2.ext.i18n']}
babel = Babel(app)
EVENT = gettext('PyCon SK 2018')
DOMAIN = 'https://2018.pycon.sk'
API_DOMAIN = 'https://api.pycon.sk'
LANGS = ('en', 'sk')
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S+00:00'
NOW = datetime.utcnow().strftime(TIME_FORMAT)
SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
LOGO_PYCON = 'logo/pycon_logo_square.svg'
LDJSON_SPY = {
"@type": "Organization",
"name": "SPy o. z.",
"url": "https://spy.pycon.sk",
"logo": "https://spy.pycon.sk/img/logo/spy-logo.png",
"sameAs": [
"https://facebook.com/pyconsk",
"https://twitter.com/pyconsk",
"https://www.linkedin.com/company/spy-o--z-",
"https://github.com/pyconsk",
]
}
LDJSON_PYCON = {
"@context": "http://schema.org",
"@type": "Event",
"name": EVENT,
"description": gettext("PyCon will be back at Slovakia in 2018 again. PyCon SK is a community-organized conference "
"for the Python programming language."),
"startDate": "2018-03-09T9:00:00+01:00",
"endDate": "2018-03-11T18:00:00+01:00",
"image": DOMAIN + "/static/img/logo/pycon_long_2018.png",
"location": {
"@type": "Place",
"name": "FIIT STU",
"address": {
"@type": "PostalAddress",
"streetAddress": "Ilkovičova 2",
"addressLocality": "Bratislava 4",
"postalCode": "842 16",
"addressCountry": gettext("Slovak Republic")
},
},
"url": DOMAIN,
"workPerformed": {
"@type": "CreativeWork",
"name": EVENT,
"creator": LDJSON_SPY
}
}
# calendar settings
ICAL_LEN = 70 # length of a calendar (ical) line
ICAL_NL = '\\n\n' # calendar newline
IGNORE_TALKS = ['Break', 'Coffee Break']
TYPE = {
'talk': gettext('Talk'),
'workshop': gettext('Workshop'),
}
TAGS = {
'ai': gettext('Machine Learning / AI'),
'community': gettext('Community / Diversity / Social'),
'data': gettext('Data Science'),
'devops': 'DevOps',
'docs': gettext('Documentation'),
'edu': gettext('Education'),
'generic': gettext('Python General'),
'security': gettext('Security'),
'softskills': gettext('Soft Skills'),
'hardware': gettext('Hardware'),
'web': gettext('Web Development'),
'other': gettext('Other'),
}
FRIDAY_START = datetime(2018, 3, 9, hour=9)
SATURDAY_START = datetime(2018, 3, 10, hour=9)
SUNDAY_START = datetime(2018, 3, 11, hour=10, minute=15)
FRIDAY_TRACK1 = (
{"pause": 5, 'title': gettext("Conference Opening"), 'duration': 25, 'flag': 'other', 'type': 'talk'},
{"pause": 15, 'title': gettext("FaaS and Furious - Zero to Serverless in 60 seconds - Anywhere")},
{"pause": 15, 'title': gettext("Docs or it didn't happen")},
{"pause": 5, 'title': gettext("GraphQL is the new black")},
{"pause": 60, 'title': gettext("To the Google in 80 Days")},
{"pause": 5, 'title': gettext("Unsafe at Any Speed")},
{"pause": 15, 'title': gettext("Protecting Privacy and Security — For Yourself and Your Community")},
{"pause": 5, 'title': gettext("ZODB: The Graph database for Python Developers.")},
{"pause": 15, 'title': gettext("Differentiable programming in Python and Gluon for (not only medical) image analysis")},
{"pause": 5, 'title': gettext("Vim your Python, Python your Vim")},
)
FRIDAY_TRACK2 = (
{"pause": 5, 'title': gettext("Conference Opening in Kiwi.com Hall"), 'duration': 25},
{"pause": 5, 'title': gettext("Python Days in Martin and follow-up activities")},
{"pause": 15, 'title': gettext("Python programming till graduation")},
{"pause": 5, 'title': gettext("Open educational resources for learning Python")},
{"pause": 60, 'title': gettext("About Ninjas and Mentors: CoderDojo in Slovakia")},
{"pause": 5, 'title': gettext("Community based courses")},
{"pause": 15, 'title': gettext("How do we struggle with Python in Martin?")},
{"pause": 5, 'title': gettext("Why hardware attracts kids and adults to IT")},
{"pause": 5, 'title': gettext("Panel discussion: Teaching IT in Slovakia - where is it heading?")},
{"pause": 5, 'title': gettext("EDU Talks"), 'duration': 30, 'language': 'SK', 'flag': 'edu', 'type': 'talk'},
)
FRIDAY_WORKSHOPS1 = (
{"pause": 10, 'title': gettext("How to create interactive maps in Python / R")},
{"pause": 60, 'title': gettext("Working with XML")},
{"pause": 5, 'title': gettext("Managing high-available applications in production")},
)
FRIDAY_WORKSHOPS2 = (
{"pause": 40, 'title': gettext("Workshop: An Introduction to Ansible")},
{"pause": 5, 'title': gettext("Introduction to Machine Learning with Python")},
)
FRIDAY_HALLWAY = (
{"pause": 0, 'title': gettext("OpenPGP key-signing party"), 'duration': 30, 'link': 'https://github.com/pyconsk/2018.pycon.sk/tree/master/openpgp-key-signing-party', 'flag': 'security'},
)
SATURDAY_TRACK1 = (
{"pause": 5, 'title': gettext("Conference Opening"), 'duration': 25, 'flag': 'other', 'type': 'talk'},
{"pause": 5, 'title': gettext("Solutions Reviews")},
{"pause": 15, 'title': gettext("Campaign Automation & Abusing Celery Properly")},
{"pause": 5, 'title': gettext("The Truth about Mastering Big Data")},
{"pause": 5, 'title': gettext("Industrial Machine Learning: Building scalable distributed machine learning pipelines with Python")},
{"pause": 25, 'title': gettext("Programming contest Semi finale"), 'duration': 30, 'flag': 'other', 'link': 'https://app.pycon.sk'},
{"pause": 5, 'title': gettext("Pythonic code, by example")},
{"pause": 15, 'title': gettext("Our DevOps journey, is SRE the next stop?")},
{"pause": 5, 'title': gettext("Implementing distributed systems with Consul")},
{"pause": 15, 'title': gettext("Designing fast and scalable Python MicroServices with django")},
{"pause": 5, 'title': gettext("When your wetware has too many threads - Tips from an ADHDer on how to improve your focus")},
{"pause": 5, 'title': gettext("Programming Python as performance: live coding with FoxDot")},
{"pause": 5, 'title': gettext("Programming Contest Grand Finale"), 'duration': 30, 'flag': 'other', 'type': 'talk', 'language': 'EN'},
{"pause": 5, 'title': gettext("Lightning Talks"), 'duration': 45, 'flag': 'other', 'type': 'talk'},
)
SATURDAY_TRACK2 = (
{"pause": 5, 'title': gettext("Conference Opening in Kiwi.com Hall"), 'duration': 25},
{"pause": 5, 'title': gettext("Meteo data in Python. Effectively.")},
{"pause": 15, 'title': gettext("Around the World in 30 minutes")},
{"pause": 5, 'title': gettext("LOCKED SHIELDS: What a good cyber testing looks like")},
{"pause": 60, 'title': gettext("Kiwi.com in ZOO")},
{"pause": 5, 'title': gettext("Keynote in Kiwi.com Hall"), 'duration': 30, 'flag': 'generic', 'type': 'talk'},
{"pause": 15, 'title': gettext("Skynet your Infrastructure with QUADS")},
{"pause": 5, 'title': gettext("Automated network OS testing")},
{"pause": 15, 'title': gettext("Tools to interact with Bitcoin and Ethereum")},
{"pause": 5, 'title': gettext("7 Steps to a Clean Issue Tracker")},
{"pause": 5, 'title': gettext("The Concierge Paradigm")},
)
SATURDAY_WORKSHOPS1 = (
{"pause": 55, 'title': gettext("Effectively running python applications in Kubernetes/OpenShift")},
{"pause": 5, 'title': gettext("Roboworkshop")},
)
SATURDAY_WORKSHOPS2 = (
{"pause": 55, 'title': gettext("Microbit:Slovakia")},
{"pause": 5, 'title': gettext("Coding in Python: A high-school programming lesson")},
)
SATURDAY_HALLWAY1 = (
{"pause": 0, 'title': gettext("Pandas documentation sprint"), 'duration': 360, 'link': 'https://python-sprints.github.io/pandas/', 'flag': 'docs'},
)
SATURDAY_HALLWAY2 = (
{"pause": 145, 'title': gettext("Programming contest"), 'duration': 95, 'flag': 'other', 'link': 'https://app.pycon.sk'},
{"pause": 5, 'title': gettext("Conference organizers meetup"), 'duration': 30, 'flag': 'community'},
)
SUNDAY_TRACK1 = (
{"pause": 5, 'title': gettext("Charon and the way out from a pickle hell")},
{"pause": 15, 'title': gettext("Making Python Behave")},
{"pause": 5, 'title': gettext("“Secret” information about the code we write")},
{"pause": 60, 'title': gettext("How to connect objects with each other in different situations with Pythonic ways - association, aggregation, composition and etc.")},
{"pause": 5, 'title': gettext("APIs: Gateway to world's data")},
{"pause": 15, 'title': gettext("Getting started with HDF5 and PyTables")},
{"pause": 5, 'title': gettext("Real-time personalized recommendations using embeddings")},
{"pause": 5, 'title': gettext("Quiz"), 'duration': 30, 'flag': 'other', 'type': 'talk'},
)
SUNDAY_WORKSHOPS1 = (
{"pause": 40, 'title': gettext("Real-time transcription and sentiment analysis of audio streams; on the phone and in the browser")},
{"pause": 5, 'title': gettext("Learn MongoDB by modeling PyPI in a document database")},
)
SUNDAY_WORKSHOPS2 = (
{"pause": 15, 'title': gettext("Testing Essentials for Scientists and Engineers")},
{"pause": 5, 'title': gettext("Cython: Speed up your code without going insane")},
)
SUNDAY_WORKSHOPS3 = (
{"pause": 15, 'title': gettext("Meet the pandas")},
{"pause": 5, 'title': gettext("Serverless with OpenFaaS and Python")},
)
SUNDAY_WORKSHOPS4 = (
{"pause": 5, 'title': gettext("Django Girls"), 'duration': 540, 'flag': 'web', 'type': 'workshop'},
)
SUNDAY_HALLWAY = (
{"pause": 5, 'title': gettext("Documentation clinic/helpdesk")},
)
AULA1 = {
'name': gettext('Kiwi.com Hall'),
'number': '-1.61',
}
AULA2 = {
'name': gettext('Python Software Foundation Hall'),
'number': '-1.65',
}
AULA3 = {
'name': gettext('SPy - Hall A'),
'number': '-1.57',
}
AULA4 = {
'name': gettext('SPy - Hall B'),
'number': '-1.57',
}
AULA5 = {
'name': gettext('Django Girls Auditorium'),
'number': '+1.31',
}
HALLWAY = {
'name': gettext('Hallway'),
'number': '',
}
def get_conference_data(url='', filters=''):
"""Connect to API and get public talks and speakers data."""
url = API_DOMAIN + url
if filters:
url = url + '&' + filters
r = requests.get(url)
return r.json()
API_DATA_SPEAKERS = get_conference_data(url='/event/2018/speakers/')
API_DATA_TALKS = get_conference_data(url='/event/2018/talks/')
@app.before_request
def before():
if request.view_args and 'lang_code' in request.view_args:
g.current_lang = request.view_args['lang_code']
if request.view_args['lang_code'] not in LANGS:
return abort(404)
request.view_args.pop('lang_code')
@babel.localeselector
def get_locale():
# try to guess the language from the user accept
# header the browser transmits. The best match wins.
# return request.accept_languages.best_match(['de', 'sk', 'en'])
return g.get('current_lang', app.config['BABEL_DEFAULT_LOCALE'])
@app.template_filter()
@evalcontextfilter
def linebreaks(eval_ctx, value):
"""Converts newlines into <p> and <br />s."""
value = re.sub(r'\r\n|\r|\n', '\n', value) # normalize newlines
paras = re.split('\n{2,}', value)
paras = [u'<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
paras = u'\n\n'.join(paras)
return Markup(paras)
@app.template_filter()
@evalcontextfilter
def linebreaksbr(eval_ctx, value):
"""Converts newlines into <p> and <br />s."""
value = re.sub(r'\r\n|\r|\n', '\n', value) # normalize newlines
paras = re.split('\n{2,}', value)
paras = [u'%s' % p.replace('\n', '<br />') for p in paras]
paras = u'\n\n'.join(paras)
return Markup(paras)
@app.template_filter()
@evalcontextfilter
def strip_accents(eval_ctx, value):
"""Strip non ASCII characters and convert them to ASCII."""
return unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode("utf-8")
def _get_template_variables(**kwargs):
"""Collect variables for template that repeats, e.g. are in body.html template"""
lang = get_locale()
variables = {
'title': EVENT,
'logo': LOGO_PYCON, # TODO: Do we need this?
'ld_json': LDJSON_PYCON
}
variables['ld_json']['url'] = DOMAIN + '/' + lang + '/'
variables.update(kwargs)
if 'current_lang' in g:
variables['lang_code'] = g.current_lang
else:
variables['lang_code'] = app.config['BABEL_DEFAULT_LOCALE']
return variables
def generate_track(api_data, track_data, start, flag=None):
"""Helper function to mix'n'match API data, with schedule order defined here, to generate schedule dict"""
template_track_data = []
for talk in track_data:
# Check if talk is in API
talk_api_data = next((item for item in api_data if item['title'] == talk['title']), None)
# If talk is not in API data we'll use text from track_data dict == same structure for template generation
if not talk_api_data:
talk_api_data = talk
if not flag or ('flag' in talk_api_data and flag == talk_api_data['flag']):
# Store data to be displayed in template
template_track_data.append({
"start": start,
"talk": talk_api_data
})
start = start + timedelta(minutes=talk_api_data.get('duration', 0))
# start = start + timedelta(minutes=talk_api_data['duration'])
if not flag:
# Generate break
break_name = gettext('Break')
if talk['pause'] in (40, 60):
break_name = gettext('Lunch 🍱')
if talk['pause'] in (15, 20):
break_name = gettext('Coffee Break ☕')
template_track_data.append({
'start': start,
'talk': {'title': break_name},
'css': 'break'
})
start = start + timedelta(minutes=talk['pause']) # break time does not comes from API always defined in track
return template_track_data
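# Added illustration (hedged): a minimal track in the shape generate_track()
# consumes. Titles are matched against the API payload, and each trailing
# `pause` drives the auto-inserted break slot; values are illustrative only.
#   _demo_track = (
#       {"pause": 15, 'title': 'Opening', 'duration': 25, 'type': 'talk'},
#       {"pause": 5, 'title': 'Some Talk'},
#   )
#   _demo = generate_track([], _demo_track, FRIDAY_START)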
def generate_schedule(api_data, flag=None):
return [
{
'room': AULA1,
'start': FRIDAY_START,
'schedule': generate_track(api_data, FRIDAY_TRACK1, FRIDAY_START, flag=flag),
'day': 'friday',
'block_start': True,
},
{
'room': AULA2,
'start': FRIDAY_START,
'schedule': generate_track(api_data, FRIDAY_TRACK2, FRIDAY_START, flag=flag),
'day': 'friday'
},
{
'room': AULA3,
'start': FRIDAY_START,
'schedule': generate_track(api_data, FRIDAY_WORKSHOPS1, FRIDAY_START+timedelta(minutes=30), flag=flag),
'day': 'friday'
},
{
'room': AULA4,
'start': FRIDAY_START,
'schedule': generate_track(api_data, FRIDAY_WORKSHOPS2, FRIDAY_START+timedelta(minutes=30), flag=flag),
'day': 'friday',
},
{
'room': HALLWAY,
'start': FRIDAY_START+timedelta(minutes=395),
'schedule': generate_track(api_data, FRIDAY_HALLWAY, FRIDAY_START+timedelta(minutes=395), flag=flag),
'day': 'saturday',
'block_end': True,
},
{
'room': AULA1,
'start': SATURDAY_START,
'schedule': generate_track(api_data, SATURDAY_TRACK1, SATURDAY_START, flag=flag),
'day': 'saturday',
'block_start': True,
},
{
'room': AULA2,
'start': SATURDAY_START,
'schedule': generate_track(api_data, SATURDAY_TRACK2, SATURDAY_START, flag=flag),
'day': 'saturday'
},
{
'room': AULA3,
'start': SATURDAY_START,
'schedule': generate_track(api_data, SATURDAY_WORKSHOPS1, SATURDAY_START+timedelta(minutes=30), flag=flag),
'day': 'saturday'
},
{
'room': AULA4,
'start': SATURDAY_START,
'schedule': generate_track(api_data, SATURDAY_WORKSHOPS2, SATURDAY_START+timedelta(minutes=30), flag=flag),
'day': 'saturday'
},
{
'room': HALLWAY,
'start': SATURDAY_START+timedelta(minutes=60),
'schedule': generate_track(api_data, SATURDAY_HALLWAY1, SATURDAY_START+timedelta(minutes=60), flag=flag),
'day': 'saturday',
},
{
'room': HALLWAY,
'start': SATURDAY_START+timedelta(minutes=30),
'schedule': generate_track(api_data, SATURDAY_HALLWAY2, SATURDAY_START+timedelta(minutes=30), flag=flag),
'day': 'saturday',
'block_end': True,
},
{
'room': AULA1,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_TRACK1, SUNDAY_START, flag=flag),
'day': 'sunday',
'block_start': True,
},
{
'room': AULA2,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_WORKSHOPS1, SUNDAY_START, flag=flag),
'day': 'sunday'
},
{
'room': AULA3,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_WORKSHOPS2, SUNDAY_START, flag=flag),
'day': 'sunday'
},
{
'room': AULA4,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_WORKSHOPS3, SUNDAY_START, flag=flag),
'day': 'sunday'
},
{
'room': AULA5,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_WORKSHOPS4, SUNDAY_START-timedelta(minutes=135), flag=flag),
'day': 'sunday',
},
{
'room': HALLWAY,
'start': SUNDAY_START,
'schedule': generate_track(api_data, SUNDAY_HALLWAY, SUNDAY_START+timedelta(minutes=45), flag=flag),
'day': 'sunday',
'block_end': True,
},
]
def _timestamp(dt=None):
if dt is None:
dt = datetime.now()
fmt = '%Y%m%dT%H%M%S'
return dt.strftime(fmt)
def _ignore_talk(title, names=IGNORE_TALKS):
    # we could paste the unicode symbols into the names directly, but stripping
    # trailing symbols keeps this check working even if a symbol changes
max_appended_symbols = 2
return any((title == name or title[:-(_len+1)] == name)
for _len in range(max_appended_symbols) for name in names)
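# Illustrative examples (the name is hypothetical): with 'Break' among the
# names, the titles 'Break', 'Break☕' (one appended symbol) and 'Break ☕'
# (space plus symbol) would all be ignored, since up to max_appended_symbols
# trailing characters are stripped before comparing.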
def _hash_event(track, slot):
room = track.get('room')
name = room.get('name')
ts = _timestamp(slot.get('start'))
_hash = str(hash('{name}:{ts}'.format(name=name, ts=ts)))
_hash = _hash.replace('-', '*')
return '-'.join(_hash[i*5:(i+1)*5] for i in range(4))
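# A sketch of the resulting uid shape (hash value is illustrative): if
# hash('Aula 1:20180309T090000') were -8437218947123456789, str() gives
# '-8437218947123456789', the minus sign becomes '*', and the first 20 chars
# are grouped by five: '*8437-21894-71234-56789'.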
def _normalize(text, tag=None, subsequent_indent=' ', **kwargs):
    # the tag must always be included so we can determine how much space is left on the first line
if tag:
max_width = ICAL_LEN - len(tag) - 1
else:
max_width = ICAL_LEN
text = text.strip().replace('\n', ICAL_NL)
return '\n'.join(textwrap.wrap(text, width=max_width, subsequent_indent=subsequent_indent, **kwargs))
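# A note on the wrapping above: iCalendar content lines are folded at 75
# octets per RFC 5545, with continuation lines starting with a space; the
# subsequent_indent=' ' default mirrors that (assuming ICAL_LEN, defined
# earlier in this file, is set to that limit).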
# CALENDAR FUNCTIONS
def generate_event(track, slot):
room = track.get('room')
location = '{name} ({number})'.format(**room)
talk = slot.get('talk')
summary = talk.get('title', 'N/A')
transp = 'OPAQUE'
if _ignore_talk(summary):
# skip breaks
# alternatively we can include breaks into talks (duration=duration+pause)
return {}
summary = _normalize(summary, 'SUMMARY')
start = slot.get('start')
duration = talk.get('duration', 0)
    # TODO add missing duration handling (nonzero default duration? title-based dictionary?)
dtend = _timestamp(start + timedelta(minutes=duration))
dtstart = _timestamp(start)
dtstamp = created = modified = _timestamp()
    # using event_uuid caused the event not to be imported into the calendar;
    # instead we hash 'name:start' and split the digest into dash-separated groups of 5
uid = _hash_event(track, slot)
author = ''
main_description = ''
tags = ''
speaker = talk.get('primary_speaker')
if speaker:
name = ' '.join([speaker.get(n, '') for n in ['first_name', 'last_name']])
author = '{name}{nl} {nl}'.format(name=name, nl=ICAL_NL)
    # this determines how many chars are left in the first line:
    # if an author is present we start at position 1, otherwise the line is prefixed with the tag:
desc_tag = 'DESCRIPTION' if not author else ''
abstract = talk.get('abstract', '')
if abstract:
main_description = _normalize(abstract, desc_tag, initial_indent=' ') + ICAL_NL
if 'flag' in talk:
tags = ' {nl} TAGS: {flag}'.format(nl=ICAL_NL, **talk)
description = author + main_description + tags
status = 'CONFIRMED'
    sequence = 0  # number of revisions; we keep the default zero even if the event changed
return {'dtstart': dtstart, 'dtend': dtend, 'dtstamp': dtstamp, 'created': created,
'last-modified': modified, 'uid': uid, 'location': location, 'sequence': sequence,
'description': description, 'status': status, 'summary': summary, 'transp': transp, }
@app.route('/<lang_code>/calendar.ics')
def generate_ics():
# https://tools.ietf.org/html/rfc5545#section-2.1
# https://en.wikipedia.org/wiki/ICalendar#Technical_specifications
omni_schedule = generate_schedule(API_DATA_TALKS)
events = []
uids = set()
for track in omni_schedule:
schedule = track.get('schedule')
for slot in schedule:
evt = generate_event(track, slot)
if evt and evt.get('uid') not in uids:
events.append(evt)
uids.update([evt.get('uid')])
calendar_ics = render_template('calendar.ics', events=events)
response = make_response(calendar_ics.replace('\n', '\r\n'))
response.headers["Content-Type"] = "text/calendar"
return response
@app.route('/<lang_code>/index.html')
def index():
return render_template('index.html', **_get_template_variables(li_index='active'))
@app.route('/<lang_code>/tickets.html')
def tickets():
return render_template('tickets.html', **_get_template_variables(li_tickets='active'))
@app.route('/<lang_code>/<flag>/<day>/schedule.html')
def schedule_day_filter(flag, day):
variables = _get_template_variables(li_schedule_nav='active', li_schedule='active')
variables['flag'] = flag
variables['day'] = day
variables['tags'] = TAGS
variables['all'] = {**TYPE, **TAGS}
variables['data'] = api_data = API_DATA_TALKS
variables['schedule'] = generate_schedule(api_data, flag=flag)
return render_template('schedule.html', **variables)
@app.route('/<lang_code>/<filter>/schedule.html')
def schedule_filter(filter):
variables = _get_template_variables(li_schedule_nav='active', li_schedule='active')
if filter in ('friday', 'saturday', 'sunday'):
variables['day'] = filter
variables['flag'] = None
else:
variables['flag'] = filter
variables['tags'] = TAGS
variables['all'] = {**TYPE, **TAGS}
variables['data'] = api_data = API_DATA_TALKS
variables['schedule'] = generate_schedule(api_data, flag=variables['flag'])
return render_template('schedule.html', **variables)
@app.route('/<lang_code>/schedule.html')
def schedule():
variables = _get_template_variables(li_schedule_nav='active', li_schedule='active')
variables['tags'] = TAGS
variables['all'] = {**TYPE, **TAGS}
variables['data'] = api_data = API_DATA_TALKS
variables['schedule'] = generate_schedule(api_data)
variables['disable_last'] = True
return render_template('schedule.html', **variables)
@app.route('/<lang_code>/<flag>/talks.html')
def talks_filter(flag):
variables = _get_template_variables(li_schedule_nav='active', li_talks='active')
variables['tags'] = TAGS
variables['all'] = {**TYPE, **TAGS}
variables['data'] = get_conference_data(url='/event/2018/talks/?flag=' + flag)
return render_template('talks.html', **variables)
@app.route('/<lang_code>/talks.html')
def talks():
variables = _get_template_variables(li_schedule_nav='active', li_talks='active')
variables['tags'] = TAGS
variables['all'] = {**TYPE, **TAGS}
variables['data'] = API_DATA_TALKS
return render_template('talks.html', **variables)
@app.route('/<lang_code>/speakers.html')
def speakers():
variables = _get_template_variables(li_schedule_nav='active', li_speakers='active')
variables['data'] = API_DATA_SPEAKERS
variables['tags'] = TAGS
variables['all'] = {**TYPE, **TAGS}
return render_template('speakers.html', **variables)
@app.route('/<lang_code>/speakers/<last_name>.html')
def profile(last_name):
variables = _get_template_variables(li_schedule_nav='active')
variables['tags'] = TAGS
variables['all'] = {**TYPE, **TAGS}
for speaker in API_DATA_SPEAKERS:
if speaker['last_name'] == last_name:
variables['speaker'] = speaker
break
variables['talks'] = []
for track in generate_schedule(API_DATA_TALKS):
for talk in track['schedule']:
            if ('primary_speaker' in talk['talk'] and
                    talk['talk']['primary_speaker']['last_name'] == variables['speaker']['last_name']) or (
                    'secondary_speaker' in talk['talk'] and
                    talk['talk']['secondary_speaker']['last_name'] == variables['speaker']['last_name']):
variables['talks'].append((track, talk))
break
return render_template('profile.html', **variables)
@app.route('/<lang_code>/cfp.html')
def cfp():
return render_template('cfp.html', **_get_template_variables(li_cfp='active'))
@app.route('/<lang_code>/coc.html')
def coc():
return render_template('coc.html', **_get_template_variables(li_coc='active'))
@app.route('/<lang_code>/hall-of-fame.html')
def hall_of_fame():
return render_template('hall-of-fame.html', **_get_template_variables(li_hall_of_fame='active'))
@app.route('/<lang_code>/venue.html')
def venue():
return render_template('venue.html', **_get_template_variables(li_venue='active'))
@app.route('/<lang_code>/sponsoring.html')
def sponsoring():
return render_template('sponsoring.html', **_get_template_variables(li_sponsoring='active'))
def get_mtime(filename):
"""Get last modification time from file"""
mtime = datetime.fromtimestamp(os.path.getmtime(filename))
return mtime.strftime(TIME_FORMAT)
SITEMAP_DEFAULT = {'prio': '0.1', 'freq': 'weekly'}
SITEMAP = {
'sitemap.xml': {'prio': '0.9', 'freq': 'daily', 'lastmod': get_mtime(__file__)},
'index.html': {'prio': '1', 'freq': 'daily'},
'schedule.html': {'prio': '0.9', 'freq': 'daily'},
'speakers.html': {'prio': '0.9', 'freq': 'daily'},
    'hall-of-fame.html': {'prio': '0.5', 'freq': 'weekly'},
'tickets.html': {'prio': '0.5', 'freq': 'weekly'},
}
def get_lastmod(route, sitemap_entry):
"""Used by sitemap() below"""
if 'lastmod' in sitemap_entry:
return sitemap_entry['lastmod']
template = route.rule.split('/')[-1]
template_file = os.path.join(SRC_DIR, 'templates', template)
if os.path.exists(template_file):
return get_mtime(template_file)
return NOW
@app.route('/sitemap.xml', methods=['GET'])
def sitemap():
"""Generate sitemap.xml. Makes a list of urls and date modified."""
pages = []
# static pages
for rule in app.url_map.iter_rules():
if "GET" in rule.methods:
if len(rule.arguments) == 0:
indx = rule.rule.replace('/', '')
sitemap_data = SITEMAP.get(indx, SITEMAP_DEFAULT)
pages.append({
'loc': DOMAIN + rule.rule,
'lastmod': get_lastmod(rule, sitemap_data),
'freq': sitemap_data['freq'],
'prio': sitemap_data['prio'],
})
elif 'lang_code' in rule.arguments:
indx = rule.rule.replace('/<lang_code>/', '')
for lang in LANGS:
alternate = []
for alt_lang in LANGS:
if alt_lang != lang:
alternate.append({
'lang': alt_lang,
'url': DOMAIN + rule.rule.replace('<lang_code>', alt_lang)
})
sitemap_data = SITEMAP.get(indx, SITEMAP_DEFAULT)
pages.append({
'loc': DOMAIN + rule.rule.replace('<lang_code>', lang),
'alternate': alternate,
'lastmod': get_lastmod(rule, sitemap_data),
'freq': sitemap_data['freq'],
'prio': sitemap_data['prio'],
})
sitemap_xml = render_template('sitemap_template.xml', pages=pages)
response = make_response(sitemap_xml)
response.headers["Content-Type"] = "text/xml"
return response
if __name__ == "__main__":
app.run(debug=True, host=os.environ.get('FLASK_HOST', '127.0.0.1'), port=int(os.environ.get('FLASK_PORT', 5000)),
use_reloader=True)
| 36.81381 | 190 | 0.612775 | 0 | 0 | 0 | 0 | 8,469 | 0.283548 | 0 | 0 | 11,783 | 0.394502 |
f50f7d07d1b11d4dc8dcf82534a3b3e6a3a87158 | 2,891 | py | Python | ietf/community/migrations/0002_auto_20141222_1749.py | ekr/ietfdb | 8d936836b0b9ff31cda415b0a423e3f5b33ab695 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| 2 | 2021-11-20T03:40:40.000Z | 2021-11-20T03:40:42.000Z | ietf/community/migrations/0002_auto_20141222_1749.py | ekr/ietfdb | 8d936836b0b9ff31cda415b0a423e3f5b33ab695 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| null | null | null | ietf/community/migrations/0002_auto_20141222_1749.py | ekr/ietfdb | 8d936836b0b9ff31cda415b0a423e3f5b33ab695 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('group', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('community', '0001_initial'),
('doc', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='rule',
name='cached_ids',
field=models.ManyToManyField(to='doc.Document'),
preserve_default=True,
),
migrations.AddField(
model_name='rule',
name='community_list',
field=models.ForeignKey(to='community.CommunityList'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='rule',
unique_together=set([('community_list', 'rule_type', 'value')]),
),
migrations.AddField(
model_name='listnotification',
name='event',
field=models.ForeignKey(to='doc.DocEvent'),
preserve_default=True,
),
migrations.AddField(
model_name='expectedchange',
name='community_list',
field=models.ForeignKey(to='community.CommunityList'),
preserve_default=True,
),
migrations.AddField(
model_name='expectedchange',
name='document',
field=models.ForeignKey(to='doc.Document'),
preserve_default=True,
),
migrations.AddField(
model_name='emailsubscription',
name='community_list',
field=models.ForeignKey(to='community.CommunityList'),
preserve_default=True,
),
migrations.AddField(
model_name='documentchangedates',
name='document',
field=models.ForeignKey(to='doc.Document'),
preserve_default=True,
),
migrations.AddField(
model_name='displayconfiguration',
name='community_list',
field=models.ForeignKey(to='community.CommunityList'),
preserve_default=True,
),
migrations.AddField(
model_name='communitylist',
name='added_ids',
field=models.ManyToManyField(to='doc.Document'),
preserve_default=True,
),
migrations.AddField(
model_name='communitylist',
name='group',
field=models.ForeignKey(blank=True, to='group.Group', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='communitylist',
name='user',
field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
]
| 32.483146 | 88 | 0.570391 | 2,749 | 0.950882 | 0 | 0 | 0 | 0 | 0 | 0 | 607 | 0.209962 |
f510f358811538f9c09860ccdb42030579e71a1a | 928 | py | Python | scripts/fishvalidate.py | justinbois/fishactivity | 6c6ac06c391b75b2725e2e2a61dd80afc34daf31 | [
"MIT"
]
| null | null | null | scripts/fishvalidate.py | justinbois/fishactivity | 6c6ac06c391b75b2725e2e2a61dd80afc34daf31 | [
"MIT"
]
| null | null | null | scripts/fishvalidate.py | justinbois/fishactivity | 6c6ac06c391b75b2725e2e2a61dd80afc34daf31 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import argparse
import fishact
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Validate data files.')
parser.add_argument('activity_fname', metavar='activity_file', type=str,
help='Name of activity file.')
parser.add_argument('gtype_fname', metavar='genotype_file', type=str,
help='Name of genotype file.')
args = parser.parse_args()
print('------------------------------------------------')
print('Checking genotype file...')
fishact.validate.test_genotype_file(args.gtype_fname)
print('------------------------------------------------\n\n\n')
print('------------------------------------------------')
print('Checking activity file...')
fishact.validate.test_activity_file(args.activity_fname, args.gtype_fname)
print('------------------------------------------------')
| 37.12 | 78 | 0.519397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.452586 |
f513b5c28a4eaca8eb08a50fccfcd5204171dfdc | 1,682 | py | Python | scripts/rescale.py | danydoerr/spp_dcj | 1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a | [
"MIT"
]
| 2 | 2021-08-24T16:03:30.000Z | 2022-03-18T14:52:43.000Z | scripts/rescale.py | danydoerr/spp_dcj | 1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a | [
"MIT"
]
| null | null | null | scripts/rescale.py | danydoerr/spp_dcj | 1ab9dacb1f0dc34a3ebbeed9e74226a9a53c297a | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
from sys import stdout,stderr,exit
from optparse import OptionParser
from newick_parser import parse_tree_iterator, Branch
from tree_span import calculateSpan
from copy import deepcopy
def rescale_absolute(tree, max_length):
span = calculateSpan(tree)
s = max_length/float(span)
return rescale(tree, s)
def rescale(tree, scale_factor):
res = deepcopy(tree)
stack = list()
stack.append(res.subtree)
while stack:
x = stack.pop()
if x.length:
x.length *= scale_factor
if type(x) == Branch:
stack.extend(x.subtrees)
return res
if __name__ == '__main__':
usage = 'usage: %prog [options] <NEWICK FILE>'
parser = OptionParser(usage=usage)
parser.add_option('-s', '--scale_factor', dest='scale_factor',
help='Scale factor of distances in tree',
type=float, default=0, metavar='FLOAT')
parser.add_option('-a', '--absolute_length', dest='absolute',
help='Absolute length of maximal distance in tree',
type=float, default=0, metavar='FLOAT')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
exit(1)
if not ((options.absolute > 0) ^ (options.scale_factor > 0)):
        print('!! Specify either scale factor or absolute length with a ' + \
                'strictly positive number', file = stderr)
exit(1)
for tree in parse_tree_iterator(open(args[0])):
if options.absolute > 0:
print(rescale_absolute(tree, options.absolute), file = stdout)
else:
print(rescale(tree, options.scale_factor), file = stdout)
| 29 | 75 | 0.633175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 314 | 0.186683 |
f51915e704bb43425413f02d24086079a01a04be | 743 | py | Python | mytest/playsnake.py | mrzhuzhe/stable-baselines3 | 6c3bc5fa4c3faba951099e3ccb5c74b763134b38 | [
"MIT"
]
| null | null | null | mytest/playsnake.py | mrzhuzhe/stable-baselines3 | 6c3bc5fa4c3faba951099e3ccb5c74b763134b38 | [
"MIT"
]
| null | null | null | mytest/playsnake.py | mrzhuzhe/stable-baselines3 | 6c3bc5fa4c3faba951099e3ccb5c74b763134b38 | [
"MIT"
]
| null | null | null | from stable_baselines3 import PPO
import os
from setup_gym_env import SnakeEnv
import time
#models_dir = "./models/1644408901/" + "40000"
#models_dir = "./models/1644462865/" + "120000"
#models_dir = "./models/1644466638/" + "100000"
models_dir = "./models/1644485414/" + "100000"
env = SnakeEnv()
env.reset()
model = PPO.load(models_dir)
episodes = 10
# the snake doesn't know where it is on the board
for episode in range(episodes):
done = False
obs = env.reset()
while not done:
action, _states = model.predict(obs)
#print("action",action)
obs, reward, done, info = env.step(action)
#print('reward',reward)
        if done:
print(done)
env.render()
| 22.515152 | 50 | 0.641992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.366083 |
f519a4dd8609848cb4fec6b2221b463e32b9ae3b | 13,105 | py | Python | yoda2h5.py | iamholger/yodf5 | 79ad8d77fd2b48e1b71403339e2502b42a5435c8 | [
"MIT"
]
| 4 | 2020-04-22T11:00:13.000Z | 2020-12-16T17:49:47.000Z | yoda2h5.py | iamholger/yodf5 | 79ad8d77fd2b48e1b71403339e2502b42a5435c8 | [
"MIT"
]
| 4 | 2020-12-17T16:26:16.000Z | 2020-12-17T16:30:34.000Z | yoda2h5.py | iamholger/yodf5 | 79ad8d77fd2b48e1b71403339e2502b42a5435c8 | [
"MIT"
]
| 2 | 2020-05-06T17:30:05.000Z | 2020-12-16T17:58:23.000Z | #!/usr/bin/env python3
import yoda, sys
import h5py
import numpy as np
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
# Fix size, sometimes there is spillover
# TODO: replace with while if problem persists
if len(out) > num:
out[-2].extend(out[-1])
out = out[0:-1]
if len(out) != num:
raise Exception("something went wrong in chunkIt, the target size differs from the actual size")
return out
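# A quick worked example: chunkIt(list(range(5)), 2) splits on avg == 2.5,
# yielding [[0, 1], [2, 3, 4]] -- the trailing chunk absorbs the remainder.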
def createDatasets(f, binids, variations, depth=1, compression=4):
"""
Create data sets in the HDF5 file.
"""
nbins=len(binids)
nvars=len(variations)
# The fundamental moments/elements of yoda objecs
floats = [
"sumw",
"sumw2",
"sumwx",
"sumwx2",
"sumwy",
"sumwy2",
"sumwxy",
"numEntries",
"xval",
"xerr-",
"xerr+",
"yval",
"yerr-",
"yerr+",
"xmin",
"xmax",
"ymin",
"ymax"
]
# The datasets have 3 axes: binid, weight variation, point in parameter space
for df in floats: f.create_dataset(df, (nbins,nvars,depth), maxshape=(None,None,None), dtype='f' , chunks=True, compression=compression)
    # Lookups --- helps when reading data and reconstructing YODA objects
f.create_group("Histo1D")
f.create_group("Histo2D")
f.create_group("Profile1D")
f.create_group("Counter")
f.create_group("Scatter1D")
f.create_group("Scatter2D")
# This is the one that works well with hdf5 when reading std::string in C++
dt = h5py.special_dtype(vlen=str)
# We use these simple lists as lookup tables to associate the elements of the datasets ^^^ with
# the actual YODA Analysis objects
import numpy as np
f.create_dataset("binids", data=np.array(binids, dtype=dt))
f.create_dataset("variations", data=np.array(variations, dtype=dt))
def dbn0ToArray(dbn):
return np.array([dbn.sumW(), dbn.sumW2(), dbn.numEntries()])
def dbn1ToArray(dbn):
"""
    The try/except block deals with the flow distributions (e.g. underflow) not having xMin/xMax
"""
try:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.numEntries(), dbn.xMin(), dbn.xMax()])
except:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.numEntries(), 0, 0])
def H2dbn2ToArray(dbn):
"""
    The try/except block deals with the flow distributions (e.g. underflow) not having xMin/xMax
"""
try:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.sumWXY(), dbn.numEntries(), dbn.xMin(), dbn.xMax(), dbn.yMin(), dbn.yMax()])
except:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.sumWXY(), dbn.numEntries(), 0, 0, 0, 0])
def dbn2ToArray(dbn):
try:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.numEntries(), dbn.xMin(), dbn.xMax()])
except:
return np.array([dbn.sumW(), dbn.sumW2(), dbn.sumWX(), dbn.sumWX2(), dbn.sumWY(), dbn.sumWY2(), dbn.numEntries(), 0, 0])
def point2DToArray(pnt):
return np.array([pnt.val(1), pnt.errMinus(1), pnt.errPlus(1), pnt.val(2), pnt.errMinus(2), pnt.errPlus(2)])
def point1DToArray(pnt):
return np.array([pnt.val(1), pnt.errMinus(1), pnt.errPlus(1)])
def mkSafeHname(hname):
return hname.replace("/","|")
def mkBinids(hdict):
binids= []
for num, hname in enumerate(sorted(list(hdict.keys()))):
if hname.endswith("]"): continue
ao = hdict[hname]
base = ao.path().split("[")[0].replace("/","|")
if ao.type()=="Scatter1D" or ao.type()=="Scatter2D":
temp = ["{}#{}".format(base, i) for i in range(len(ao))]
elif ao.type()=="Counter":
temp = ["{}#{}".format(base, 0)]
else:
suffixes = ["T", "O", "U"]
if ao.type() == "Counter":
suffixes.append(0)
else:
suffixes.extend([i for i in range(len(ao))])
temp = ["{}#{}".format(base, s) for s in suffixes]
binids.extend(temp)
return binids
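# Sketch of the binid layout this produces (the path is hypothetical): a
# 2-bin Histo1D at '/ANA/hist' yields ['|ANA|hist#T', '|ANA|hist#O',
# '|ANA|hist#U', '|ANA|hist#0', '|ANA|hist#1'] -- total, overflow and
# underflow entries first, then one id per bin.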
def mkIndexDict(datadict, allbinids):
ret = {'Histo1D':{}, 'Histo2D':{}, 'Profile1D':{}, 'Scatter1D':{}, 'Scatter2D':{}, 'Counter':{}}
for hname, v in datadict.items():
_hname=mkSafeHname(hname)
try:
ret[datadict[hname].type()][_hname] = [num for num, binid in enumerate(allbinids) if binid.startswith("{}#".format(_hname))]
except Exception as e:
print("oops: ", e)
return ret
def createIndexDS(f, d_idx):
for dtype, objects in d_idx.items():
for _hname, binIdx in objects.items():
f.create_dataset("{}/{}".format(dtype, _hname), data=binIdx , chunks=True)
def fillDatasets(f, binIdx, variations, ddict, hname, depth=0):
if len(binIdx) ==0:
print("Warning, no matching binid for {} --- is this one of the raw ratios maybe???".format(hname))
return
if ddict[hname].type()=='Histo1D':
nFields=7
fdbn = dbn1ToArray
elif ddict[hname].type()=='Histo2D':
nFields=12
fdbn = H2dbn2ToArray
elif ddict[hname].type()=='Profile1D':
fdbn = dbn2ToArray
nFields=9
elif ddict[hname].type()=='Scatter2D':
fdbn = point2DToArray
nFields=6
elif ddict[hname].type()=='Scatter1D':
fdbn = point1DToArray
nFields=3
elif ddict[hname].type()=='Counter':
nFields=3
else:
raise Exception("type {} Not implemented".format(ddict[hname].type()))
# Empty array to be filled and written to datasets
temp = np.zeros((len(binIdx), len(variations), nFields))
hids = [hname]
for v in variations[1:]:
hids.append("{}[{}]".format(hname, v))
# Iterate over variations
for col, hn in enumerate(hids):
# Iterate over bins
H=ddict[hn]
if H.type() == "Counter":
temp[0][col] = np.array([H.sumW(), H.sumW2(), H.numEntries()])
# Things with under/overflow first
elif H.type() not in ["Scatter1D", "Scatter2D", "Histo2D"]:
temp[0][col] = fdbn(H.totalDbn())
temp[1][col] = fdbn(H.overflow())
temp[2][col] = fdbn(H.underflow())
for i in range(len(binIdx)-3):
temp[3+i][col] = fdbn(H.bin(i))
elif H.type() =="Histo2D":
temp[0][col] = fdbn(H.totalDbn())
temp[1][col] = 0.0 # Future proofing
temp[2][col] = 0.0 #
for i in range(len(binIdx)-3):
temp[3+i][col] = fdbn(H.bin(i))
else:
for i in range(len(binIdx)):
temp[i][col] = fdbn(H.point(i))
if ddict[hname].type()=='Histo1D':
f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
# elif ddict[hname].type()=='Histo2D':
# f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
# f["sumwy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
# f["sumwy2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
# f["sumwxy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
# f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,7]
# f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,8]
# f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,9]
# f["ymin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,10]
# f["ymax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,11]
# elif ddict[hname].type()=='Profile1D':
# f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["sumwx"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# f["sumwx2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
# f["sumwy"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
# f["sumwy2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
# f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,6]
# f["xmin"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,7]
# f["xmax"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,8]
# elif ddict[hname].type()=='Scatter1D':
# f["xval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["xerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["xerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# elif ddict[hname].type()=='Scatter2D':
# f["xval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["xerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["xerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# f["yval"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,3]
# f["yerr-"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,4]
# f["yerr+"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,5]
# elif ddict[hname].type()=='Counter':
# f["sumw"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,0]
# f["sumw2"][ binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,1]
# f["numEntries"][binIdx[0]:binIdx[-1]+1,:,depth] = temp[:,:,2]
# else:
# raise Exception("yikes")
if __name__=="__main__":
import sys
import optparse, os, sys
op = optparse.OptionParser(usage=__doc__)
op.add_option("-v", "--debug", dest="DEBUG", action="store_true", default=False, help="Turn on some debug messages")
op.add_option("-o", dest="OUTPUT", default="analysisobjects.h5", help="Output HDF5 file (default: %default)")
opts, args = op.parse_args()
YODAFILES = args
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
binids, VVV, aix, aix_flat, central = None, None, None, None, None
if rank==0:
# TODO if len(args)==1 and os.path.isdir(args[0]) --- hierarchical reading with pnames finding etc
# Let's assume they are all consistent TODO add robustness
DATA0 = yoda.readYODA(args[0])
L = sorted(list(DATA0.keys()))
names = [x for x in L ]# if not "/RAW" in x]
central = [x for x in names if not x.endswith("]")]
variations = [x for x in names if x.endswith("]")]
# TODO In principle one probably should check that all variations are always the
# same, we assume this is the case here
var = []
for c in central:
var.append([x for x in variations if x.startswith(c+"[")])
        ## That's the weight and weight-variation order we store the data in
VVV = ["CentralWeight"]
import re
        p = re.compile(r"\[(.*?)\]")
for x in var[0]:
try:
VVV.append(p.findall(x)[0])
except Exception as e:
print(x, e)
binids = mkBinids(DATA0)
# Hierarchical, i.e. top layer is the AnalysisObject type
aix = mkIndexDict(DATA0, binids)
# Object name as keys and lists of indices as values
aix_flat = {}
for k, v in aix.items(): aix_flat.update(v)
binids = comm.bcast(binids, root=0)
VVV = comm.bcast(VVV, root=0)
aix = comm.bcast(aix, root=0)
aix_flat = comm.bcast(aix_flat, root=0)
central = comm.bcast(central, root=0)
    # NOTE dataset operations are collective.
    # This requires h5py built against an MPI-enabled HDF5 (hence the mpio driver).
try:
f = h5py.File(opts.OUTPUT, "w", driver='mpio', comm=MPI.COMM_WORLD)
except:
f = h5py.File(opts.OUTPUT, "w")
createDatasets(f, binids, VVV, depth=len(YODAFILES))
createIndexDS(f, aix)
rankwork = chunkIt([i for i in range(len(YODAFILES))], size) if rank==0 else None
rankwork = comm.scatter(rankwork, root=0)
    # This part is trivially parallel: each MPI rank processes its own files
for num, findex in enumerate(rankwork):
DATA = yoda.readYODA(YODAFILES[findex])
for hname in central:
_hname=mkSafeHname(hname)
fillDatasets(f, aix_flat[_hname], VVV, DATA, hname, depth=findex)
if rank==0:
print("[{}] --- {}/{} complete".format(rank, num, len(rankwork)))
sys.stdout.flush()
f.close()
| 37.766571 | 184 | 0.546814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,694 | 0.358184 |
f51c993aef58b3c9c160f8b68cd78fc8daf5ff42 | 1,703 | py | Python | main.py | RichezA/UnRecurZipper | dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1 | [
"MIT"
]
| null | null | null | main.py | RichezA/UnRecurZipper | dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1 | [
"MIT"
]
| null | null | null | main.py | RichezA/UnRecurZipper | dffe16811e3d79fdc20e0aada0f2ffe9c77da9a1 | [
"MIT"
]
| null | null | null | import zipfile
import os
import glob
import sys
# Actual directory that we could find somewhere
class Folder:
def __init__(self, path):
self.path = path
print("Current working folder is: " + self.path)
self.checkForZippedFile()
self.checkForDirectories()
def checkForZippedFile(self):
self.filesToUnzip = list()
self.filesToUnzip = glob.glob(os.path.join(self.path,'*.zip'), recursive=True)
# If we find a .zip file in the current directory
for fileToUnzip in self.filesToUnzip:
print("new ZipFile found at: " + fileToUnzip)
zip_ref = zipfile.ZipFile(fileToUnzip, 'r') # We prepare to unzip
zipFilePath = fileToUnzip.split('.zip')[0] # Reformating the path to remove the .zip at the end
print("Current zip is at: " + zipFilePath)
zip_ref.extractall(zipFilePath) # Extracting .zip content
zip_ref.close() # Closing extraction flow
os.remove(zipFilePath + '.zip') # Removing the zip files
Folder(zipFilePath) # Calling Folder again
def checkForDirectories(self):
with os.scandir(self.path) as listOfDirectories:
for entry in listOfDirectories:
# We check if the actual file is a directory and if it isn't the .git one
if not entry.is_file() and entry.name != '.git':
entry = Folder(os.path.join(self.path, entry.name))
# Reading the first arg written in the console (program name not included)
fileTest = Folder(sys.argv[1]) | 47.305556 | 116 | 0.593658 | 1,499 | 0.880211 | 0 | 0 | 0 | 0 | 0 | 0 | 514 | 0.30182 |
f51f1a4cfc64468547a1bf70b97687f67b00823c | 634 | py | Python | dagger/dag_creator/airflow/utils/operator_factories.py | jorgetagle/dagger | dafcfb9df904e512f050aefdacf6581c571bac23 | [
"MIT"
]
| null | null | null | dagger/dag_creator/airflow/utils/operator_factories.py | jorgetagle/dagger | dafcfb9df904e512f050aefdacf6581c571bac23 | [
"MIT"
]
| null | null | null | dagger/dag_creator/airflow/utils/operator_factories.py | jorgetagle/dagger | dafcfb9df904e512f050aefdacf6581c571bac23 | [
"MIT"
]
| null | null | null | from functools import partial
from airflow.operators.python_operator import ShortCircuitOperator
def make_control_flow(is_dummy_operator_short_circuit, dag):
control_flow = ShortCircuitOperator(
task_id="dummy-control-flow",
dag=dag,
provide_context=True,
python_callable=partial(eval_control_flow, is_dummy_operator_short_circuit),
)
return control_flow
def eval_control_flow(is_dummy_operator_short_circuit, **kwargs):
True
if not is_dummy_operator_short_circuit:
return True
if kwargs["task_instance"].next_try_number > 2:
return True
return False
| 25.36 | 84 | 0.747634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.055205 |
f51fe70db140c3154b176531ad8f28b9ef267b5a | 1,974 | py | Python | predict_CNN.py | slimtomatillo/toxic_waste_dump | 4bc820f0b31f4420e789af11a9338c475c068889 | [
"MIT"
]
| 2 | 2018-07-13T16:44:24.000Z | 2019-10-14T21:31:02.000Z | predict_CNN.py | slimtomatillo/toxic_waste_dump | 4bc820f0b31f4420e789af11a9338c475c068889 | [
"MIT"
]
| null | null | null | predict_CNN.py | slimtomatillo/toxic_waste_dump | 4bc820f0b31f4420e789af11a9338c475c068889 | [
"MIT"
]
| null | null | null | # Imports
import pandas as pd
import pickle
from keras.models import load_model
from preprocess import preprocess
from preprocess import prep_text
# Logging
import logging
logging.getLogger().setLevel(logging.INFO)
logging.info('Loading comments to classify...')
# Enter comment to be classified below
comment_to_classify = ''
def return_label(predicted_probs):
"""
Function that takes in a list of 7 class
probabilities and returns the labels
with probabilities over a certain threshold.
"""
threshold = 0.4
labels = []
classes = ['clean', 'toxic', 'severe toxic', 'obscene',
'threat', 'insult', 'identity hate']
i = 0
while i < len(classes):
if predicted_probs[i] > threshold:
labels.append(classes[i])
i += 1
return (labels)
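# For example, with the default threshold of 0.4:
# return_label([0.9, 0.5, 0.1, 0.1, 0.1, 0.1, 0.1]) -> ['clean', 'toxic']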
def predict_label(comment_str):
"""
Function that takes in a comment in
string form and returns the predicted
    class labels: clean, toxic, severe
    toxic, obscene, threat, insult, identity
    hate. May output multiple labels.
"""
data = pd.DataFrame(data=[comment_str], columns=['comment_text'])
logging.info('Comments loaded.')
# Preprocess text
X_to_predict = preprocess(data)
# Identify data to make predictions from
X_to_predict = X_to_predict['model_text']
# Format data properly
X_to_predict = prep_text(X_to_predict)
logging.info('Loading model...')
# Load CNN from disk
cnn = load_model('model/CNN/binarycrossentropy_adam/model-04-0.9781.hdf5')
logging.info('Model loaded.')
logging.info('Making prediction(s)...')
# Make predictions
preds = cnn.predict(X_to_predict)
for each_comment, prob in zip(data['comment_text'], preds):
print('COMMENT:')
print(each_comment)
print()
print('PREDICTION:')
print(return_label(prob))
print()
logging.info('Finished.')
predict_label(comment_to_classify)
| 24.675 | 78 | 0.670719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 847 | 0.429078 |
f5209853412f11170538a00e749d5b0ede34e2eb | 797 | py | Python | 299. Bulls and Cows.py | fossabot/leetcode-2 | 335f1aa3ee785320515c3d3f03c2cb2df3bc13ba | [
"MIT"
]
| 2 | 2018-02-26T09:12:19.000Z | 2019-06-07T13:38:10.000Z | 299. Bulls and Cows.py | fossabot/leetcode-2 | 335f1aa3ee785320515c3d3f03c2cb2df3bc13ba | [
"MIT"
]
| 1 | 2018-12-24T07:03:34.000Z | 2018-12-24T07:03:34.000Z | 299. Bulls and Cows.py | fossabot/leetcode-2 | 335f1aa3ee785320515c3d3f03c2cb2df3bc13ba | [
"MIT"
]
| 2 | 2018-12-24T07:01:03.000Z | 2019-06-07T13:38:07.000Z | class Solution(object):
def getHint(self, secret, guess):
"""
:type secret: str
:type guess: str
:rtype: str
"""
dic = {}
countA = 0
setA = set()
for i in range(len(secret)):
if secret[i] == guess[i]:
countA += 1
setA.add(i)
elif secret[i] not in dic:
dic[secret[i]] = 1
else:
dic[secret[i]] += 1
countB = 0
for i in range(len(guess)):
if i not in setA:
if guess[i] in dic:
countB += 1
dic[guess[i]] -= 1
if dic[guess[i]] == 0:
del dic[guess[i]]
return str(countA)+"A"+str(countB)+"B"
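# Example: Solution().getHint("1807", "7810") == "1A3B"
# (one bull for the matching '8', three cows for '1', '7' and '0').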
| 28.464286 | 46 | 0.385194 | 796 | 0.998745 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.115433 |
f523394a40a39dc77c57c643a75603c33eb11af0 | 176 | py | Python | lib/models/backbones/efficientdet/__init__.py | shachargluska/centerpose | 01c2c8bfa9d3ee91807f2ffdcc48728d104265bd | [
"MIT"
]
| 245 | 2019-11-29T02:55:25.000Z | 2022-03-30T07:30:18.000Z | lib/models/backbones/efficientdet/__init__.py | shachargluska/centerpose | 01c2c8bfa9d3ee91807f2ffdcc48728d104265bd | [
"MIT"
]
| 24 | 2019-11-29T10:05:00.000Z | 2022-03-30T07:16:06.000Z | lib/models/backbones/efficientdet/__init__.py | FishLiuabc/centerpose | 555d753cd82693476f91f78c53aa4147f5a83015 | [
"MIT"
]
| 45 | 2019-11-29T05:12:02.000Z | 2022-03-21T02:20:36.000Z | from .efficientdet import EfficientDet
def get_efficientdet(num_layers, cfg):
model = EfficientDet(intermediate_channels=cfg.MODEL.INTERMEDIATE_CHANNEL)
return model
| 25.142857 | 78 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f528bf891d405b1631574286911aea9a15dea4b2 | 1,566 | py | Python | codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py | codesmith-gmbh/forge | 43c334d829a727b48f8e21e273017c51394010f9 | [
"Apache-2.0"
]
| null | null | null | codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py | codesmith-gmbh/forge | 43c334d829a727b48f8e21e273017c51394010f9 | [
"Apache-2.0"
]
| null | null | null | codesmith/CloudFormation/CogCondPreAuthSettings/cog_cond_pre_auth_settings.py | codesmith-gmbh/forge | 43c334d829a727b48f8e21e273017c51394010f9 | [
"Apache-2.0"
]
| null | null | null | import json
import logging
import boto3
from box import Box
from crhelper import CfnResource
from schema import Optional
import codesmith.common.naming as naming
from codesmith.common.cfn import resource_properties
from codesmith.common.schema import encoded_bool, non_empty_string, tolerant_schema
from codesmith.common.ssm import put_string_parameter, silent_delete_parameter_from_event
helper = CfnResource()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
properties_schema = tolerant_schema({
'UserPoolId': non_empty_string,
'UserPoolClientId': non_empty_string,
Optional('All', default=False): encoded_bool,
Optional('Domains', default=[]): [str],
Optional('Emails', default=[]): [str]
})
ssm = boto3.client('ssm')
def validate_properties(props):
return Box(properties_schema.validate(props), camel_killer_box=True)
@helper.create
@helper.update
def create(event, _):
p = validate_properties(resource_properties(event))
parameter_name = naming.cog_cond_pre_auth_parameter_name(p.user_pool_id, p.user_pool_client_id)
parameter_value = json.dumps({'All': p.all, 'Domains': p.domains, 'Emails': p.emails})
put_string_parameter(ssm, parameter_name,
value=parameter_value,
description='Forge Cognito Pre Auth Settings Parameter')
return parameter_name
@helper.delete
def delete(event, _):
return silent_delete_parameter_from_event(ssm, event)
def handler(event, context):
logger.info('event: %s', event)
helper(event, context)
| 29.54717 | 99 | 0.751596 | 0 | 0 | 0 | 0 | 594 | 0.37931 | 0 | 0 | 133 | 0.08493 |
f528d3c7d1c051d306cd7f8c1738faafc34bc81c | 125 | py | Python | Mundo 1/Exercicios/Desafio005.py | yWolfBR/Python-CursoEmVideo | 17bab8ad3c4293daf8377c5d49242942845b3577 | [
"MIT"
]
| null | null | null | Mundo 1/Exercicios/Desafio005.py | yWolfBR/Python-CursoEmVideo | 17bab8ad3c4293daf8377c5d49242942845b3577 | [
"MIT"
]
| null | null | null | Mundo 1/Exercicios/Desafio005.py | yWolfBR/Python-CursoEmVideo | 17bab8ad3c4293daf8377c5d49242942845b3577 | [
"MIT"
]
| null | null | null | n = int(input('Digite um número: '))
print('Seu número é {}. O antecessor é {} e seu sucessor é {}'.format(n, n - 1, n + 1))
| 41.666667 | 87 | 0.592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.623077 |
f52b501702b28918819f4339d418e24bd36e3fba | 1,506 | py | Python | geeksforgeeks/Data Structures/check_if_subtree.py | codervikash/online-courses | a60efad23af65080a98e7dd038fb2c750237b781 | [
"MIT"
]
| null | null | null | geeksforgeeks/Data Structures/check_if_subtree.py | codervikash/online-courses | a60efad23af65080a98e7dd038fb2c750237b781 | [
"MIT"
]
| null | null | null | geeksforgeeks/Data Structures/check_if_subtree.py | codervikash/online-courses | a60efad23af65080a98e7dd038fb2c750237b781 | [
"MIT"
]
| null | null | null | # Given two binary trees, check if the first tree is subtree of the second one.
# A subtree of a tree T is a tree S consisting of a node in T and all of its descendants in T.
# The subtree corresponding to the root node is the entire tree; the subtree corresponding to any other node is called a proper subtree.
class Node:
# Constructor to create a new node
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def are_identical(root1, root2):
if root1 is None and root2 is None:
return True
if root1 is None or root2 is None:
return False
return (root1.data == root2.data and
are_identical(root1.left, root2.left) and
are_identical(root1.right, root2.right))
def is_subtree(t, s):
    if s is None:
        return True
    if t is None:
        return False
    if are_identical(t, s):
        return True
return (is_subtree(t.left, s) or is_subtree(t.right, s))
# Driver program to test above function
""" TREE 1
Construct the following tree
26
/ \
10 3
/ \ \
4 6 3
\
30
"""
T = Node(26)
T.right = Node(3)
T.right.right = Node(3)
T.left = Node(10)
T.left.left = Node(4)
T.left.left.right = Node(30)
T.left.right = Node(6)
""" TREE 2
Construct the following tree
10
/ \
4 6
\
30
"""
S = Node(10)
S.right = Node(6)
S.left = Node(4)
S.left.right = Node(30)
if is_subtree(T, S):
print "Tree 2 is subtree of Tree 1"
else :
print "Tree 2 is not a subtree of Tree 1"
| 20.351351 | 136 | 0.632802 | 133 | 0.088313 | 0 | 0 | 0 | 0 | 0 | 0 | 689 | 0.457503 |
f52c3d4d080221bb0849b2d7854dea28cf442e0d | 619 | py | Python | papi_sdk/models/search/hotelpage/affiliate.py | stanislav-losev/papi-sdk-python | 4a296745d626ef13c6d1170e9d3569cb1c37eb3c | [
"MIT"
]
| 1 | 2022-02-01T08:53:24.000Z | 2022-02-01T08:53:24.000Z | papi_sdk/models/search/hotelpage/affiliate.py | stanislav-losev/papi-sdk-python | 4a296745d626ef13c6d1170e9d3569cb1c37eb3c | [
"MIT"
]
| 2 | 2021-01-18T07:57:29.000Z | 2021-06-23T11:04:14.000Z | papi_sdk/models/search/hotelpage/affiliate.py | stanislav-losev/papi-sdk-python | 4a296745d626ef13c6d1170e9d3569cb1c37eb3c | [
"MIT"
]
| 3 | 2020-12-30T13:09:45.000Z | 2020-12-30T13:42:33.000Z | from typing import List, Optional
from papi_sdk.models.search.base_affiliate_response import (
BaseAffiliateSearchData,
BaseAffiliateSearchResponse,
BaseHotel,
BaseRate,
)
from papi_sdk.models.search.base_request import BaseAffiliateRequest
class AffiliateHotelPageRequest(BaseAffiliateRequest):
id: str
class Rate(BaseRate):
book_hash: str
class Hotel(BaseHotel):
rates: List[Rate]
class HotelPageAffiliateSearchData(BaseAffiliateSearchData):
hotels: List[Hotel]
class AffiliateHotelPageResponse(BaseAffiliateSearchResponse):
data: Optional[HotelPageAffiliateSearchData]
| 20.633333 | 68 | 0.799677 | 346 | 0.558966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f52d16005d54fc06009e6a33b0d9fa26ef35fd47 | 2,093 | py | Python | dl_tutorials/torch_neural_networks.py | learnerzhang/AnalyticsVidhya | 697689a24a9d73785164512cab8ac4ee5494afe8 | [
"Apache-2.0"
]
| 1 | 2018-07-04T09:14:26.000Z | 2018-07-04T09:14:26.000Z | dl_tutorials/torch_neural_networks.py | learnerzhang/AnalyticsVidhya | 697689a24a9d73785164512cab8ac4ee5494afe8 | [
"Apache-2.0"
]
| null | null | null | dl_tutorials/torch_neural_networks.py | learnerzhang/AnalyticsVidhya | 697689a24a9d73785164512cab8ac4ee5494afe8 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-01-02 16:44
# @Author : zhangzhen
# @Site :
# @File : torch_neural_networks.py
# @Software: PyCharm
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel, 6 output channels, 5 * 5 square convolution
# kernel
self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
# an affine operation: y = Wx + b
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, *input):
# Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv1(input[0])), (2, 2))
# if the size is a square you can only specify a single number
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
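# Shape check for the 32x32 input used below: conv1 (5x5) gives 28x28, the
# 2x2 max-pool gives 14x14, conv2 gives 10x10, pooling gives 5x5 -- hence
# the 16 * 5 * 5 = 400 features feeding fc1.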
if __name__ == '__main__':
net = Net()
criterion = nn.MSELoss()
print(net)
params = list(net.parameters())
print("参数个数:", len(params))
for param in params:
print(param.size())
input = torch.randn(1, 1, 32, 32)
target = torch.randn(10)
out = net(input)
loss = criterion(out, target)
print(100 * "=")
print(out, target)
print("Loss:", loss)
print(loss.grad_fn) # MSELoss
print(loss.grad_fn.next_functions[0][0]) # Linear
print(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU
net.zero_grad()
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
| 26.493671 | 77 | 0.592929 | 1,087 | 0.517373 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.23465 |
f52eac71ac3094d6c2f5d753f7dc5413e91d3ecd | 422 | py | Python | webhook.py | cadamswaite/RPI-Jekyll-Compiler | acf905b11f41c3bda286d4907a038b7888b1c8fa | [
"Unlicense"
]
| null | null | null | webhook.py | cadamswaite/RPI-Jekyll-Compiler | acf905b11f41c3bda286d4907a038b7888b1c8fa | [
"Unlicense"
]
| null | null | null | webhook.py | cadamswaite/RPI-Jekyll-Compiler | acf905b11f41c3bda286d4907a038b7888b1c8fa | [
"Unlicense"
]
| null | null | null | from bottle import route, run, template
gitdict = {'po2go':{'https://github.com/cadamswaite/po2go.git:master':'https://github.com/cadamswaite/po2go.git:gh-pages'}}
# Handle http requests to the root address
@route('/')
def index():
return 'Go away.'
@route('/build/<name>')
def greet(name):
if name in gitdict:
return 'Building ' + name
else:
return name + 'not found in gitdict'
run(host='0.0.0.0', port=80)
| 23.444444 | 123 | 0.684834 | 0 | 0 | 0 | 0 | 178 | 0.421801 | 0 | 0 | 219 | 0.518957 |
f52ec88be52c378180af93ce81749dca618e2061 | 2,577 | py | Python | shldn/leonard.py | arrieta/shldn | 8335aaeb1bfe91698bd9dfb83487393ede9225e6 | [
"MIT"
]
| null | null | null | shldn/leonard.py | arrieta/shldn | 8335aaeb1bfe91698bd9dfb83487393ede9225e6 | [
"MIT"
]
| null | null | null | shldn/leonard.py | arrieta/shldn | 8335aaeb1bfe91698bd9dfb83487393ede9225e6 | [
"MIT"
]
| null | null | null | """
Leonard always DRIVES Sheldon (this module is the __main__ driver for Sheldon)
"""
import argparse
import sys
import os
try:
from cooper import Sheldon
except:
from .cooper import Sheldon
# Extensions for python source files
EXTENSIONS = [".py", ".mpy"]
def parse_commandline():
parser = argparse.ArgumentParser(
description="Find divisions in Python code")
parser.add_argument("-u", "--human_readable",
help="Display friendlier output",
action="store_true")
parser.add_argument("-r", "--recursive",
help="Scan subdirectories recursively",
action="store_true")
parser.add_argument("path",
type=str,
help="Path to the target file or directory")
return parser.parse_args()
def process_files(files, divs_found, readable, path=""):
for filename in files:
fname = os.path.join(path, filename)
with open(fname) as f:
pysource = f.read()
s = Sheldon(pysource)
try:
s.analyze()
except SyntaxError:
exc_type, exc_obj, exc_tb = sys.exc_info()
print(f"{fname} {exc_tb.tb_lineno} SyntaxError")
continue
divs_found += len(s.divisions)
s.printdivs(fname, s.divisions, readable)
return divs_found
def main():
args = parse_commandline()
if args.human_readable:
def readableprint(*args, **kwargs):
print(*args, **kwargs)
else:
readableprint = lambda *a, **k: None # do - nothing function
files_checked = 0
divs_found = 0
# Directory path
if os.path.isdir(args.path):
for path, dirs, files in os.walk(args.path):
files = [f for f in os.listdir(path) if f.endswith(tuple(EXTENSIONS))]
files_checked += len(files)
divs_found = process_files(files, divs_found, args.human_readable, path=path)
if not args.recursive:
exit(0)
readableprint(f"{files_checked} files checked")
readableprint(f"{divs_found} divisions found")
# File path
elif os.path.isfile(args.path):
files =[f for f in [args.path] if args.path.endswith(tuple(EXTENSIONS))]
divs_found = process_files(files, divs_found, args.human_readable)
readableprint(f"{divs_found} divisions found")
# Error
else:
sys.exit(f"{args.path} doesn't exist!")
if __name__ == "__main__":
main()
| 28.633333 | 89 | 0.592549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 564 | 0.218859 |
f52eee40e1e0d598ea7f901674518fc574586952 | 705 | py | Python | tests/002_finder/003_changeextension.py | Sam-prog-sudo/boussole | 5d6ec94356f9a91ff4d6d23c1700d3512b67006a | [
"MIT"
]
| 13 | 2016-05-19T15:18:41.000Z | 2022-03-22T15:37:32.000Z | tests/002_finder/003_changeextension.py | Sam-prog-sudo/boussole | 5d6ec94356f9a91ff4d6d23c1700d3512b67006a | [
"MIT"
]
| 38 | 2016-04-07T00:30:58.000Z | 2022-02-28T13:29:33.000Z | tests/002_finder/003_changeextension.py | Sam-prog-sudo/boussole | 5d6ec94356f9a91ff4d6d23c1700d3512b67006a | [
"MIT"
]
| 3 | 2016-05-20T09:21:57.000Z | 2020-10-12T10:56:49.000Z | # -*- coding: utf-8 -*-
def test_001(settings, finder):
result = finder.change_extension("foo.scss", 'css')
assert result == "foo.css"
def test_002(settings, finder):
result = finder.change_extension("foo.backup.scss", 'css')
assert result == "foo.backup.css"
def test_003(settings, finder):
result = finder.change_extension("bar/foo.scss", 'css')
assert result == "bar/foo.css"
def test_004(settings, finder):
result = finder.change_extension("/home/bar/foo.scss", 'css')
assert result == "/home/bar/foo.css"
def test_005(settings, finder):
result = finder.change_extension("/home/bar/foo.backup.scss", 'css')
assert result == "/home/bar/foo.backup.css"
| 26.111111 | 72 | 0.670922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.310638 |
f52efbe88e2653ae5d1fd37a74f972d83828b114 | 40,749 | py | Python | Lib/site-packages/cherrypy/test/test_core.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
]
| null | null | null | Lib/site-packages/cherrypy/test/test_core.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
]
| null | null | null | Lib/site-packages/cherrypy/test/test_core.py | raychorn/svn_Python-2.5.1 | 425005b1b489ba44ec0bb989e077297e8953d9be | [
"PSF-2.0"
]
| null | null | null | """Basic tests for the CherryPy core: request handling."""
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
from cherrypy import _cptools, tools
from cherrypy.lib import http, static
import types
import os
localDir = os.path.dirname(__file__)
log_file = os.path.join(localDir, "test.log")
log_access_file = os.path.join(localDir, "access.log")
favicon_path = os.path.join(os.getcwd(), localDir, "../favicon.ico")
defined_http_methods = ("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE",
"TRACE", "CONNECT", "PROPFIND")
def setup_server():
class Root:
def index(self):
return "hello"
index.exposed = True
favicon_ico = tools.staticfile.handler(filename=favicon_path)
def andnow(self):
return "the larch"
andnow.exposed = True
def global_(self):
pass
global_.exposed = True
def delglobal(self):
del self.__class__.__dict__['global_']
delglobal.exposed = True
def defct(self, newct):
newct = "text/%s" % newct
cherrypy.config.update({'tools.response_headers.on': True,
'tools.response_headers.headers':
[('Content-Type', newct)]})
defct.exposed = True
def upload(self, file):
return "Size: %s" % len(file.file.read())
upload.exposed = True
root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each subclass,
and adds an instance of the subclass as an attribute of root.
"""
def __init__(cls, name, bases, dct):
            type.__init__(cls, name, bases, dct)
for value in dct.itervalues():
if isinstance(value, types.FunctionType):
value.exposed = True
setattr(root, name.lower(), cls())
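    # A minimal sketch of what the metaclass buys us (Widget is a
    # hypothetical name used only for illustration):
    #
    #     class Widget(Test):
    #         def index(self):
    #             return "widget"
    #
    # automatically marks Widget.index as exposed and mounts an instance at
    # root.widget, so it is served at /widget/ with no explicit setattr.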
class Test(object):
__metaclass__ = TestType
class URL(Test):
_cp_config = {'tools.trailing_slash.on': False}
def index(self, path_info, relative=None):
return cherrypy.url(path_info, relative=bool(relative))
def leaf(self, path_info, relative=None):
return cherrypy.url(path_info, relative=bool(relative))
class Params(Test):
def index(self, thing):
return repr(thing)
def ismap(self, x, y):
return "Coordinates: %s, %s" % (x, y)
def default(self, *args, **kwargs):
return "args: %s kwargs: %s" % (args, kwargs)
class Status(Test):
def index(self):
return "normal"
def blank(self):
cherrypy.response.status = ""
# According to RFC 2616, new status codes are OK as long as they
# are between 100 and 599.
# Here is an illegal code...
def illegal(self):
cherrypy.response.status = 781
return "oops"
# ...and here is an unknown but legal code.
def unknown(self):
cherrypy.response.status = "431 My custom error"
return "funky"
# Non-numeric code
def bad(self):
cherrypy.response.status = "error"
return "bad news"
class Redirect(Test):
class Error:
_cp_config = {"tools.err_redirect.on": True,
"tools.err_redirect.url": "/errpage",
"tools.err_redirect.internal": False,
}
def index(self):
raise NameError("redirect_test")
index.exposed = True
error = Error()
def index(self):
return "child"
def by_code(self, code):
raise cherrypy.HTTPRedirect("somewhere else", code)
by_code._cp_config = {'tools.trailing_slash.extra': True}
def nomodify(self):
raise cherrypy.HTTPRedirect("", 304)
def proxy(self):
raise cherrypy.HTTPRedirect("proxy", 305)
def stringify(self):
return str(cherrypy.HTTPRedirect("/"))
def fragment(self, frag):
raise cherrypy.HTTPRedirect("/some/url#%s" % frag)
def login_redir():
if not getattr(cherrypy.request, "login", None):
raise cherrypy.InternalRedirect("/internalredirect/login")
tools.login_redir = _cptools.Tool('before_handler', login_redir)
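    # _cptools.Tool wraps a plain callable as a reusable tool; the first
    # argument names the hook point, so login_redir runs just before the
    # page handler and can divert unauthenticated requests with an
    # InternalRedirect (see the 'secure' handler below for its use).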
def redir_custom():
raise cherrypy.InternalRedirect("/internalredirect/custom_err")
class InternalRedirect(Test):
def index(self):
raise cherrypy.InternalRedirect("/")
def relative(self, a, b):
raise cherrypy.InternalRedirect("cousin?t=6")
def cousin(self, t):
assert cherrypy.request.prev.closed
return cherrypy.request.prev.query_string
def petshop(self, user_id):
if user_id == "parrot":
# Trade it for a slug when redirecting
raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=slug')
elif user_id == "terrier":
# Trade it for a fish when redirecting
raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=fish')
else:
# This should pass the user_id through to getImagesByUser
raise cherrypy.InternalRedirect('/image/getImagesByUser?user_id=%s' % user_id)
# We support Python 2.3, but the @-deco syntax would look like this:
# @tools.login_redir()
def secure(self):
return "Welcome!"
secure = tools.login_redir()(secure)
# Since calling the tool returns the same function you pass in,
# you could skip binding the return value, and just write:
# tools.login_redir()(secure)
def login(self):
return "Please log in"
login._cp_config = {'hooks.before_error_response': redir_custom}
def custom_err(self):
return "Something went horribly wrong."
def early_ir(self, arg):
return "whatever"
early_ir._cp_config = {'hooks.before_request_body': redir_custom}
class Image(Test):
def getImagesByUser(self, user_id):
return "0 images for %s" % user_id
class Flatten(Test):
def as_string(self):
return "content"
def as_list(self):
return ["con", "tent"]
def as_yield(self):
yield "content"
def as_dblyield(self):
yield self.as_yield()
as_dblyield._cp_config = {'tools.flatten.on': True}
def as_refyield(self):
for chunk in self.as_yield():
yield chunk
class Error(Test):
_cp_config = {'tools.log_tracebacks.on': True,
}
def custom(self):
raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!")
custom._cp_config = {'error_page.404': os.path.join(localDir, "static/index.html")}
def noexist(self):
raise cherrypy.HTTPError(404, "No, <b>really</b>, not found!")
noexist._cp_config = {'error_page.404': "nonexistent.html"}
def page_method(self):
raise ValueError()
def page_yield(self):
yield "howdy"
raise ValueError()
def page_streamed(self):
yield "word up"
raise ValueError()
yield "very oops"
page_streamed._cp_config = {"response.stream": True}
def cause_err_in_finalize(self):
# Since status must start with an int, this should error.
cherrypy.response.status = "ZOO OK"
cause_err_in_finalize._cp_config = {'request.show_tracebacks': False}
def rethrow(self):
"""Test that an error raised here will be thrown out to the server."""
raise ValueError()
rethrow._cp_config = {'request.throw_errors': True}
class Ranges(Test):
def get_ranges(self, bytes):
return repr(http.get_ranges('bytes=%s' % bytes, 8))
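        # For reference (values illustrative, against the total length of 8
        # used above): http.get_ranges parses a Range header into half-open
        # (start, stop) tuples, so 'bytes=2-4,-1' should come back as
        # [(2, 5), (7, 8)].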
def slice_file(self):
path = os.path.join(os.getcwd(), os.path.dirname(__file__))
return static.serve_file(os.path.join(path, "static/index.html"))
class Expect(Test):
def expectation_failed(self):
expect = cherrypy.request.headers.elements("Expect")
if expect and expect[0].value != '100-continue':
raise cherrypy.HTTPError(400)
raise cherrypy.HTTPError(417, 'Expectation Failed')
class Headers(Test):
def default(self, headername):
"""Spit back out the value for the requested header."""
return cherrypy.request.headers[headername]
def doubledheaders(self):
# From http://www.cherrypy.org/ticket/165:
# "header field names should not be case sensitive sayes the rfc.
# if i set a headerfield in complete lowercase i end up with two
# header fields, one in lowercase, the other in mixed-case."
# Set the most common headers
hMap = cherrypy.response.headers
hMap['content-type'] = "text/html"
hMap['content-length'] = 18
hMap['server'] = 'CherryPy headertest'
hMap['location'] = ('%s://%s:%s/headers/'
% (cherrypy.request.local.ip,
cherrypy.request.local.port,
cherrypy.request.scheme))
# Set a rare header for fun
hMap['Expires'] = 'Thu, 01 Dec 2194 16:00:00 GMT'
return "double header test"
def ifmatch(self):
val = cherrypy.request.headers['If-Match']
cherrypy.response.headers['ETag'] = val
return repr(val)
class HeaderElements(Test):
def get_elements(self, headername):
e = cherrypy.request.headers.elements(headername)
return "\n".join([unicode(x) for x in e])
class Method(Test):
def index(self):
m = cherrypy.request.method
if m in defined_http_methods:
return m
if m == "LINK":
raise cherrypy.HTTPError(405)
else:
raise cherrypy.HTTPError(501)
def parameterized(self, data):
return data
def request_body(self):
# This should be a file object (temp file),
# which CP will just pipe back out if we tell it to.
return cherrypy.request.body
def reachable(self):
return "success"
class Divorce:
"""HTTP Method handlers shouldn't collide with normal method names.
For example, a GET-handler shouldn't collide with a method named 'get'.
If you build HTTP method dispatching into CherryPy, rewrite this class
to use your new dispatch mechanism and make sure that:
"GET /divorce HTTP/1.1" maps to divorce.index() and
"GET /divorce/get?ID=13 HTTP/1.1" maps to divorce.get()
"""
documents = {}
def index(self):
yield "<h1>Choose your document</h1>\n"
yield "<ul>\n"
for id, contents in self.documents.iteritems():
yield (" <li><a href='/divorce/get?ID=%s'>%s</a>: %s</li>\n"
% (id, id, contents))
yield "</ul>"
index.exposed = True
def get(self, ID):
return ("Divorce document %s: %s" %
(ID, self.documents.get(ID, "empty")))
get.exposed = True
root.divorce = Divorce()
class Cookies(Test):
def single(self, name):
cookie = cherrypy.request.cookie[name]
cherrypy.response.cookie[name] = cookie.value
def multiple(self, names):
for name in names:
cookie = cherrypy.request.cookie[name]
cherrypy.response.cookie[name] = cookie.value
class ThreadLocal(Test):
def index(self):
existing = repr(getattr(cherrypy.request, "asdf", None))
cherrypy.request.asdf = "rassfrassin"
return existing
cherrypy.config.update({
'log.error_file': log_file,
'environment': 'test_suite',
'server.max_request_body_size': 200,
'server.max_request_header_size': 500,
})
appconf = {
'/': {'log.access_file': log_access_file},
'/method': {'request.methods_with_bodies': ("POST", "PUT", "PROPFIND")},
}
cherrypy.tree.mount(root, config=appconf)
# Client-side code #
from cherrypy.test import helper
class CoreRequestHandlingTest(helper.CPWebCase):
def testParams(self):
self.getPage("/params/?thing=a")
self.assertBody("'a'")
self.getPage("/params/?thing=a&thing=b&thing=c")
self.assertBody("['a', 'b', 'c']")
# Test friendly error message when given params are not accepted.
ignore = helper.webtest.ignored_exceptions
ignore.append(TypeError)
try:
self.getPage("/params/?notathing=meeting")
self.assertInBody("index() got an unexpected keyword argument 'notathing'")
finally:
ignore.pop()
# Test "% HEX HEX"-encoded URL, param keys, and values
self.getPage("/params/%d4%20%e3/cheese?Gruy%E8re=Bulgn%e9ville")
self.assertBody(r"args: ('\xd4 \xe3', 'cheese') "
r"kwargs: {'Gruy\xe8re': 'Bulgn\xe9ville'}")
# Make sure that encoded = and & get parsed correctly
self.getPage("/params/code?url=http%3A//cherrypy.org/index%3Fa%3D1%26b%3D2")
self.assertBody(r"args: ('code',) "
r"kwargs: {'url': 'http://cherrypy.org/index?a=1&b=2'}")
# Test coordinates sent by <img ismap>
self.getPage("/params/ismap?223,114")
self.assertBody("Coordinates: 223, 114")
def testStatus(self):
self.getPage("/status/")
self.assertBody('normal')
self.assertStatus(200)
self.getPage("/status/blank")
self.assertBody('')
self.assertStatus(200)
self.getPage("/status/illegal")
self.assertStatus(500)
msg = "Illegal response status from server (781 is out of range)."
self.assertErrorPage(500, msg)
self.getPage("/status/unknown")
self.assertBody('funky')
self.assertStatus(431)
self.getPage("/status/bad")
self.assertStatus(500)
msg = "Illegal response status from server ('error' is non-numeric)."
self.assertErrorPage(500, msg)
def testLogging(self):
f = open(log_access_file, "wb")
f.write("")
f.close()
f = open(log_file, "wb")
f.write("")
f.close()
self.getPage("/flatten/as_string")
self.assertBody('content')
self.assertStatus(200)
self.getPage("/flatten/as_yield")
self.assertBody('content')
self.assertStatus(200)
data = open(log_access_file, "rb").readlines()
host = self.HOST
if not host:
# The empty string signifies INADDR_ANY,
# which should respond on localhost.
host = "127.0.0.1"
intro = '%s - - [' % host
if not data[0].startswith(intro):
self.fail("%r doesn't start with %r" % (data[0], intro))
haslength = False
for k, v in self.headers:
if k.lower() == 'content-length':
haslength = True
line = data[-2].strip()
if haslength:
if not line.endswith('] "GET %s/flatten/as_string HTTP/1.1" 200 7 "" ""'
% self.prefix()):
self.fail(line)
else:
if not line.endswith('] "GET %s/flatten/as_string HTTP/1.1" 200 - "" ""'
% self.prefix()):
self.fail(line)
if not data[-1].startswith(intro):
self.fail("%r doesn't start with %r" % (data[-1], intro))
haslength = False
for k, v in self.headers:
if k.lower() == 'content-length':
haslength = True
line = data[-1].strip()
if haslength:
self.assert_(line.endswith('] "GET %s/flatten/as_yield HTTP/1.1" 200 7 "" ""'
% self.prefix()))
else:
self.assert_(line.endswith('] "GET %s/flatten/as_yield HTTP/1.1" 200 - "" ""'
% self.prefix()))
ignore = helper.webtest.ignored_exceptions
ignore.append(ValueError)
try:
# Test that tracebacks get written to the error log.
self.getPage("/error/page_method")
self.assertInBody("raise ValueError()")
data = open(log_file, "rb").readlines()
self.assertEqual(data[0].strip().endswith('HTTP Traceback (most recent call last):'), True)
self.assertEqual(data[-3].strip().endswith('raise ValueError()'), True)
finally:
ignore.pop()
def testSlashes(self):
# Test that requests for index methods without a trailing slash
# get redirected to the same URI path with a trailing slash.
# Make sure GET params are preserved.
self.getPage("/redirect?id=3")
self.assertStatus(('302 Found', '303 See Other'))
self.assertInBody("<a href='%s/redirect/?id=3'>"
"%s/redirect/?id=3</a>" % (self.base(), self.base()))
if self.prefix():
# Corner case: the "trailing slash" redirect could be tricky if
# we're using a virtual root and the URI is "/vroot" (no slash).
self.getPage("")
self.assertStatus(('302 Found', '303 See Other'))
self.assertInBody("<a href='%s/'>%s/</a>" %
(self.base(), self.base()))
# Test that requests for NON-index methods WITH a trailing slash
# get redirected to the same URI path WITHOUT a trailing slash.
# Make sure GET params are preserved.
self.getPage("/redirect/by_code/?code=307")
self.assertStatus(('302 Found', '303 See Other'))
self.assertInBody("<a href='%s/redirect/by_code?code=307'>"
"%s/redirect/by_code?code=307</a>"
% (self.base(), self.base()))
# If the trailing_slash tool is off, CP should just continue
# as if the slashes were correct. But it needs some help
# inside cherrypy.url to form correct output.
self.getPage('/url?path_info=page1')
self.assertBody('%s/url/page1' % self.base())
self.getPage('/url/leaf/?path_info=page1')
self.assertBody('%s/url/page1' % self.base())
def testRedirect(self):
self.getPage("/redirect/")
self.assertBody('child')
self.assertStatus(200)
self.getPage("/redirect/by_code?code=300")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(300)
self.getPage("/redirect/by_code?code=301")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(301)
self.getPage("/redirect/by_code?code=302")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(302)
self.getPage("/redirect/by_code?code=303")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(303)
self.getPage("/redirect/by_code?code=307")
self.assertMatchesBody(r"<a href='(.*)somewhere else'>\1somewhere else</a>")
self.assertStatus(307)
self.getPage("/redirect/nomodify")
self.assertBody('')
self.assertStatus(304)
self.getPage("/redirect/proxy")
self.assertBody('')
self.assertStatus(305)
# HTTPRedirect on error
self.getPage("/redirect/error/")
self.assertStatus(('302 Found', '303 See Other'))
self.assertInBody('/errpage')
# Make sure str(HTTPRedirect()) works.
self.getPage("/redirect/stringify", protocol="HTTP/1.0")
self.assertStatus(200)
self.assertBody("(['%s/'], 302)" % self.base())
if cherrypy.server.protocol_version == "HTTP/1.1":
self.getPage("/redirect/stringify", protocol="HTTP/1.1")
self.assertStatus(200)
self.assertBody("(['%s/'], 303)" % self.base())
# check that #fragments are handled properly
# http://skrb.org/ietf/http_errata.html#location-fragments
frag = "foo"
self.getPage("/redirect/fragment/%s" % frag)
self.assertMatchesBody(r"<a href='(.*)\/some\/url\#%s'>\1\/some\/url\#%s</a>" % (frag, frag))
loc = self.assertHeader('Location')
assert loc.endswith("#%s" % frag)
self.assertStatus(('302 Found', '303 See Other'))
def test_InternalRedirect(self):
# InternalRedirect
self.getPage("/internalredirect/")
self.assertBody('hello')
self.assertStatus(200)
# Test passthrough
self.getPage("/internalredirect/petshop?user_id=Sir-not-appearing-in-this-film")
self.assertBody('0 images for Sir-not-appearing-in-this-film')
self.assertStatus(200)
# Test args
self.getPage("/internalredirect/petshop?user_id=parrot")
self.assertBody('0 images for slug')
self.assertStatus(200)
# Test POST
self.getPage("/internalredirect/petshop", method="POST",
body="user_id=terrier")
self.assertBody('0 images for fish')
self.assertStatus(200)
# Test ir before body read
self.getPage("/internalredirect/early_ir", method="POST",
body="arg=aha!")
self.assertBody("Something went horribly wrong.")
self.assertStatus(200)
self.getPage("/internalredirect/secure")
self.assertBody('Please log in')
self.assertStatus(200)
# Relative path in InternalRedirect.
# Also tests request.prev.
self.getPage("/internalredirect/relative?a=3&b=5")
self.assertBody("a=3&b=5")
self.assertStatus(200)
# InternalRedirect on error
self.getPage("/internalredirect/login/illegal/extra/vpath/atoms")
self.assertStatus(200)
self.assertBody("Something went horribly wrong.")
def testFlatten(self):
for url in ["/flatten/as_string", "/flatten/as_list",
"/flatten/as_yield", "/flatten/as_dblyield",
"/flatten/as_refyield"]:
self.getPage(url)
self.assertBody('content')
def testErrorHandling(self):
self.getPage("/error/missing")
self.assertStatus(404)
self.assertErrorPage(404, "The path '/error/missing' was not found.")
ignore = helper.webtest.ignored_exceptions
ignore.append(ValueError)
try:
valerr = '\n raise ValueError()\nValueError'
self.getPage("/error/page_method")
self.assertErrorPage(500, pattern=valerr)
self.getPage("/error/page_yield")
self.assertErrorPage(500, pattern=valerr)
self.getPage("/error/page_streamed")
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus(200)
self.assertBody("word upUnrecoverable error in the server.")
# No traceback should be present
self.getPage("/error/cause_err_in_finalize")
msg = "Illegal response status from server ('ZOO' is non-numeric)."
self.assertErrorPage(500, msg, None)
finally:
ignore.pop()
# Test custom error page.
self.getPage("/error/custom")
self.assertStatus(404)
self.assertBody("Hello, world\r\n" + (" " * 499))
# Test error in custom error page (ticket #305).
# Note that the message is escaped for HTML (ticket #310).
self.getPage("/error/noexist")
self.assertStatus(404)
msg = ("No, <b>really</b>, not found!<br />"
"In addition, the custom error page failed:\n<br />"
"[Errno 2] No such file or directory: 'nonexistent.html'")
self.assertInBody(msg)
if (hasattr(self, 'harness') and
"modpython" in self.harness.__class__.__name__.lower()):
pass
else:
# Test throw_errors (ticket #186).
self.getPage("/error/rethrow")
self.assertInBody("raise ValueError()")
def testRanges(self):
self.getPage("/ranges/get_ranges?bytes=3-6")
self.assertBody("[(3, 7)]")
# Test multiple ranges and a suffix-byte-range-spec, for good measure.
self.getPage("/ranges/get_ranges?bytes=2-4,-1")
self.assertBody("[(2, 5), (7, 8)]")
# Get a partial file.
if cherrypy.server.protocol_version == "HTTP/1.1":
self.getPage("/ranges/slice_file", [('Range', 'bytes=2-5')])
self.assertStatus(206)
self.assertHeader("Content-Type", "text/html")
self.assertHeader("Content-Range", "bytes 2-5/14")
self.assertBody("llo,")
# What happens with overlapping ranges (and out of order, too)?
self.getPage("/ranges/slice_file", [('Range', 'bytes=4-6,2-5')])
self.assertStatus(206)
ct = self.assertHeader("Content-Type")
expected_type = "multipart/byteranges; boundary="
self.assert_(ct.startswith(expected_type))
boundary = ct[len(expected_type):]
expected_body = ("\r\n--%s\r\n"
"Content-type: text/html\r\n"
"Content-range: bytes 4-6/14\r\n"
"\r\n"
"o, \r\n"
"--%s\r\n"
"Content-type: text/html\r\n"
"Content-range: bytes 2-5/14\r\n"
"\r\n"
"llo,\r\n"
"--%s--\r\n" % (boundary, boundary, boundary))
self.assertBody(expected_body)
self.assertHeader("Content-Length")
# Test "416 Requested Range Not Satisfiable"
self.getPage("/ranges/slice_file", [('Range', 'bytes=2300-2900')])
self.assertStatus(416)
# "When this status code is returned for a byte-range request,
# the response SHOULD include a Content-Range entity-header
# field specifying the current length of the selected resource"
self.assertHeader("Content-Range", "bytes */14")
elif cherrypy.server.protocol_version == "HTTP/1.0":
# Test Range behavior with HTTP/1.0 request
self.getPage("/ranges/slice_file", [('Range', 'bytes=2-5')])
self.assertStatus(200)
self.assertBody("Hello, world\r\n")
def testExpect(self):
e = ('Expect', '100-continue')
self.getPage("/headerelements/get_elements?headername=Expect", [e])
self.assertBody('100-continue')
self.getPage("/expect/expectation_failed", [('Content-Length', '200'), e])
self.assertStatus(417)
def testHeaderElements(self):
# Accept-* header elements should be sorted, with most preferred first.
h = [('Accept', 'audio/*; q=0.2, audio/basic')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("audio/basic\n"
"audio/*;q=0.2")
h = [('Accept', 'text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("text/x-c\n"
"text/html\n"
"text/x-dvi;q=0.8\n"
"text/plain;q=0.5")
# Test that more specific media ranges get priority.
h = [('Accept', 'text/*, text/html, text/html;level=1, */*')]
self.getPage("/headerelements/get_elements?headername=Accept", h)
self.assertStatus(200)
self.assertBody("text/html;level=1\n"
"text/html\n"
"text/*\n"
"*/*")
# Test Accept-Charset
h = [('Accept-Charset', 'iso-8859-5, unicode-1-1;q=0.8')]
self.getPage("/headerelements/get_elements?headername=Accept-Charset", h)
self.assertStatus("200 OK")
self.assertBody("iso-8859-5\n"
"unicode-1-1;q=0.8")
# Test Accept-Encoding
h = [('Accept-Encoding', 'gzip;q=1.0, identity; q=0.5, *;q=0')]
self.getPage("/headerelements/get_elements?headername=Accept-Encoding", h)
self.assertStatus("200 OK")
self.assertBody("gzip;q=1.0\n"
"identity;q=0.5\n"
"*;q=0")
# Test Accept-Language
h = [('Accept-Language', 'da, en-gb;q=0.8, en;q=0.7')]
self.getPage("/headerelements/get_elements?headername=Accept-Language", h)
self.assertStatus("200 OK")
self.assertBody("da\n"
"en-gb;q=0.8\n"
"en;q=0.7")
def testHeaders(self):
# Tests that each header only appears once, regardless of case.
self.getPage("/headers/doubledheaders")
self.assertBody("double header test")
hnames = [name.title() for name, val in self.headers]
for key in ['Content-Length', 'Content-Type', 'Date',
'Expires', 'Location', 'Server']:
self.assertEqual(hnames.count(key), 1)
if cherrypy.server.protocol_version == "HTTP/1.1":
# Test RFC-2047-encoded request and response header values
c = "=E2=84=ABngstr=C3=B6m"
self.getPage("/headers/ifmatch", [('If-Match', '=?utf-8?q?%s?=' % c)])
self.assertBody("u'\\u212bngstr\\xf6m'")
self.assertHeader("ETag", '=?utf-8?b?4oSrbmdzdHLDtm0=?=')
# Test a *LONG* RFC-2047-encoded request and response header value
self.getPage("/headers/ifmatch",
[('If-Match', '=?utf-8?q?%s?=' % (c * 10))])
self.assertBody("u'%s'" % ('\\u212bngstr\\xf6m' * 10))
self.assertHeader("ETag",
'=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt4oSrbmdzdHLDtm0=?='
'=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2beKEq25nc3Ryw7Zt4oSrbmdzdHLDtm0=?='
'=?utf-8?b?4oSrbmdzdHLDtm3ihKtuZ3N0csO2bQ==?=')
# Test that two request headers are collapsed into one.
# See http://www.cherrypy.org/ticket/542.
self.getPage("/headers/Accept-Charset",
headers=[("Accept-Charset", "iso-8859-5"),
("Accept-Charset", "unicode-1-1;q=0.8")])
self.assertBody("iso-8859-5, unicode-1-1;q=0.8")
# If we don't pass a Content-Type header, it should not be present
# in cherrypy.request.headers
self.getPage("/headers/Content-Type",
headers=[])
self.assertStatus(500)
# If Content-Type is present in the request, it should be present in
# cherrypy.request.headers
self.getPage("/headers/Content-Type",
headers=[("Content-type", "application/json")])
self.assertBody("application/json")
def testHTTPMethods(self):
helper.webtest.methods_with_bodies = ("POST", "PUT", "PROPFIND")
# Test that all defined HTTP methods work.
for m in defined_http_methods:
self.getPage("/method/", method=m)
# HEAD requests should not return any body.
if m == "HEAD":
self.assertBody("")
elif m == "TRACE":
# Some HTTP servers (like modpy) have their own TRACE support
self.assertEqual(self.body[:5], "TRACE")
else:
self.assertBody(m)
# Request a PUT method with a form-urlencoded body
self.getPage("/method/parameterized", method="PUT",
body="data=on+top+of+other+things")
self.assertBody("on top of other things")
# Request a PUT method with a file body
b = "one thing on top of another"
h = [("Content-Type", "text/plain"),
("Content-Length", str(len(b)))]
self.getPage("/method/request_body", headers=h, method="PUT", body=b)
self.assertStatus(200)
self.assertBody(b)
# Request a PUT method with no body whatsoever (not an empty one).
# See http://www.cherrypy.org/ticket/650.
# Provide a C-T or webtest will provide one (and a C-L) for us.
h = [("Content-Type", "text/plain")]
self.getPage("/method/reachable", headers=h, method="PUT")
self.assertBody("success")
# Request a custom method with a request body
b = ('<?xml version="1.0" encoding="utf-8" ?>\n\n'
'<propfind xmlns="DAV:"><prop><getlastmodified/>'
'</prop></propfind>')
h = [('Content-Type', 'text/xml'),
('Content-Length', str(len(b)))]
self.getPage("/method/request_body", headers=h, method="PROPFIND", body=b)
self.assertStatus(200)
self.assertBody(b)
# Request a disallowed method
self.getPage("/method/", method="LINK")
self.assertStatus(405)
# Request an unknown method
self.getPage("/method/", method="SEARCH")
self.assertStatus(501)
# For method dispatchers: make sure that an HTTP method doesn't
# collide with a virtual path atom. If you build HTTP-method
# dispatching into the core, rewrite these handlers to use
# your dispatch idioms.
self.getPage("/divorce/get?ID=13")
self.assertBody('Divorce document 13: empty')
self.assertStatus(200)
self.getPage("/divorce/", method="GET")
self.assertBody('<h1>Choose your document</h1>\n<ul>\n</ul>')
self.assertStatus(200)
def testFavicon(self):
# favicon.ico is served by staticfile.
icofilename = os.path.join(localDir, "../favicon.ico")
icofile = open(icofilename, "rb")
data = icofile.read()
icofile.close()
self.getPage("/favicon.ico")
self.assertBody(data)
def testCookies(self):
import sys
if sys.version_info >= (2, 5):
self.getPage("/cookies/single?name=First",
[('Cookie', 'First=Dinsdale;')])
self.assertHeader('Set-Cookie', 'First=Dinsdale')
self.getPage("/cookies/multiple?names=First&names=Last",
[('Cookie', 'First=Dinsdale; Last=Piranha;'),
])
self.assertHeader('Set-Cookie', 'First=Dinsdale')
self.assertHeader('Set-Cookie', 'Last=Piranha')
else:
self.getPage("/cookies/single?name=First",
[('Cookie', 'First=Dinsdale;')])
self.assertHeader('Set-Cookie', 'First=Dinsdale;')
self.getPage("/cookies/multiple?names=First&names=Last",
[('Cookie', 'First=Dinsdale; Last=Piranha;'),
])
self.assertHeader('Set-Cookie', 'First=Dinsdale;')
self.assertHeader('Set-Cookie', 'Last=Piranha;')
def testMaxRequestSize(self):
self.getPage("/", headers=[('From', "x" * 500)])
self.assertStatus(413)
# Test for http://www.cherrypy.org/ticket/421
# (Incorrect border condition in readline of SizeCheckWrapper).
# This hangs in rev 891 and earlier.
lines256 = "x" * 248
self.getPage("/",
headers=[('Host', '%s:%s' % (self.HOST, self.PORT)),
('From', lines256)])
# Test upload
body = """--x
Content-Disposition: form-data; name="file"; filename="hello.txt"
Content-Type: text/plain
%s
--x--
"""
b = body % ("x" * 96)
h = [("Content-type", "multipart/form-data; boundary=x"),
("Content-Length", len(b))]
self.getPage('/upload', h, "POST", b)
self.assertBody('Size: 96')
b = body % ("x" * 200)
h = [("Content-type", "multipart/form-data; boundary=x"),
("Content-Length", len(b))]
self.getPage('/upload', h, "POST", b)
self.assertStatus(413)
def testEmptyThreadlocals(self):
results = []
for x in xrange(20):
self.getPage("/threadlocal/")
results.append(self.body)
self.assertEqual(results, ["None"] * 20)
def testDefaultContentType(self):
self.getPage('/')
self.assertHeader('Content-Type', 'text/html')
self.getPage('/defct/plain')
self.getPage('/')
self.assertHeader('Content-Type', 'text/plain')
self.getPage('/defct/html')
def test_cherrypy_url(self):
# Input relative to current
self.getPage('/url/leaf?path_info=page1')
self.assertBody('%s/url/page1' % self.base())
self.getPage('/url/?path_info=page1')
self.assertBody('%s/url/page1' % self.base())
# Input is 'absolute'; that is, relative to script_name
self.getPage('/url/leaf?path_info=/page1')
self.assertBody('%s/page1' % self.base())
self.getPage('/url/?path_info=/page1')
self.assertBody('%s/page1' % self.base())
# Single dots
self.getPage('/url/leaf?path_info=./page1')
self.assertBody('%s/url/page1' % self.base())
self.getPage('/url/leaf?path_info=other/./page1')
self.assertBody('%s/url/other/page1' % self.base())
self.getPage('/url/?path_info=/other/./page1')
self.assertBody('%s/other/page1' % self.base())
# Double dots
self.getPage('/url/leaf?path_info=../page1')
self.assertBody('%s/page1' % self.base())
self.getPage('/url/leaf?path_info=other/../page1')
self.assertBody('%s/url/page1' % self.base())
self.getPage('/url/leaf?path_info=/other/../page1')
self.assertBody('%s/page1' % self.base())
# Output relative to current path or script_name
self.getPage('/url/?path_info=page1&relative=True')
self.assertBody('page1')
self.getPage('/url/leaf?path_info=/page1&relative=True')
self.assertBody('../page1')
self.getPage('/url/leaf?path_info=../page1&relative=True')
self.assertBody('../page1')
self.getPage('/url/?path_info=other/../page1&relative=True')
self.assertBody('page1')
if __name__ == '__main__':
setup_server()
helper.testmain()
| 38.370056 | 104 | 0.545289 | 38,996 | 0.956981 | 12,795 | 0.313995 | 0 | 0 | 0 | 0 | 15,359 | 0.376917 |
f52efca4ad0dbdcec53aee2fa61bc784274e7d40 | 1,036 | py | Python | day4/solution1.py | zirne/aoc19 | 98feea895f0113ef60738723ca976dcbef0629b9 | [
"MIT"
]
| null | null | null | day4/solution1.py | zirne/aoc19 | 98feea895f0113ef60738723ca976dcbef0629b9 | [
"MIT"
]
| null | null | null | day4/solution1.py | zirne/aoc19 | 98feea895f0113ef60738723ca976dcbef0629b9 | [
"MIT"
]
| null | null | null | # Solution 1
def readInputFile(filename):
f = open(filename, "r")
inputString = f.read()
f.close()
return inputString
input = readInputFile("input.txt").strip()
print(input)
lowest = input.split("-")[0]
highest = input.split("-")[1]
current = int(input.split("-")[0])
print(lowest)
print(highest)
def checkNeverDecreaseRule(n):
n = str(n)
l = len(n)
i = 0
while i < l - 1:
# print("comparing " + n[i] + " with " + n[i + 1] + "...")
if int(n[i]) > int(n[i + 1]):
return False
i += 1
return True
def checkHasAdjacentSame(n):
n = str(n)
l = len(n)
i = 0
adjCount = 0
while i < l - 1:
# print("comparing " + n[i] + " with " + n[i + 1] + "...")
if n[i] == n[i + 1]:
adjCount += 1
i += 1
if adjCount >= 1:
return True
else:
return False
resultArr = []
while current <= int(highest):
if checkNeverDecreaseRule(current) and checkHasAdjacentSame(current):
resultArr.append(current)
#print(checkNeverDecreaseRule(lowest))
#print(checkHasAdjacentSame(lowest))
current += 1
print(len(resultArr)) | 18.836364 | 70 | 0.621622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.217181 |
f52fa19632597f93eba421103fbc7100653b7f9d | 763 | py | Python | e2e/Tests/Transactions/Verify.py | rikublock/Meros | 7a3ae9c78af388eb523bc8a2c840018fc058ef44 | [
"CC0-1.0"
]
| null | null | null | e2e/Tests/Transactions/Verify.py | rikublock/Meros | 7a3ae9c78af388eb523bc8a2c840018fc058ef44 | [
"CC0-1.0"
]
| null | null | null | e2e/Tests/Transactions/Verify.py | rikublock/Meros | 7a3ae9c78af388eb523bc8a2c840018fc058ef44 | [
"CC0-1.0"
]
| 1 | 2021-02-08T23:46:35.000Z | 2021-02-08T23:46:35.000Z | #Transactions classes.
from e2e.Classes.Transactions.Transaction import Transaction
from e2e.Classes.Transactions.Transactions import Transactions
#TestError Exception.
from e2e.Tests.Errors import TestError
#RPC class.
from e2e.Meros.RPC import RPC
#Sleep standard function.
from time import sleep
#Verify a Transaction.
def verifyTransaction(
rpc: RPC,
tx: Transaction
) -> None:
if rpc.call("transactions", "getTransaction", [tx.hash.hex()]) != tx.toJSON():
raise TestError("Transaction doesn't match.")
#Verify the Transactions.
def verifyTransactions(
rpc: RPC,
transactions: Transactions
) -> None:
#Sleep to ensure data races aren't a problem.
sleep(2)
for tx in transactions.txs:
verifyTransaction(rpc, transactions.txs[tx])
| 23.84375 | 80 | 0.756225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.300131 |
f52fef7331b7922effcc0ce7dc2004ff0e5e1b57 | 149 | py | Python | src/pygame_utils/sprite/gamecomponent.py | MarronEyes/pygame_utils | 27a1f1328533d04c20ccb95208d44fda3be81a09 | [
"MIT"
]
| null | null | null | src/pygame_utils/sprite/gamecomponent.py | MarronEyes/pygame_utils | 27a1f1328533d04c20ccb95208d44fda3be81a09 | [
"MIT"
]
| null | null | null | src/pygame_utils/sprite/gamecomponent.py | MarronEyes/pygame_utils | 27a1f1328533d04c20ccb95208d44fda3be81a09 | [
"MIT"
]
| null | null | null | import pygame
from graphics.component import Component
class GameComponent(Component):
def __init__(self) -> None:
super().__init__()
| 16.555556 | 40 | 0.718121 | 91 | 0.610738 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5301087690900f18790595cf080153f91b40dd0 | 954 | py | Python | motivation_quote/app.py | lukas-weiss/motivation-quote | 90c73342a71f6a8f8b5339b5d080d19ac67083b7 | [
"MIT"
]
| null | null | null | motivation_quote/app.py | lukas-weiss/motivation-quote | 90c73342a71f6a8f8b5339b5d080d19ac67083b7 | [
"MIT"
]
| null | null | null | motivation_quote/app.py | lukas-weiss/motivation-quote | 90c73342a71f6a8f8b5339b5d080d19ac67083b7 | [
"MIT"
]
| null | null | null | import json
import os.path
import logging
import csv
from random import randint
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def get_quote(file):
if os.path.exists(file):
with open(file) as csvfile:
quotes = list(csv.reader(csvfile, delimiter=';'))
max_quotes = len(quotes) - 1
rand_quotes_idx = randint(0, max_quotes)
logger.debug(quotes[rand_quotes_idx])
return quotes[rand_quotes_idx]
else:
logger.info(file + " not found")
def lambda_handler(event, context):
# logger.debug(context.aws_request_id)
quote_entry = get_quote("quotes.csv")
logger.debug(quote_entry)
quote = ""
author = ""
if quote_entry is not None:
quote = quote_entry[0]
author = quote_entry[1]
return {
"statusCode": 200,
"body": json.dumps({
"quote": quote,
"author": author
}),
}
| 25.105263 | 61 | 0.603774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.106918 |
f531e1bea64fba94ad609a7c42aeb9cf4d1498ca | 3,142 | py | Python | tools/extract_textline.py | bitcoder-17/scale-digits-recognition | b75c658ffdc830784ae4be9c007909e4c8f1d695 | [
"MIT"
]
| null | null | null | tools/extract_textline.py | bitcoder-17/scale-digits-recognition | b75c658ffdc830784ae4be9c007909e4c8f1d695 | [
"MIT"
]
| null | null | null | tools/extract_textline.py | bitcoder-17/scale-digits-recognition | b75c658ffdc830784ae4be9c007909e4c8f1d695 | [
"MIT"
]
| null | null | null | from pathlib import Path
import cv2
import json
import math
import numpy as np
from argparse import ArgumentParser
def distance(p1, p2):
return math.sqrt((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)
def order_points(points):
pts = {}
for x1, y1 in points:
count_x_larger = 0
count_x_smaller = 0
count_y_larger = 0
count_y_smaller = 0
for x2, y2 in points:
if x1 > x2:
count_x_larger += 1
elif x1 < x2:
count_x_smaller += 1
if y1 > y2:
count_y_larger += 1
elif y1 < y2:
count_y_smaller += 1
p = (x1, y1)
if count_x_larger >= 2 and count_y_larger >= 2:
pts['br'] = p
elif count_x_smaller >= 2 and count_y_larger >= 2:
pts['bl'] = p
elif count_y_smaller >= 2 and count_x_smaller >= 2:
pts['tl'] = p
else:
pts['tr'] = p
return [pts['tl'], pts['tr'], pts['br'], pts['bl']]
def get_padding_box(points, x_factor, y_factor):
tl, tr, br, bl = points
width = int(np.round(max([distance(tl, tr), distance(bl, br)])))
height = int(np.round(max([distance(tl, bl), distance(tr, br)])))
padding_x = x_factor * width
padding_y = y_factor * height
points2 = [
[tl[0] - padding_x, tl[1] - padding_y],
[tr[0] + padding_x, tr[1] - padding_y],
[br[0] + padding_x, br[1] + padding_y],
[bl[0] - padding_x, bl[1] + padding_y],
]
return points2
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('input_dir', type=str,
help='Directory where the frame image and the json label be')
parser.add_argument('output_dir', type=str,
help='Directory where the textline would be extracted to')
parser.add_argument('--ext', type=str, default='png')
args = parser.parse_args()
input_dir = Path(args.input_dir)
output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
jsons = list(input_dir.glob('*.json'))
json_path: Path
for json_path in jsons:
label_dict = json.load(open(json_path, 'rt'))
if len(label_dict['shapes']) == 0:
continue
frame = cv2.imread(str(json_path.with_suffix(f'.{args.ext}')))
for i, shape in enumerate(label_dict['shapes']):
points = order_points(shape['points'])
tl, tr, br, bl = points
width = int(np.round(max([distance(tl, tr), distance(bl, br)])))
height = int(np.round(max([distance(tl, bl), distance(tr, br)])))
dst = np.array([[0, 0],
[width - 1, 0],
[width - 1, height - 1],
[0, height - 1]], dtype=np.float32)
M = cv2.getPerspectiveTransform(np.array(points, dtype=np.float32), dst)
warp = cv2.warpPerspective(frame, M, (width, height))
output_path = output_dir.joinpath(json_path.stem + f'.{args.ext}')
cv2.imwrite(str(output_path), warp)
| 33.073684 | 85 | 0.54965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.078931 |
f532177c4078c1e01572de399b2bc77a18421da8 | 14,159 | py | Python | blender/2.79/scripts/addons/io_coat3D/tex.py | uzairakbar/bpy2.79 | 3a3e0004ac6783c4e4b89d939e4432de99026a85 | [
"MIT"
]
| 2 | 2019-11-27T09:05:42.000Z | 2020-02-20T01:25:23.000Z | io_coat3D/tex.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
]
| null | null | null | io_coat3D/tex.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
]
| 4 | 2020-02-19T20:02:26.000Z | 2022-02-11T18:47:56.000Z | # ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
import bpy
import os
def find_index(objekti):
luku = 0
for tex in objekti.active_material.texture_slots:
if(not(hasattr(tex,'texture'))):
break
luku = luku +1
return luku
def gettex(mat_list, objekti, scene,export):
coat3D = bpy.context.scene.coat3D
coa = objekti.coat3D
if(bpy.context.scene.render.engine == 'VRAY_RENDER' or bpy.context.scene.render.engine == 'VRAY_RENDER_PREVIEW'):
vray = True
else:
vray = False
take_color = 0
take_spec = 0
take_normal = 0
take_disp = 0
bring_color = 1
bring_spec = 1
bring_normal = 1
bring_disp = 1
texcoat = {}
texcoat['color'] = []
texcoat['specular'] = []
texcoat['nmap'] = []
texcoat['disp'] = []
texu = []
if(export):
objekti.coat3D.objpath = export
nimi = os.path.split(export)[1]
osoite = os.path.dirname(export) + os.sep #pitaa ehka muuttaa
for mate in objekti.material_slots:
for tex_slot in mate.material.texture_slots:
if(hasattr(tex_slot,'texture')):
if(tex_slot.texture.type == 'IMAGE'):
if tex_slot.texture.image is not None:
tex_slot.texture.image.reload()
else:
if(os.sys.platform == 'win32'):
osoite = os.path.expanduser("~") + os.sep + 'Documents' + os.sep + '3DC2Blender' + os.sep + 'Textures' + os.sep
else:
osoite = os.path.expanduser("~") + os.sep + '3DC2Blender' + os.sep + 'Textures' + os.sep
ki = os.path.split(coa.applink_name)[1]
ko = os.path.splitext(ki)[0]
just_nimi = ko + '_'
just_nimi_len = len(just_nimi)
print('terve:' + coa.applink_name)
if(len(objekti.material_slots) != 0):
for obj_tex in objekti.active_material.texture_slots:
if(hasattr(obj_tex,'texture')):
if(obj_tex.texture.type == 'IMAGE'):
if(obj_tex.use_map_color_diffuse):
bring_color = 0;
if(obj_tex.use_map_specular):
bring_spec = 0;
if(obj_tex.use_map_normal):
bring_normal = 0;
if(obj_tex.use_map_displacement):
bring_disp = 0;
files = os.listdir(osoite)
for i in files:
tui = i[:just_nimi_len]
if(tui == just_nimi):
texu.append(i)
for yy in texu:
minimi = (yy.rfind('_'))+1
maksimi = (yy.rfind('.'))
tex_name = yy[minimi:maksimi]
koko = ''
koko += osoite
koko += yy
texcoat[tex_name].append(koko)
if((texcoat['color'] or texcoat['nmap'] or texcoat['disp'] or texcoat['specular']) and (len(objekti.material_slots)) == 0):
materials_old = bpy.data.materials.keys()
bpy.ops.material.new()
materials_new = bpy.data.materials.keys()
new_ma = list(set(materials_new).difference(set(materials_old)))
new_mat = new_ma[0]
ki = bpy.data.materials[new_mat]
objekti.data.materials.append(ki)
if(bring_color == 1 and texcoat['color']):
index = find_index(objekti)
tex = bpy.ops.Texture
objekti.active_material.texture_slots.create(index)
total_mat = len(objekti.active_material.texture_slots.items())
useold = ''
for seekco in bpy.data.textures:
if((seekco.name[:5] == 'Color') and (seekco.users_material == ())):
useold = seekco
if(useold == ''):
textures_old = bpy.data.textures.keys()
bpy.data.textures.new('Color',type='IMAGE')
textures_new = bpy.data.textures.keys()
name_te = list(set(textures_new).difference(set(textures_old)))
name_tex = name_te[0]
bpy.ops.image.new(name=name_tex)
bpy.data.images[name_tex].filepath = texcoat['color'][0]
bpy.data.images[name_tex].source = 'FILE'
objekti.active_material.texture_slots[index].texture = bpy.data.textures[name_tex]
objekti.active_material.texture_slots[index].texture.image = bpy.data.images[name_tex]
if(objekti.data.uv_textures.active):
objekti.active_material.texture_slots[index].texture_coords = 'UV'
objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
objekti.active_material.texture_slots[index].texture.image.reload()
elif(useold != ''):
objekti.active_material.texture_slots[index].texture = useold
objekti.active_material.texture_slots[index].texture.image = bpy.data.images[useold.name]
objekti.active_material.texture_slots[index].texture.image.filepath = texcoat['color'][0]
if(objekti.data.uv_textures.active):
objekti.active_material.texture_slots[index].texture_coords = 'UV'
objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
if(bring_normal == 1 and texcoat['nmap']):
index = find_index(objekti)
tex = bpy.ops.Texture
objekti.active_material.texture_slots.create(index)
total_mat = len(objekti.active_material.texture_slots.items())
useold = ''
for seekco in bpy.data.textures:
if((seekco.name[:6] == 'Normal') and (seekco.users_material == ())):
useold = seekco
if(useold == ''):
textures_old = bpy.data.textures.keys()
bpy.data.textures.new('Normal',type='IMAGE')
textures_new = bpy.data.textures.keys()
name_te = list(set(textures_new).difference(set(textures_old)))
name_tex = name_te[0]
bpy.ops.image.new(name=name_tex)
bpy.data.images[name_tex].filepath = texcoat['nmap'][0]
bpy.data.images[name_tex].source = 'FILE'
objekti.active_material.texture_slots[index].texture = bpy.data.textures[name_tex]
objekti.active_material.texture_slots[index].texture.image = bpy.data.images[name_tex]
if(objekti.data.uv_textures.active):
objekti.active_material.texture_slots[index].texture_coords = 'UV'
objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
objekti.active_material.texture_slots[index].use_map_color_diffuse = False
objekti.active_material.texture_slots[index].use_map_normal = True
objekti.active_material.texture_slots[index].texture.image.reload()
if(vray):
bpy.data.textures[name_tex].vray_slot.BRDFBump.map_type = 'TANGENT'
else:
bpy.data.textures[name_tex].use_normal_map = True
objekti.active_material.texture_slots[index].normal_map_space = 'TANGENT'
objekti.active_material.texture_slots[index].normal_factor = 1
elif(useold != ''):
objekti.active_material.texture_slots[index].texture = useold
objekti.active_material.texture_slots[index].texture.image = bpy.data.images[useold.name]
objekti.active_material.texture_slots[index].texture.image.filepath = texcoat['nmap'][0]
if(objekti.data.uv_textures.active):
objekti.active_material.texture_slots[index].texture_coords = 'UV'
objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
objekti.active_material.texture_slots[index].use_map_color_diffuse = False
objekti.active_material.texture_slots[index].use_map_normal = True
objekti.active_material.texture_slots[index].normal_factor = 1
if(bring_spec == 1 and texcoat['specular']):
index = find_index(objekti)
objekti.active_material.texture_slots.create(index)
useold = ''
for seekco in bpy.data.textures:
if((seekco.name[:8] == 'Specular') and (seekco.users_material == ())):
useold = seekco
if(useold == ''):
textures_old = bpy.data.textures.keys()
bpy.data.textures.new('Specular',type='IMAGE')
textures_new = bpy.data.textures.keys()
name_te = list(set(textures_new).difference(set(textures_old)))
name_tex = name_te[0]
bpy.ops.image.new(name=name_tex)
bpy.data.images[name_tex].filepath = texcoat['specular'][0]
bpy.data.images[name_tex].source = 'FILE'
objekti.active_material.texture_slots[index].texture = bpy.data.textures[name_tex]
objekti.active_material.texture_slots[index].texture.image = bpy.data.images[name_tex]
if(objekti.data.uv_textures.active):
objekti.active_material.texture_slots[index].texture_coords = 'UV'
objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
objekti.active_material.texture_slots[index].use_map_color_diffuse = False
objekti.active_material.texture_slots[index].use_map_specular = True
objekti.active_material.texture_slots[index].texture.image.reload()
elif(useold != ''):
objekti.active_material.texture_slots[index].texture = useold
objekti.active_material.texture_slots[index].texture.image = bpy.data.images[useold.name]
objekti.active_material.texture_slots[index].texture.image.filepath = texcoat['specular'][0]
if(objekti.data.uv_textures.active):
objekti.active_material.texture_slots[index].texture_coords = 'UV'
objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
objekti.active_material.texture_slots[index].use_map_color_diffuse = False
objekti.active_material.texture_slots[index].use_map_specular = True
if(bring_disp == 1 and texcoat['disp']):
index = find_index(objekti)
objekti.active_material.texture_slots.create(index)
useold = ''
for seekco in bpy.data.textures:
if((seekco.name[:12] == 'Displacement') and (seekco.users_material == ())):
useold = seekco
if useold == "":
textures_old = bpy.data.textures.keys()
bpy.data.textures.new('Displacement',type='IMAGE')
textures_new = bpy.data.textures.keys()
name_te = list(set(textures_new).difference(set(textures_old)))
name_tex = name_te[0]
bpy.ops.image.new(name=name_tex)
bpy.data.images[name_tex].filepath = texcoat['disp'][0]
bpy.data.images[name_tex].source = 'FILE'
objekti.active_material.texture_slots[index].texture = bpy.data.textures[name_tex]
objekti.active_material.texture_slots[index].texture.image = bpy.data.images[name_tex]
if(objekti.data.uv_textures.active):
objekti.active_material.texture_slots[index].texture_coords = 'UV'
objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
objekti.active_material.texture_slots[index].use_map_color_diffuse = False
objekti.active_material.texture_slots[index].use_map_displacement = True
objekti.active_material.texture_slots[index].texture.image.reload()
elif(useold != ''):
objekti.active_material.texture_slots[index].texture = useold
objekti.active_material.texture_slots[index].texture.image = bpy.data.images[useold.name]
objekti.active_material.texture_slots[index].texture.image.filepath = texcoat['disp'][0]
if(objekti.data.uv_textures.active):
objekti.active_material.texture_slots[index].texture_coords = 'UV'
objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
objekti.active_material.texture_slots[index].use_map_color_diffuse = False
objekti.active_material.texture_slots[index].use_map_displacement = True
if(vray):
objekti.active_material.texture_slots[index].texture.use_interpolation = False
objekti.active_material.texture_slots[index].displacement_factor = 0.05
else:
disp_modi = ''
for seek_modi in objekti.modifiers:
if(seek_modi.type == 'DISPLACE'):
disp_modi = seek_modi
break
if(disp_modi):
disp_modi.texture = objekti.active_material.texture_slots[index].texture
if(objekti.data.uv_textures.active):
disp_modi.texture_coords = 'UV'
disp_modi.uv_layer = objekti.data.uv_textures.active.name
else:
objekti.modifiers.new('Displace',type='DISPLACE')
objekti.modifiers['Displace'].texture = objekti.active_material.texture_slots[index].texture
if(objekti.data.uv_textures.active):
objekti.modifiers['Displace'].texture_coords = 'UV'
objekti.modifiers['Displace'].uv_layer = objekti.data.uv_textures.active.name
return('FINISHED')
| 42.139881 | 127 | 0.634296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,381 | 0.097535 |
f5327046ce5fcde6d3a6fd1f71b52eace22cd4ab | 3,613 | py | Python | proxy_prometheus_alerts.py | mhagander/promnagios | 12329b7abbbb76746784c4b706f4784c63bae194 | [
"PostgreSQL"
]
| 1 | 2019-06-07T14:10:14.000Z | 2019-06-07T14:10:14.000Z | proxy_prometheus_alerts.py | mhagander/promnagios | 12329b7abbbb76746784c4b706f4784c63bae194 | [
"PostgreSQL"
]
| null | null | null | proxy_prometheus_alerts.py | mhagander/promnagios | 12329b7abbbb76746784c4b706f4784c63bae194 | [
"PostgreSQL"
]
| null | null | null | #!/usr/bin/env python3
#
# Proxy alerts generated by Prometheus Alertmanager turning them into
# nagios passive alert information.
#
# Copyright 2019-2020, PostgreSQL Infrastructure Team
# Author: Magnus Hagander
#
import argparse
import http.server
import json
import time
import sys
missed_alerts = 0
def send_nagios_alert(status, alertname, hostname, info, severity):
global args
if status == 'firing':
if severity == 'CRITICAL':
alertlevel = 2
elif severity == 'WARNING':
alertlevel = 1
else:
# Actively specified to something that's not warning or critical,
# so we're just going to map it to critical.
alertlevel = 2
elif status == 'resolved':
alertlevel = 0
info = ""
else:
raise Exception("Unknown alert status {0}".format(status))
if args.hostsuffix:
hostname = "{0}.{1}".format(hostname, args.hostsuffix)
out = "[{0}] PROCESS_SERVICE_CHECK_RESULT;{1};{2};{3};{4}\n".format(
int(time.time()),
hostname,
alertname,
alertlevel,
info
)
with open(args.nagioscmd, 'w') as f:
f.write(out)
class NotificationHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
if self.path == '/ping':
global missed_alerts
if missed_alerts:
self.send_and_end(500,
'Missed {0} alerts!'.format(
missed_alerts).encode('utf8'))
return
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(b'OK')
return
self.send_and_end(404, 'Not found')
def do_POST(self):
if self.path == '/alert':
if self.headers['Content-Type'] != 'application/json':
self.send_and_end(415, b'Must be json')
return
j = json.loads(self.rfile.read(
int(self.headers['Content-Length'])
).decode('utf8'))
try:
for a in j['alerts']:
status = a['status']
alertname = a['labels']['alertname']
hostname = a['labels']['name']
info = a['annotations']['summary']
severity = a['labels'].get('severity', 'CRITICAL').upper()
send_nagios_alert(status, alertname, hostname, info, severity)
except Exception as e:
print("MISSED ALERT: %s" % e)
global missed_alerts
missed_alerts += 1
self.send_and_end(200, b"OK")
def send_and_end(self, code, msg):
self.send_response(code)
self.end_headers()
self.wfile.write(msg)
if __name__ == "__main__":
global args
parser = argparse.ArgumentParser(
description="Create nagios alerts from prometheus monitors"
)
parser.add_argument('--hostsuffix', help='Suffix to add to hostnamees')
parser.add_argument('--port', help='TCP port to bind to')
parser.add_argument('--nagioscmd', help='Path to nagios command file')
args = parser.parse_args()
if not args.port:
print("Port must be specified")
sys.exit(1)
if not args.nagioscmd:
print("Nagios command path must be specified")
sys.exit(1)
server_address = ('localhost', int(args.port))
httpd = http.server.HTTPServer(server_address, NotificationHandler)
httpd.serve_forever()
| 30.361345 | 82 | 0.568779 | 1,677 | 0.464157 | 0 | 0 | 0 | 0 | 0 | 0 | 966 | 0.267368 |
f53445eedfca0ebec216d205d17da7023b06710f | 172 | py | Python | QuantLib/tsa/smooth/__init__.py | wanhanwan/Packages | 14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3 | [
"MIT"
]
| 5 | 2018-06-29T16:56:10.000Z | 2019-06-20T03:31:44.000Z | QuantLib/tsa/smooth/__init__.py | wanhanwan/Packages | 14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3 | [
"MIT"
]
| null | null | null | QuantLib/tsa/smooth/__init__.py | wanhanwan/Packages | 14dfbd70603d45bb6c8c161c56b9ed9cf7c301d3 | [
"MIT"
]
| 3 | 2018-06-25T06:37:17.000Z | 2018-11-22T08:12:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# __init__.py
# @Author : wanhanwan ([email protected])
# @Date : 2019/11/25 下午1:20:12
from .filter import llt_filter | 24.571429 | 46 | 0.668605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.795455 |
f53682f7365da61bd7d0f3f72b18779b18999529 | 173 | py | Python | pred_learn/utils/various.py | EqThinker/deep-track | c72dc7b182c66c13fb6f5df38b6ed6e78f625a41 | [
"Apache-2.0"
]
| null | null | null | pred_learn/utils/various.py | EqThinker/deep-track | c72dc7b182c66c13fb6f5df38b6ed6e78f625a41 | [
"Apache-2.0"
]
| 8 | 2020-09-25T23:51:58.000Z | 2022-02-10T00:32:55.000Z | pred_learn/utils/various.py | EqThinker/deep-track | c72dc7b182c66c13fb6f5df38b6ed6e78f625a41 | [
"Apache-2.0"
]
| null | null | null |
def get_n_params(model):
pp=0
for p in list(model.parameters()):
nn=1
for s in list(p.size()):
nn = nn*s
pp += nn
return pp
| 17.3 | 38 | 0.479769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f536be230ab9f47d327f6fa5a8e54f230ab096d9 | 1,745 | py | Python | chatServer/server.py | RobbeBryssinck/chatApplication | 628ab6acb2b19d26d3e5c064cbea14747041f43e | [
"MIT"
]
| null | null | null | chatServer/server.py | RobbeBryssinck/chatApplication | 628ab6acb2b19d26d3e5c064cbea14747041f43e | [
"MIT"
]
| null | null | null | chatServer/server.py | RobbeBryssinck/chatApplication | 628ab6acb2b19d26d3e5c064cbea14747041f43e | [
"MIT"
]
| null | null | null | import socket
import sys
import os
import optparse
from threading import *
def createServer(ip, port):
# create a TCP socket
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to the port
server_address = (ip, port)
print("starting up on {} port {}".format(*server_address))
sck.bind(server_address)
# put the socket into server mode
sck.listen(5)
return sck
def clientHandler(sck, conn, client, logs):
# initialise user
name = conn.recv(2014)
# receive data
while True:
try:
data = conn.recv(1024)
message = name.decode() + ': ' + data.decode() + '\n'
print(message)
updateClients(message)
logs.write(message)
except:
message = name.decode() + " closed the connection.\n"
logs.write(message)
print(message)
break
conn.close()
def updateClients(message):
for client in clients:
client.send(bytes(message, 'ASCII'))
def main():
# option to set port when launching the server
parser = optparse.OptionParser("Usage: python3 server.py -h <server ip> -p <server port>")
parser.add_option('-p', dest='port', type='int', help="specify server port")
parser.add_option('-h', dest='ip', type='string', help="specify server ip")
(options, args) = parser.parse_args()
port = options.port
ip = options.ip
if port == None:
print(parser.usage)
exit(0)
logs = open('./logs.txt', 'a+')
sck = createServer(ip, port)
while True:
# wait for connection
conn, client = sck.accept()
clients.append(conn)
# log connection
message = client[0] + " connected.\n"
print(message)
logs.write(message)
# start thread
t = Thread(target=clientHandler, args=(sck, conn, client, logs))
t.start()
clients = []
if __name__ == '__main__':
main()
| 19.606742 | 91 | 0.676218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.25788 |
f537b763bb0939c0d65ba5d32dd7d3fcdadbcca3 | 1,502 | py | Python | tests/test_utils_bytes.py | cwichel/embutils | 188d86d84637088bafef188b3312078048934113 | [
"MIT"
]
| null | null | null | tests/test_utils_bytes.py | cwichel/embutils | 188d86d84637088bafef188b3312078048934113 | [
"MIT"
]
| null | null | null | tests/test_utils_bytes.py | cwichel/embutils | 188d86d84637088bafef188b3312078048934113 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
# -*- coding: ascii -*-
"""
Byte utilities testing.
:date: 2021
:author: Christian Wiche
:contact: [email protected]
:license: The MIT License (MIT)
"""
import unittest
from embutils.utils import bitmask, reverse_bits, reverse_bytes
# -->> Definitions <<------------------
# -->> Test API <<---------------------
class TestBytes(unittest.TestCase):
"""
Test byte utilities.
"""
def test_01_bitmask(self):
"""
Test bitmask generation.
"""
# Test bitmask fill
mask = bitmask(bit=7, fill=True)
assert mask == 0b11111111
# Test bitmask
mask = bitmask(bit=7)
assert mask == 0b10000000
def test_02_reverse_bits(self):
"""
Test bit reverse functionality
"""
# Test using fixed size
rev_bits = reverse_bits(value=0b00101011, size=8)
assert rev_bits == 0b11010100
# Test using minimum size
rev_bits = reverse_bits(value=0b00101011)
assert rev_bits == 0b110101
def test_03_reverse_bytes(self):
"""
Test byte reverse functionality.
"""
# Test using fixed size
rev_bytes = reverse_bytes(value=0x00020304, size=4)
assert rev_bytes == 0x04030200
# Test using minimum size
rev_bytes = reverse_bytes(value=0x00020304)
assert rev_bytes == 0x040302
# -->> Test Execution <<---------------
if __name__ == '__main__':
unittest.main()
| 23.107692 | 63 | 0.581891 | 1,062 | 0.707057 | 0 | 0 | 0 | 0 | 0 | 0 | 631 | 0.420107 |
f538ca85acdf301ac647a1ecf10d45b209f9fdd3 | 2,419 | py | Python | events/migrations/0020_add_event_comments.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
]
| 446 | 2018-01-21T09:22:41.000Z | 2022-03-25T17:46:12.000Z | events/migrations/0020_add_event_comments.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
]
| 272 | 2018-01-03T16:55:39.000Z | 2022-03-11T23:12:30.000Z | events/migrations/0020_add_event_comments.py | alysivji/GetTogether | 403d9945fff019701de41d081ad4452e771e1ce1 | [
"BSD-2-Clause"
]
| 100 | 2018-01-27T02:04:15.000Z | 2021-09-09T09:02:21.000Z | # Generated by Django 2.0 on 2018-03-24 02:55
import datetime
import django.db.models.deletion
from django.db import migrations, models
import mptt.fields
class Migration(migrations.Migration):
dependencies = [("events", "0019_add_org_slug")]
operations = [
migrations.CreateModel(
name="EventComment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("body", models.TextField()),
(
"created_time",
models.DateTimeField(db_index=True, default=datetime.datetime.now),
),
(
"status",
models.SmallIntegerField(
choices=[(-1, "Removed"), (0, "Pending"), (1, "Approved")],
db_index=True,
default=1,
),
),
("lft", models.PositiveIntegerField(db_index=True, editable=False)),
("rght", models.PositiveIntegerField(db_index=True, editable=False)),
("tree_id", models.PositiveIntegerField(db_index=True, editable=False)),
("level", models.PositiveIntegerField(db_index=True, editable=False)),
],
options={"abstract": False},
),
migrations.AddField(
model_name="eventcomment",
name="author",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="events.UserProfile"
),
),
migrations.AddField(
model_name="eventcomment",
name="event",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="events.Event"
),
),
migrations.AddField(
model_name="eventcomment",
name="parent",
field=mptt.fields.TreeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="children",
to="events.EventComment",
),
),
]
| 32.689189 | 88 | 0.47871 | 2,258 | 0.933444 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.131046 |
f53bf8a6756951f510b486992b5a699a1e895570 | 13,529 | py | Python | ant_algorithm.py | devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat | d766c94ab862e2856412ee19cb883033b914bd3f | [
"MIT"
]
| 1 | 2021-11-08T12:53:16.000Z | 2021-11-08T12:53:16.000Z | ant_algorithm.py | devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat | d766c94ab862e2856412ee19cb883033b914bd3f | [
"MIT"
]
| null | null | null | ant_algorithm.py | devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat | d766c94ab862e2856412ee19cb883033b914bd3f | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""Ant_Algorithm.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Zjt1SInhoaFEqSmsPjEfWQE7jhugAvZA
# **ANT ALGORITHM BY KELOMPOK 9**
1. Heri Khariono - 18081010002
2. Devan Cakra Mudra Wijaya - 18081010013
3. Ika Nur Habibah - 18081010033
4. Trisa Pratiwi - 18081010036
5. Rifky Akhmad Fernanda - 18081010126
# **1. Import Libraries**
"""
#**********************************IMPORT LIBRARIES*******************************
#Library untuk operasi matematika
import math
#Library untuk membentuk dan memanipulasi segala bentuk graf dan jaringan
import networkx as nx
#Library untuk visualisasi grafik
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from pylab import *
#Library untuk mendukung komputasi numerik
import numpy as np
#Library untuk analisis dan manipulasi data tingkat tinggi
import pandas as pn
#Library untuk untuk mengukur waktu eksekusi
from time import time
"""# **2. Read Data**"""
read_jarak_antarkota = pn.read_excel('https://raw.githubusercontent.com/devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat/master/jarak_antarkota.xlsx')
read_kota = pn.read_excel('https://raw.githubusercontent.com/devancakra/Ant-Algorithm-Pencarian-Rute-Tercepat/master/kota.xlsx')
arr_kota = np.array(read_kota)
arr_jarak_antarkota = np.array(read_jarak_antarkota)
#Grafik Map
def Grafik_Map(simpul,sudut):
plt.style.use('ggplot')
fig = plt.figure(figsize=(25.200,15))
x = simpul[:,0]
y = simpul[:,1]
#Mencetak garis
plt.plot( x, y, '--',x, y, 'p',color='#FF8000',#Warna garis
markersize=35, #Ukuran objek
linewidth=2,
markerfacecolor='#00E4B6',#Warna objek
markeredgecolor='#00FF00',#Warna tepi objek
markeredgewidth=2)#Ketebalan tepi titik
plt.title("KOTA")
legend(("Jalur","Kota"), prop = {'size': 30}, loc='lower right')
plt.grid(True)
#plt.tight_layout()
for i in range (len(simpul)):
#Pengidentifikasi kota (nomor setiap kota)
plt.annotate("Kota"+str(i+1),
size=8,
xy=simpul[i],
horizontalalignment='center',
verticalalignment='center') #Kotak dalam anotasi bbox = dict (facecolor = 'none', edgecolor = 'black', boxstyle = 'round, pad = 1')
Grafik_Map(arr_kota,arr_jarak_antarkota)
"""# **3. Implementasi Algoritma Ant**
1. Transisi status, Pembaruan Feromon Lokal, Pembaruan Feromon Global
"""
import random
class Rute_Cepat_ACO:
    # Sub class Tepi (Edge): edge object storing node a, node b, the distance between them, and the connection pheromone
class Tepi:
def __init__(self, a, b, jarak_ab, feromon_koneksi):
self.a = a
self.b = b
self.jarak_ab = jarak_ab
self.feromon_koneksi = feromon_koneksi
    # Sub class Semut (Ant)
class Semut:
def __init__(self, alpha, beta, num_simpul, tepi):
self.alpha = alpha
self.beta = beta
self.num_simpul = num_simpul
self.tepi = tepi
self.survei = None
self.jarak_tempuh = 0.0
        # Method to select the next node to visit, validating that the node has not been visited yet.
def _select_simpul(self):
            persebaran = 0.0 # start the cumulative distribution at zero, then pick a node randomly
            k_simpul_sepi = [node for node in range(self.num_simpul) if node not in self.jalur] # unvisited nodes
            heuristic_total = 0.0 # sum of distances to unvisited nodes (heuristic normaliser)
for simpul_sepi in k_simpul_sepi:
heuristic_total += self.tepi[self.jalur[-1]][simpul_sepi].jarak_ab
for simpul_sepi in k_simpul_sepi:
persebaran += pow(self.tepi[self.jalur[-1]][simpul_sepi].feromon_koneksi, self.alpha) * \
pow((heuristic_total / self.tepi[self.jalur[-1]][simpul_sepi].jarak_ab), self.beta)
nilai_random = random.uniform(0.0, persebaran)
pos_sebar = 0.0
for simpul_sepi in k_simpul_sepi:
pos_sebar += pow(self.tepi[self.jalur[-1]][simpul_sepi].feromon_koneksi, self.alpha) * \
pow((heuristic_total / self.tepi[self.jalur[-1]][simpul_sepi].jarak_ab), self.beta)
                if pos_sebar >= nilai_random:
                    return simpul_sepi
            return k_simpul_sepi[-1] # fallback for floating-point round-off, so a node is always returned
        # Method that constructs a tour by selecting one node at a time
def survei_jalur(self):
self.jalur = [random.randint(0, self.num_simpul - 1)]
while len(self.jalur) < self.num_simpul:
self.jalur.append(self._select_simpul())
return self.jalur
        # Total distance travelled from node to node along the tour
def get_jarak_tempuh(self):
self.jarak_tempuh = 0.0
for i in range(self.num_simpul):
self.jarak_tempuh += self.tepi[self.jalur[i]][self.jalur[(i + 1) % self.num_simpul]].jarak_ab
return self.jarak_tempuh
    # Attribute definitions for Rute_Cepat_ACO objects
def __init__(self, mode='ACS', jumlah_semut=10, alpha=1.0, beta=3.0, rho=0.1,
feromon_tersimpan=1.0, feromon_koneksi=1.0, langkah=100, v_simpul=None, m_jarak=None, posting=None):
self.mode = mode
self.jumlah_semut = jumlah_semut
self.rho = rho
self.feromon_tersimpan = feromon_tersimpan
self.langkah = langkah
self.num_simpul = len(v_simpul)
self.v_simpul = v_simpul
if posting is not None:
self.posting = posting
else:
self.posting = range(1, self.num_simpul + 1)
        # Declare an empty edge list
        self.tepi = [[None] * self.num_simpul for _ in range(self.num_simpul)]
        # Create an edge object for every pair i = a, j = b and store it in the edge list; this builds an n*n array, with n = number of nodes
for i in range(self.num_simpul):
for j in range(self.num_simpul):
                self.tepi[i][j] = self.tepi[j][i] = self.Tepi(i, j, m_jarak[i][j],feromon_koneksi) # store Tepi (Edge) objects in the edge list
        self.semut = [self.Semut(alpha, beta, self.num_simpul, self.tepi) for _ in range(self.jumlah_semut)] # create the ants as an array of Semut (Ant) objects
        self.jalur_terbaik = None # best path found so far
        self.jarak_terbaik = float("inf") # minimum distance found so far
    # Method that deposits pheromone along a path
def _add_feromon(self, jalur, jarak_ab, weight=1.0):
        add_feromon = self.feromon_tersimpan / jarak_ab # deposit amount Q / L for a tour of length L
        for i in range(self.num_simpul):
            self.tepi[jalur[i]][jalur[(i + 1) % self.num_simpul]].feromon_koneksi += weight * add_feromon # deposit weight * Q / L on each edge of the tour
    # Routing cycle for every ant
def _acs(self):
for k_langkah in range(self.langkah):
for k_semut in self.semut:
self._add_feromon(k_semut.survei_jalur(), k_semut.get_jarak_tempuh())
if k_semut.jarak_tempuh < self.jarak_terbaik:
self.jalur_terbaik = k_semut.jalur
self.jarak_terbaik = k_semut.jarak_tempuh
for i in range(self.num_simpul):
for j in range(i + 1, self.num_simpul):
self.tepi[i][j].feromon_koneksi *= (1.0 - self.rho)
    # Entry point called to run the optimisation
def run(self):
self._acs()
    # Plot the resulting best tour
def plot(self, line_width=1, point_radius=math.sqrt(2.0), annotation_size=10, dpi=120, save=True, name=None):
fig = plt.figure(figsize=(25.200,15))
x = [self.v_simpul[i][0] for i in self.jalur_terbaik]
x.append(x[0])
y = [self.v_simpul[i][1] for i in self.jalur_terbaik]
y.append(y[0])
        # Draw the route lines and the city markers
        plt.plot( x, y, '--',x, y, 'p',color='#FF8000',# line color
                  markersize=35, # marker size
                  linewidth=2,
                  markerfacecolor='#00E4B6',# marker color
                  markeredgecolor='#00FF00',# marker edge color
                  markeredgewidth=2)# marker edge width
        # Create legend patches placed in the top-right of the figure
handle1 = mpatches.Patch(color='white', label='Semut: '+str(self.jumlah_semut))
handle2 = mpatches.Patch(color='white', label='Langkah: '+str(self.langkah))
handle3 = mpatches.Patch(color='white', label='Rho: '+str(self.rho))
ax = plt.gca().add_artist(plt.legend(handles=[handle1,handle2,handle3],prop = {'size': 12}))
        # Result
handle4 = mpatches.Patch(color='white', label='Jarak tempuh: '+str(round(self.jarak_terbaik, 2)))
ax = plt.gca().add_artist(plt.legend(handles=[handle4],prop = {'size': 12},loc='lower left'))
        # Plot labels
plt.title(" Perutean ACS - "+self.mode)
legend(("Jalur","Kota"), prop = {'size': 30}, loc='lower right')
plt.grid(True)
        # City identifier (number of each city)
for i in self.jalur_terbaik:
plt.annotate("Kota"+str(i+1),
size=8,
xy=self.v_simpul[i],
horizontalalignment='center',
                     verticalalignment='center') # Box around the annotation: bbox = dict(facecolor='none', edgecolor='black', boxstyle='round, pad=1')
plt.show()
return self.jarak_terbaik
"""2. Konfigurasi perutean"""
# Define a function to run the different configurations in a uniform way
def config(tipe, ts, lg, t_evap):
acs = Rute_Cepat_ACO(mode=tipe, jumlah_semut=ts, langkah=lg, v_simpul=arr_kota, m_jarak=arr_jarak_antarkota, rho=t_evap)
acs.run()
jarak_jalur_akhir = acs.plot()
return jarak_jalur_akhir
# Define the different configurations
txt_config = [] # configuration labels
jumlah_semut = [] # colony size
langkah = [] # total number of steps
rho = [] # pheromone evaporation rate, BETWEEN 0 and 1
txt_config.append('Konfigurasi 1'); jumlah_semut.append(50); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 2'); jumlah_semut.append(100); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 3'); jumlah_semut.append(250); langkah.append(10); rho.append(0.1);
txt_config.append('Konfigurasi 4'); jumlah_semut.append(50); langkah.append(30); rho.append(0.5);
txt_config.append('Konfigurasi 5'); jumlah_semut.append(90); langkah.append(40); rho.append(0.5);
txt_config.append('Konfigurasi 6'); jumlah_semut.append(150); langkah.append(30); rho.append(0.5);
txt_config.append('Konfigurasi 7'); jumlah_semut.append(50); langkah.append(50); rho.append(0.1);
txt_config.append('Konfigurasi 8'); jumlah_semut.append(200); langkah.append(90); rho.append(0.1);
txt_config.append('Konfigurasi 9'); jumlah_semut.append(150); langkah.append(50); rho.append(0.1);
txt_config.append('Konfigurasi 10'); jumlah_semut.append(80); langkah.append(100); rho.append(0.5);
txt_config.append('Konfigurasi 11'); jumlah_semut.append(100); langkah.append(100); rho.append(0.5);
txt_config.append('Konfigurasi 12'); jumlah_semut.append(150); langkah.append(100); rho.append(0.5);
jarak_ab = [] # final tour distance for each configuration
tempo = [] # algorithm execution time for each configuration
for i in range(len(txt_config)):
start_time = time()
jarak_ab.append(config(txt_config[i], jumlah_semut[i], langkah[i], rho[i]))
tempo.append(time()-start_time)
"""3. Pemilihan Hasil Terbaik"""
# Plot of the three best configurations by distance
index1=jarak_ab.index(sorted(jarak_ab,reverse=False)[0])
index2=jarak_ab.index(sorted(jarak_ab,reverse=False)[1])
index3=jarak_ab.index(sorted(jarak_ab,reverse=False)[2])
if index2==index1:
index2=index2+1
if index2==index3:
index3=index3+1
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),sorted(jarak_ab,reverse=False)[0:3], edgecolor='#93329F', color='#5D87B6')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(jarak_ab[index1],jarak_ab[index2],jarak_ab[index3])-1, max(jarak_ab[index1],jarak_ab[index2],jarak_ab[index3])+1)
plt.title("Hasil konfigurasi terbaik berdasarkan jarak")
plt.ylabel('Jarak tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (jarak)\n\n')
plt.show()
# Plot of the three best configurations by time
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),(tempo[index1],tempo[index2],tempo[index3]), edgecolor='#282623', color='#138d90')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(tempo[index1],tempo[index2],tempo[index3])-1, max(tempo[index1],tempo[index2],tempo[index3])+10)
plt.title("Hasil konfigurasi terbaik berdasarkan waktu")
plt.ylabel('Waktu tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (waktu)\n\n')
plt.show()
# Plot of the three best configurations by number of steps
plt.style.use('ggplot')
fig = plt.figure(figsize=(10.80,5))
plt.bar(range(3),(langkah[index1],langkah[index2],langkah[index3]), edgecolor='#F387FF', color='#0D3E00')
plt.xticks(range(3),(txt_config[index1],txt_config[index2],txt_config[index3]), rotation=70)
plt.ylim(min(langkah[index1],langkah[index2],langkah[index3])-1, max(langkah[index1],langkah[index2],langkah[index3])+1)
plt.title("Hasil konfigurasi terbaik berdasarkan jalur")
plt.ylabel('Jalur tempuh')
plt.xlabel('Konfigurasi rute yang digunakan (jalur)\n\n')
plt.show() | 46.332192 | 168 | 0.663612 | 6,919 | 0.51142 | 0 | 0 | 0 | 0 | 0 | 0 | 4,040 | 0.298618 |
f53d0274845ff18a273019ee23bb400432511d7c | 588 | py | Python | utils/tool.py | yongleex/SBCC | 40f8e67e446fc14fc82ea87f82ee841d62520c71 | [
"MIT"
]
| 4 | 2021-09-04T04:02:57.000Z | 2021-12-27T13:27:26.000Z | utils/tool.py | yongleex/SBCC | 40f8e67e446fc14fc82ea87f82ee841d62520c71 | [
"MIT"
]
| 1 | 2021-09-10T07:40:36.000Z | 2022-01-02T06:23:12.000Z | utils/tool.py | yongleex/SBCC | 40f8e67e446fc14fc82ea87f82ee841d62520c71 | [
"MIT"
]
| 1 | 2021-09-10T07:36:29.000Z | 2021-09-10T07:36:29.000Z | import numpy as np
from scipy.ndimage import maximum_filter
class AttrDict(dict):
__setattr__ = dict.__setitem__
__getattr__ = dict.__getitem__
def signal2noise(r_map):
    """Compute the signal-to-noise ratio of a correlation plane.

    r_map has shape (w, h, c); per channel, the ratio of the highest to
    the second-highest local correlation peak is returned.
    """
    r = r_map.copy()
    # local maximum of every 5x5 spatial neighbourhood, per channel
    max_r = maximum_filter(r_map, (5, 5, 1))
    # suppress all values that are not the local peak of their neighbourhood
    ind = max_r > (r_map + 1e-3)
    r[ind] = 0.05
    r = np.reshape(r, (-1, r.shape[-1]))
    r = np.sort(r, axis=0)
    # ratio of the two largest surviving values in each channel
    ratio = r[-1, :] / r[-2, :]
    return ratio
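# --- Illustrative example (added; not part of the original module) ---
# signal2noise keeps only the local maxima of each 5x5 neighbourhood and
# returns the ratio of the two largest surviving peaks per channel, so a
# plane with one dominant peak yields a large ratio. The toy plane below
# is an assumption for demonstration only.
def _snr_demo():
    r = 0.05 * np.ones((16, 16, 1))
    r[4, 4, 0] = 1.0    # dominant correlation peak
    r[12, 12, 0] = 0.5  # secondary peak
    print(signal2noise(r))  # expected output: [2.]  (1.0 / 0.5)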
def main():
r = np.random.randn(5,5,3)
signal2noise(r)
if __name__=='__main__':
main()
| 18.375 | 63 | 0.612245 | 91 | 0.154762 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.139456 |
f53f1078d0ccf6010a2d5acd1664c6d7881e41c8 | 8,584 | py | Python | bjtunlp/train.py | bigbosskai/bjtunlp | 58d8ca53fa1d99df2f47f10a0780619c4cdba22f | [
"MIT"
]
| 1 | 2020-12-16T07:18:00.000Z | 2020-12-16T07:18:00.000Z | bjtunlp/train.py | bigbosskai/bjtunlp | 58d8ca53fa1d99df2f47f10a0780619c4cdba22f | [
"MIT"
]
| null | null | null | bjtunlp/train.py | bigbosskai/bjtunlp | 58d8ca53fa1d99df2f47f10a0780619c4cdba22f | [
"MIT"
]
| 1 | 2022-03-12T16:41:32.000Z | 2022-03-12T16:41:32.000Z | import os
import time
import argparse
from tqdm import tqdm
import torch
from torch import optim
from torch import nn
from fastNLP import BucketSampler
from fastNLP import logger
from fastNLP import DataSetIter
from fastNLP import Tester
from fastNLP import cache_results
from bjtunlp.models import BertParser
from bjtunlp.models.metrics import SegAppCharParseF1Metric, CWSPOSMetric, ParserMetric
from bjtunlp.modules.trianglelr import TriangleLR
from bjtunlp.modules.chart import save_table
from bjtunlp.modules.pipe import CTBxJointPipe
from bjtunlp.modules.word_batch import BatchSampler
from bjtunlp.modules.embedding import ElectraEmbedding
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_type', type=str,
                        help=r'Whether to use the first-order model or the second-order model; LOC represents the first-order model, CRF2 the second-order model. default:LOC',
choices=['LOC', 'CRF2'],
default='LOC')
parser.add_argument('--output', type=str,
help=r'The path where the output model is stored. default:./output',
default=r'output')
parser.add_argument('--dataset', type=str,
help=r'The data set required for training the joint model, which must include the training set, test set and development set, and the data format is CoNLL format. default:./ctb7',
default=r'G:\真正联合\bjtunlp\data\ctb7')
parser.add_argument('--pretraining', type=str,
help='Pre-trained language models Electra downloaded from huggingface. default:./discriminator',
default=r'H:\预训练语言模型\哈工大20G语料-Electra\base\discriminator')
parser.add_argument('--epochs', type=int, help='Number of epoch to train the model. default:15', default=15)
parser.add_argument('--lr', type=float, help='Learning rate setting. default:2e-5', default=2e-5)
parser.add_argument('--batch_size', type=int, help='The number of words fed to the model at a time. default:1000',
default=1000)
parser.add_argument('--clip', type=float, help='Value for gradient clipping nn.utils.clip_grad_value_. default:5.0',
default=5.0)
parser.add_argument('--weight_decay', type=float, help='L2 regularization. default:1e-2',
default=1e-2)
parser.add_argument('--device', type=int,
help='Whether to use GPU for training, 0 means cuda:0, -1 means cpu. default:0',
default=0)
parser.add_argument('--dropout', type=float, help='dropout. default:0.5', default=0.5)
parser.add_argument('--arc_mlp_size', type=int,
help='The hidden dimensions of predicting the dependency arc. default:500',
default=500)
parser.add_argument('--label_mlp_size', type=int,
help='The hidden dimensions of predicting the dependency label. default:100',
default=300)
args = parser.parse_args()
print(args)
context_path = os.getcwd()
save_path = os.path.join(context_path, args.output)
if not os.path.exists(context_path):
os.makedirs(context_path)
model_type = args.model_type
data_name = args.dataset
pretraining = args.pretraining
epochs = args.epochs
lr = args.lr # 0.01~0.001
batch_size = args.batch_size # 1000
clip = args.clip
weight_decay = args.weight_decay
device = torch.device("cuda:%d" % args.device if (torch.cuda.is_available()) else "cpu")
dropout = args.dropout # 0.3~0.6
arc_mlp_size = args.arc_mlp_size # 200, 300
label_mlp_size = args.label_mlp_size
logger.add_file(save_path + '/joint' + time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())) + '.log',
level='INFO')
    # Save the hyperparameters to the log
logger.info(f'model_type:{model_type}')
logger.info(f'data_name:{data_name}')
logger.info(f'pretraining:{pretraining}')
logger.info(f'epochs:{epochs}')
logger.info(f'lr:{lr}')
logger.info(f'batch_size:{batch_size}')
logger.info(f'clip:{clip}')
logger.info(f'weight_decay:{weight_decay}')
logger.info(f'device:{device}')
logger.info(f'dropout:{dropout}')
logger.info(f'arc_mlp_size:{arc_mlp_size}')
logger.info(f'label_mlp_size:{label_mlp_size}')
cache_name = os.path.split(data_name)[-1]
@cache_results(save_path + '/caches/{}.pkl'.format(cache_name), _refresh=False)
def get_data(data_name, pretraining):
data, special_root = CTBxJointPipe().process_from_file(data_name)
data.delete_field('bigrams')
data.delete_field('trigrams')
data.delete_field('chars')
data.rename_field('pre_chars', 'chars')
data.delete_field('pre_bigrams')
data.delete_field('pre_trigrams')
embed = ElectraEmbedding(data.get_vocab('chars'), pretraining)
return data, embed, special_root
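    # Note (added for clarity): fastNLP's @cache_results stores get_data's
    # return value in the .pkl path above; with _refresh=False later runs
    # reload the cached dataset and embedding instead of rebuilding them.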
data, embed, special_root = get_data(data_name, pretraining)
print(data)
model = BertParser(embed=embed, char_label_vocab=data.get_vocab('char_labels'),
num_pos_label=len(data.get_vocab('char_pos')), arc_mlp_size=arc_mlp_size,
label_mlp_size=label_mlp_size, dropout=dropout,
model=model_type,
special_root=special_root,
use_greedy_infer=False,
)
metric1 = SegAppCharParseF1Metric(data.get_vocab('char_labels'))
metric2 = CWSPOSMetric(data.get_vocab('char_labels'), data.get_vocab('char_pos'))
metric3 = ParserMetric(data.get_vocab('char_labels'))
metrics = [metric1, metric2, metric3]
optimizer = optim.AdamW([param for param in model.parameters() if param.requires_grad], lr=lr,
weight_decay=weight_decay)
sampler = BucketSampler(batch_size=4, seq_len_field_name='seq_lens')
train_batch = DataSetIter(batch_size=4, dataset=data.get_dataset('train'), sampler=sampler,
batch_sampler=BatchSampler(data.get_dataset('train'), batch_size, 'seq_lens'))
scheduler = TriangleLR(optimizer, len(train_batch) * epochs, schedule='linear')
best_score = 0.
best_epoch = 0
table = []
model = model.to(device)
for i in range(epochs):
for batch_x, batch_y in tqdm(train_batch, desc='Epoch: %3d' % i):
optimizer.zero_grad()
if args.device >= 0:
batch_x['chars'] = batch_x['chars'].to(device)
batch_y['char_heads'] = batch_y['char_heads'].to(device)
batch_y['char_labels'] = batch_y['char_labels'].to(device)
batch_y['char_pos'] = batch_y['char_pos'].to(device)
batch_y['sibs'] = batch_y['sibs'].to(device)
output = model(batch_x['chars'], batch_y['char_heads'], batch_y['char_labels'],
batch_y['sibs'])
loss = output['loss']
loss.backward()
nn.utils.clip_grad_value_(model.parameters(), clip)
optimizer.step()
scheduler.step()
dev_tester = Tester(data.get_dataset('dev'), model, batch_size=8, metrics=metrics, device=device, verbose=0)
dev_res = dev_tester.test()
logger.info('Epoch:%3d Dev' % i + dev_tester._format_eval_results(dev_res))
print('Epoch:%3d Dev' % i + dev_tester._format_eval_results(dev_res))
test_tester = Tester(data.get_dataset('test'), model, batch_size=8, metrics=metrics, device=device, verbose=0)
test_res = test_tester.test()
logger.info('Epoch:%3d Test' % i + test_tester._format_eval_results(test_res))
print('Epoch:%3d Test' % i + test_tester._format_eval_results(test_res))
if dev_res['SegAppCharParseF1Metric']['u_f1'] > best_score:
best_score = dev_res['SegAppCharParseF1Metric']['u_f1']
best_epoch = i
torch.save(model, save_path + '/joint.model')
table.append([dev_res, test_res])
    print('best performance on the test set corresponds to development-set epoch %d' % best_epoch)
    print('Saved the model in this directory: %s' % save_path)
    logger.info('best performance on the test set corresponds to development-set epoch %d' % best_epoch)
    logger.info('Saved the model in this directory: %s' % save_path)
logger.info(str(table[best_epoch]))
save_table(table, save_path + '/results.csv')
if __name__ == '__main__':
main()
| 48.224719 | 203 | 0.65028 | 0 | 0 | 0 | 0 | 548 | 0.063455 | 0 | 0 | 2,398 | 0.277675 |
f53f3f14419ce7e5f5fb052bfc8906e374ee8971 | 7,978 | py | Python | archived/functions/sync_elasticache/redis/LR_sync_redis_model_reuse.py | DS3Lab/LambdaML | 0afca7819e08632ba116fec8e102084e4040a47a | [
"Apache-2.0"
]
| 23 | 2021-05-17T09:24:24.000Z | 2022-01-29T18:40:44.000Z | archived/functions/sync_elasticache/redis/LR_sync_redis_model_reuse.py | DS3Lab/LambdaML | 0afca7819e08632ba116fec8e102084e4040a47a | [
"Apache-2.0"
]
| 2 | 2021-05-17T16:15:12.000Z | 2021-07-20T09:11:22.000Z | archived/functions/sync_elasticache/redis/LR_sync_redis_model_reuse.py | DS3Lab/LambdaML | 0afca7819e08632ba116fec8e102084e4040a47a | [
"Apache-2.0"
]
| 3 | 2021-05-17T09:31:53.000Z | 2021-12-02T16:29:59.000Z | import time
import torch
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
from archived.elasticache import redis_init
# The Redis helpers used below are assumed to live alongside redis_init in
# the archived.elasticache package (they are called but were not imported):
from archived.elasticache import (hset_object, hget_object, hcounter,
                                  merge_w_b_grads, put_merged_w_b_grads,
                                  get_merged_w_b_grads)
from archived.s3.get_object import get_object
from archived.old_model import LogisticRegression
from data_loader.libsvm_dataset import DenseDatasetWithLines
# lambda setting
redis_location = "test.fifamc.ng.0001.euc1.cache.amazonaws.com"
grad_bucket = "tmp-grads"
model_bucket = "tmp-updates"
local_dir = "/tmp"
w_prefix = "w_"
b_prefix = "b_"
w_grad_prefix = "w_grad_"
b_grad_prefix = "b_grad_"
# algorithm setting
learning_rate = 0.1
batch_size = 100
num_epochs = 2
validation_ratio = .2
shuffle_dataset = True
random_seed = 42
endpoint = redis_init(redis_location)
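# Synchronisation protocol implemented by handler() below (summary added
# for clarity; it is not part of the original source):
#   1. every worker writes its w/b gradients for the current batch into
#      the Redis hash `grad_bucket`;
#   2. worker 0 merges the gradients from all workers, writes the merged
#      values to `model_bucket`, and publishes an (epoch, index) flag;
#   3. the remaining workers poll that flag, read the merged gradients,
#      and bump the "counter" field before applying the update locally.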
def handler(event, context):
start_time = time.time()
bucket = event['bucket']
key = event['name']
num_features = event['num_features']
num_classes = event['num_classes']
print('bucket = {}'.format(bucket))
print('key = {}'.format(key))
key_splits = key.split("_")
worker_index = int(key_splits[0])
num_worker = int(key_splits[1])
# read file(dataset) from s3
file = get_object(bucket, key).read().decode('utf-8').split("\n")
print("read data cost {} s".format(time.time() - start_time))
parse_start = time.time()
dataset = DenseDatasetWithLines(file, num_features)
preprocess_start = time.time()
print("libsvm operation cost {}s".format(parse_start - preprocess_start))
# Creating data indices for training and validation splits:
dataset_size = len(dataset)
print("dataset size = {}".format(dataset_size))
indices = list(range(dataset_size))
split = int(np.floor(validation_ratio * dataset_size))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=train_sampler)
validation_loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=valid_sampler)
print("preprocess data cost {} s".format(time.time() - preprocess_start))
model = LogisticRegression(num_features, num_classes)
# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Training the Model
for epoch in range(num_epochs):
for batch_index, (items, labels) in enumerate(train_loader):
print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index))
batch_start = time.time()
items = Variable(items.view(-1, num_features))
labels = Variable(labels)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = model(items)
loss = criterion(outputs, labels)
loss.backward()
print("forward and backward cost {} s".format(time.time()-batch_start))
w_grad = model.linear.weight.grad.data.numpy()
b_grad = model.linear.bias.grad.data.numpy()
print("w_grad before merge = {}".format(w_grad[0][0:5]))
print("b_grad before merge = {}".format(b_grad))
            # synchronization starts here: every worker writes its gradients for this batch and epoch
sync_start = time.time()
hset_object(endpoint, grad_bucket, w_grad_prefix + str(worker_index), w_grad.tobytes())
hset_object(endpoint, grad_bucket, b_grad_prefix + str(worker_index), b_grad.tobytes())
            # merge gradients across workers
merge_start = time.time()
file_postfix = "{}_{}".format(epoch, batch_index)
if worker_index == 0:
merge_start = time.time()
w_grad_merge, b_grad_merge = \
merge_w_b_grads(endpoint,
grad_bucket, num_worker, w_grad.dtype,
w_grad.shape, b_grad.shape,
w_grad_prefix, b_grad_prefix)
print("model average time = {}".format(time.time()-merge_start))
                # the merged file may be rewritten before being accessed; workers wait on the (epoch, index) flag published below
put_merged_w_b_grads(endpoint, model_bucket,
w_grad_merge, b_grad_merge,
w_grad_prefix, b_grad_prefix)
hset_object(endpoint, model_bucket, "epoch", epoch)
hset_object(endpoint, model_bucket, "index", batch_index)
#delete_expired_w_b(endpoint,
# model_bucket, epoch, batch_index, w_grad_prefix, b_grad_prefix)
model.linear.weight.grad = Variable(torch.from_numpy(w_grad_merge))
model.linear.bias.grad = Variable(torch.from_numpy(b_grad_merge))
else:
                # wait for the (epoch, index) flag before reading the merged gradients
while hget_object(endpoint, model_bucket, "epoch") != None:
if int(hget_object(endpoint, model_bucket, "epoch")) == epoch \
and int(hget_object(endpoint, model_bucket, "index")) == batch_index:
break
time.sleep(0.01)
w_grad_merge, b_grad_merge = get_merged_w_b_grads(endpoint,model_bucket,
w_grad.dtype, w_grad.shape, b_grad.shape,
w_grad_prefix, b_grad_prefix)
hcounter(endpoint, model_bucket, "counter") #flag it if it's accessed.
print("number of access at this time = {}".format(int(hget_object(endpoint, model_bucket, "counter"))))
model.linear.weight.grad = Variable(torch.from_numpy(w_grad_merge))
model.linear.bias.grad = Variable(torch.from_numpy(b_grad_merge))
print("w_grad after merge = {}".format(model.linear.weight.grad.data.numpy()[0][:5]))
print("b_grad after merge = {}".format(model.linear.bias.grad.data.numpy()))
print("synchronization cost {} s".format(time.time() - sync_start))
optimizer.step()
print("batch cost {} s".format(time.time() - batch_start))
if (batch_index + 1) % 10 == 0:
print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
% (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size, loss.data))
"""
if worker_index == 0:
while sync_counter(endpoint, bucket, num_workers):
time.sleep(0.001)
clear_bucket(endpoint, model_bucket)
clear_bucket(endpoint, grad_bucket)
"""
# Test the Model
correct = 0
total = 0
for items, labels in validation_loader:
items = Variable(items.view(-1, num_features))
outputs = model(items)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Accuracy of the model on the %d test samples: %d %%' % (len(val_indices), 100 * correct / total))
end_time = time.time()
print("elapsed time = {} s".format(end_time - start_time))
| 44.322222 | 120 | 0.587741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,654 | 0.20732 |
f53f7ca7e55025431c0eddd3b58db5224cb4211d | 177 | py | Python | src/params/NeuronTypes.py | thatmariia/grid-ping | 3c32e48226adddcffba605573daa80cca02b5a57 | [
"BSD-4-Clause"
]
| null | null | null | src/params/NeuronTypes.py | thatmariia/grid-ping | 3c32e48226adddcffba605573daa80cca02b5a57 | [
"BSD-4-Clause"
]
| null | null | null | src/params/NeuronTypes.py | thatmariia/grid-ping | 3c32e48226adddcffba605573daa80cca02b5a57 | [
"BSD-4-Clause"
]
| null | null | null | from enum import Enum
class NeuronTypes(Enum):
"""
Enum class containing neuron types: excitatory and inhibitory.
"""
EX = "excitatory"
IN = "inhibitory"
| 16.090909 | 66 | 0.649718 | 152 | 0.858757 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.576271 |
f5401cd673d6e1e3eddd77c34fed0869702ad889 | 2,346 | py | Python | src/backend/common/manipulators/team_manipulator.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
]
| 266 | 2015-01-04T00:10:48.000Z | 2022-03-28T18:42:05.000Z | src/backend/common/manipulators/team_manipulator.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
]
| 2,673 | 2015-01-01T20:14:33.000Z | 2022-03-31T18:17:16.000Z | src/backend/common/manipulators/team_manipulator.py | ofekashery/the-blue-alliance | df0e47d054161fe742ac6198a6684247d0713279 | [
"MIT"
]
| 230 | 2015-01-04T00:10:48.000Z | 2022-03-26T18:12:04.000Z | from typing import List
from backend.common.cache_clearing import get_affected_queries
from backend.common.manipulators.manipulator_base import ManipulatorBase
from backend.common.models.cached_model import TAffectedReferences
from backend.common.models.team import Team
class TeamManipulator(ManipulatorBase[Team]):
"""
Handle Team database writes.
"""
@classmethod
def getCacheKeysAndQueries(
cls, affected_refs: TAffectedReferences
) -> List[get_affected_queries.TCacheKeyAndQuery]:
return get_affected_queries.team_updated(affected_refs)
"""
@classmethod
def postDeleteHook(cls, teams):
# To run after the team has been deleted.
for team in teams:
SearchHelper.remove_team_location_index(team)
@classmethod
def postUpdateHook(cls, teams, updated_attr_list, is_new_list):
# To run after models have been updated
for (team, updated_attrs) in zip(teams, updated_attr_list):
if 'city' in updated_attrs or 'state_prov' in updated_attrs or \
'country' in updated_attrs or 'postalcode' in updated_attrs:
try:
LocationHelper.update_team_location(team)
except Exception, e:
logging.error("update_team_location for {} errored!".format(team.key.id()))
logging.exception(e)
try:
SearchHelper.update_team_location_index(team)
except Exception, e:
logging.error("update_team_location_index for {} errored!".format(team.key.id()))
logging.exception(e)
cls.createOrUpdate(teams, run_post_update_hook=False)
"""
@classmethod
def updateMerge(
cls, new_model: Team, old_model: Team, auto_union: bool = True
) -> Team:
cls._update_attrs(new_model, old_model, auto_union)
# Take the new tpid and tpid_year iff the year is newer than or equal to the old one
if (
new_model.first_tpid_year is not None
and new_model.first_tpid_year >= old_model.first_tpid_year
):
old_model.first_tpid_year = new_model.first_tpid_year
old_model.first_tpid = new_model.first_tpid
old_model._dirty = True
return old_model
| 37.238095 | 101 | 0.656436 | 2,071 | 0.882779 | 0 | 0 | 813 | 0.346547 | 0 | 0 | 1,274 | 0.543052 |
f5405ca41fa935c1df325e78905e0a54820977fe | 179 | py | Python | dtf/packages/settings.py | WebPowerLabs/django-trainings | 97f7a96c0fbeb85a001201c74713f7944cb77236 | [
"BSD-3-Clause"
]
| null | null | null | dtf/packages/settings.py | WebPowerLabs/django-trainings | 97f7a96c0fbeb85a001201c74713f7944cb77236 | [
"BSD-3-Clause"
]
| null | null | null | dtf/packages/settings.py | WebPowerLabs/django-trainings | 97f7a96c0fbeb85a001201c74713f7944cb77236 | [
"BSD-3-Clause"
]
| null | null | null | from django.conf import settings
INFUSIONSOFT_COMPANY = getattr(settings, 'INFUSIONSOFT_COMPANY_ID', None)
INFUSIONSOFT_API_KEY = getattr(settings, 'INFUSIONSOFT_API_KEY', None)
| 35.8 | 73 | 0.832402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.26257 |
f5450958d50c031030e18504e081e98ce995e8e8 | 3,680 | py | Python | measures/over_under_exposure_measure/over_under_exposure_measure.py | HensoldtOptronicsCV/ImageQualityAssessment | 7bb3af2cd20a32415966304c8fa3acb77c54f85d | [
"MIT"
]
| 8 | 2020-06-12T12:49:19.000Z | 2021-04-27T12:10:49.000Z | measures/over_under_exposure_measure/over_under_exposure_measure.py | HensoldtOptronicsCV/ImageQualityAssessment | 7bb3af2cd20a32415966304c8fa3acb77c54f85d | [
"MIT"
]
| null | null | null | measures/over_under_exposure_measure/over_under_exposure_measure.py | HensoldtOptronicsCV/ImageQualityAssessment | 7bb3af2cd20a32415966304c8fa3acb77c54f85d | [
"MIT"
]
| 5 | 2020-04-18T11:30:47.000Z | 2022-03-04T07:05:21.000Z | # MIT License
#
# Copyright (c) 2020 HENSOLDT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Python implementation of the under/over-exposure measure. We focus on simplicity and readability rather than efficiency.
#
# This code is related to the paper
# M. Teutsch, S. Sedelmaier, S. Moosbauer, G. Eilertsen, T. Walter,
# "An Evaluation of Objective Image Quality Assessment for Thermal Infrared Video Tone Mapping", IEEE CVPR Workshops, 2020.
#
# Please cite the paper if you use the code for your evaluations.
# This measure was originally proposed here:
# G. Eilertsen, R. Mantiuk, J. Unger, "A comparative review of tone-mapping algorithms for high dynamic range video", Eurographics, 2017.
import numpy as np
import cv2
## Calculate the over- and under-exposure measure (percentage of over- and under-exposed pixels) for one given tone mapped LDR image.
# @param image_ldr Low Dynamic Range image (processed image after tone mapping).
def number_of_over_and_under_exposures_pixels(image_ldr):
# calculate exposure measure for one given frame
# calculate histogram of the image
hist, bins = np.histogram(image_ldr, 256, [0, 255])
    # percentage of under-exposed pixels (intensities below 2% of the range)
under_exp_pix = sum(hist[0:int(255 * 0.02)])/sum(hist) * 100
    # percentage of over-exposed pixels (intensities above 95% of the range)
over_exp_pix = sum(hist[int(255 * 0.95):])/sum(hist) * 100
return over_exp_pix, under_exp_pix
## Calculate over- and under-exposure measure for all (already tone mapped) images in given path.
# @param images_ldr_path Directory path that contains the tone mapped images of one sequence.
def calculate_over_and_under_exposure_measure(images_ldr_path):
sequence_length = len(images_ldr_path)
if sequence_length == 0:
raise ValueError('List of LDR image paths must not be empty.')
under_exp_pix = 0
over_exp_pix = 0
for image_ldr_path in images_ldr_path:
print(".", end = '', flush = True) # show progress
# read tone mapped (TM) image as grayscale image
image_ldr = cv2.imread(str(image_ldr_path), cv2.IMREAD_GRAYSCALE)
curr_over_exp_pix, curr_under_exp_pix = number_of_over_and_under_exposures_pixels(image_ldr)
        # accumulate the percentage of under-exposed pixels
under_exp_pix += curr_under_exp_pix
        # accumulate the percentage of over-exposed pixels
over_exp_pix += curr_over_exp_pix
# calculate average of over- and under-exposed pixels for this sequence
over_exposure = over_exp_pix / sequence_length
under_exposure = under_exp_pix / sequence_length
print() # newline after progress dots
return over_exposure, under_exposure
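# --- Illustrative usage sketch (added; not part of the original module) ---
# The ramp image and the rough percentages below are assumptions for
# demonstration only: about 2% of a 0..255 ramp falls below the
# under-exposure threshold (0.02 * 255) and roughly 5% lies above the
# over-exposure threshold (0.95 * 255).
if __name__ == "__main__":
    ramp = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
    over, under = number_of_over_and_under_exposures_pixels(ramp)
    print("over-exposed: {:.2f} %, under-exposed: {:.2f} %".format(over, under))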
| 41.818182 | 137 | 0.741848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,510 | 0.682065 |
f54590a9d9506eac6f07374f1bb10c88ce804b14 | 2,567 | py | Python | tests/test_cascade.py | mathDR/jax-pilco | c6c75cd8d43ba894d8f1da2cf6b7c0eea5e43527 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_cascade.py | mathDR/jax-pilco | c6c75cd8d43ba894d8f1da2cf6b7c0eea5e43527 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_cascade.py | mathDR/jax-pilco | c6c75cd8d43ba894d8f1da2cf6b7c0eea5e43527 | [
"BSD-3-Clause"
]
| null | null | null | from pilco.models.pilco import PILCO
import jax.numpy as jnp
import numpy as np
import objax
import os
import oct2py
import logging
oc = oct2py.Oct2Py(logger=oct2py.get_log())
oc.logger = oct2py.get_log("new_log")
oc.logger.setLevel(logging.INFO)
dir_path = os.path.dirname(os.path.realpath("__file__")) + "/tests/Matlab Code"  # note: the string "__file__" resolves relative to the current working directory
oc.addpath(dir_path)
def test_cascade():
objax.random.Generator(0)
    d = 2  # State dimension
k = 1 # Controller's output dimension
b = 100
horizon = 10
e = jnp.array(
[[10.0]]
    )  # Max control input. Setting it too low can lead to Cholesky failures.
# Training Dataset
X0 = objax.random.uniform((b, d + k))
A = objax.random.uniform((d + k, d))
Y0 = jnp.sin(X0).dot(A) + 1e-3 * (objax.random.uniform((b, d)) - 0.5)
pilco = PILCO((X0, Y0))
pilco.controller.max_action = e
pilco.optimize_models(restarts=5)
pilco.optimize_policy(restarts=5)
# Generate input
m = objax.random.uniform((1, d))
s = objax.random.uniform((d, d))
s = s.dot(s.T) # Make s positive semidefinite
M, S, reward = pilco.predict(m, s, horizon)
# convert data to the struct expected by the MATLAB implementation
policy = oct2py.io.Struct()
policy.p = oct2py.io.Struct()
policy.p.w = np.array(pilco.controller.W)
policy.p.b = np.array(pilco.controller.b).T
policy.maxU = e
# convert data to the struct expected by the MATLAB implementation
lengthscales = np.stack(
[np.array(model.kernel.lengthscale) for model in pilco.mgpr.models]
)
variance = np.stack(
[np.array(model.kernel.variance) for model in pilco.mgpr.models]
)
noise = np.stack(
[np.array(model.likelihood.variance) for model in pilco.mgpr.models]
)
hyp = np.log(
np.hstack((lengthscales, np.sqrt(variance[:, None]), np.sqrt(noise[:, None])))
).T
dynmodel = oct2py.io.Struct()
dynmodel.hyp = hyp
dynmodel.inputs = X0
dynmodel.targets = Y0
plant = oct2py.io.Struct()
plant.angi = np.zeros(0)
plant.angi = np.zeros(0)
plant.poli = np.arange(d) + 1
plant.dyni = np.arange(d) + 1
plant.difi = np.arange(d) + 1
# Call function in octave
M_mat, S_mat = oc.pred(
policy, plant, dynmodel, m.T, s, horizon, nout=2, verbose=True
)
# Extract only last element of the horizon
M_mat = M_mat[:, -1]
S_mat = S_mat[:, :, -1]
assert jnp.allclose(M[0], M_mat, rtol=1e-2)
assert jnp.allclose(S, S_mat, rtol=1e-2)
if __name__ == "__main__":
test_cascade()
| 27.602151 | 86 | 0.638878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.165173 |
f545d16ab5e716cdb065a0e70787360e7d612aef | 275 | py | Python | server/services/main.py | Jordonkopp/Flask-Vue | db842f1a31f2ca4cf51ce1b2a927d6d2ad860c00 | [
"MIT"
]
| 2 | 2019-02-27T16:55:01.000Z | 2019-02-27T20:23:29.000Z | server/services/main.py | Jordonkopp/Flask-Vue | db842f1a31f2ca4cf51ce1b2a927d6d2ad860c00 | [
"MIT"
]
| 5 | 2020-04-30T00:01:01.000Z | 2021-10-05T19:42:15.000Z | server/services/main.py | Jordonkopp/Flask-Vue | db842f1a31f2ca4cf51ce1b2a927d6d2ad860c00 | [
"MIT"
]
| null | null | null | from flask import Blueprint, redirect, url_for
from server.utils.core_utils import logger
# Create Blueprint
main = Blueprint("main", __name__)
# redirect when you visit /
@main.route("/")
def index():
logger.info("Base redirect")
return redirect(url_for('keys'))
| 21.153846 | 46 | 0.727273 | 0 | 0 | 0 | 0 | 99 | 0.36 | 0 | 0 | 75 | 0.272727 |
f5465045af39eda12ecdfeb4fa359c70d7f7cca7 | 528 | py | Python | api/migrations/0003_group_post.py | KolesnikRV/api_final_yatube | 23fdd8b6c2a55ff5c70c62b58ecd69ff1dd23e7d | [
"BSD-3-Clause"
]
| null | null | null | api/migrations/0003_group_post.py | KolesnikRV/api_final_yatube | 23fdd8b6c2a55ff5c70c62b58ecd69ff1dd23e7d | [
"BSD-3-Clause"
]
| null | null | null | api/migrations/0003_group_post.py | KolesnikRV/api_final_yatube | 23fdd8b6c2a55ff5c70c62b58ecd69ff1dd23e7d | [
"BSD-3-Clause"
]
| null | null | null | # Generated by Django 3.1.7 on 2021-03-07 06:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20210306_1136'),
]
operations = [
migrations.AddField(
model_name='group',
name='post',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='group', to='api.post'),
preserve_default=False,
),
]
| 25.142857 | 132 | 0.63447 | 402 | 0.761364 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.202652 |
f546b5a71740ed44a893660c0c2c42d95a14bc63 | 111 | py | Python | iflow/model/cflows/__init__.py | WeitaoZC/iflow | 404ffdbeb27d9fae7d1350de6af84ed7bfdaad99 | [
"MIT"
]
| 11 | 2020-11-01T06:03:57.000Z | 2022-03-10T01:14:03.000Z | iflow/model/cflows/__init__.py | WeitaoZC/iflow | 404ffdbeb27d9fae7d1350de6af84ed7bfdaad99 | [
"MIT"
]
| 1 | 2022-03-14T21:32:51.000Z | 2022-03-14T21:32:51.000Z | iflow/model/cflows/__init__.py | WeitaoZC/iflow | 404ffdbeb27d9fae7d1350de6af84ed7bfdaad99 | [
"MIT"
]
| 2 | 2021-02-03T02:41:14.000Z | 2021-06-08T16:31:02.000Z | from .odefunc import (ODEnet,
ODEfunc)
from .cnf import CNF
from .diffeq_layers import * | 22.2 | 30 | 0.621622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f547d48b9bf65696e52de1543f4c4b442a9e0501 | 2,042 | py | Python | python/general-python/create-replica-and-download/createReplicaAndDownload.py | claudeshyaka-esri/developer-support | 016940d74f92a78f362900ab5329aa88c27d0a43 | [
"Apache-2.0"
]
| 272 | 2015-02-11T16:26:39.000Z | 2022-03-31T08:47:33.000Z | python/general-python/create-replica-and-download/createReplicaAndDownload.py | claudeshyaka-esri/developer-support | 016940d74f92a78f362900ab5329aa88c27d0a43 | [
"Apache-2.0"
]
| 254 | 2015-02-11T01:12:35.000Z | 2021-04-22T22:14:20.000Z | python/general-python/create-replica-and-download/createReplicaAndDownload.py | claudeshyaka-esri/developer-support | 016940d74f92a78f362900ab5329aa88c27d0a43 | [
"Apache-2.0"
]
| 211 | 2015-02-10T00:09:07.000Z | 2022-02-24T12:27:40.000Z | import urllib, urllib2, json, time, os
username = "username" #CHANGE
password = "password" #CHANGE
replicaURL = "feature service url/FeatureServer/createReplica" #CHANGE
replicaLayers = [0] #CHANGE
replicaName = "replicaTest" #CHANGE
def sendRequest(request):
response = urllib2.urlopen(request)
readResponse = response.read()
jsonResponse = json.loads(readResponse)
return jsonResponse
print("Generating token")
url = "https://arcgis.com/sharing/rest/generateToken"
data = {'username': username,
'password': password,
'referer': "https://www.arcgis.com",
'f': 'json'}
request = urllib2.Request(url, urllib.urlencode(data))
jsonResponse = sendRequest(request)
token = jsonResponse['token']
print("Creating the replica")
data = {'f' : 'json',
'replicaName' : replicaName,
'layers' : replicaLayers,
'returnAttachments' : 'true',
'returnAttachmentsDatabyURL' : 'false',
'syncModel' : 'none',
'dataFormat' : 'filegdb',
'async' : 'true',
'token': token}
request = urllib2.Request(replicaURL, urllib.urlencode(data))
jsonResponse = sendRequest(request)
print(jsonResponse)
print("Pinging the server")
responseUrl = jsonResponse['statusUrl']
url = "{}?f=json&token={}".format(responseUrl, token)
request = urllib2.Request(url)
jsonResponse = sendRequest(request)
while not jsonResponse.get("status") == "Completed":
time.sleep(5)
request = urllib2.Request(url)
jsonResponse = sendRequest(request)
userDownloads = os.environ['USERPROFILE'] + "\\Downloads"
print("Downloading the replica. In case this fails note that the replica URL is: \n")
jres = jsonResponse['resultUrl']
url = "{0}?token={1}".format(jres, token)
print(url)
f = urllib2.urlopen(url)
with open(userDownloads + "\\" + os.path.basename(jres), "wb") as local_file:
local_file.write(f.read())
print("\n Finished!")
| 34.610169 | 85 | 0.642018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.314887 |
f54a3cae489a26d054375f7cc639c9b189e844de | 10,980 | py | Python | tests/test_dsl.py | os-climate/declarative-trino-access-control | 8a810fccaca0e089cd17d4a1c888da7bcb36063e | [
"Apache-2.0"
]
| null | null | null | tests/test_dsl.py | os-climate/declarative-trino-access-control | 8a810fccaca0e089cd17d4a1c888da7bcb36063e | [
"Apache-2.0"
]
| 4 | 2022-01-15T14:37:21.000Z | 2022-03-26T12:42:24.000Z | tests/test_dsl.py | os-climate/osc-trino-acl-dsl | 8a810fccaca0e089cd17d4a1c888da7bcb36063e | [
"Apache-2.0"
]
| null | null | null | import re
import textwrap
import yaml
from osc_trino_acl_dsl.dsl2rules import dsl_to_rules
class Table(object):
def __init__(self, catalog: str, schema: str, table: str):
self.catalog: str = str(catalog)
self.schema: str = str(schema)
self.table: str = str(table)
class User(object):
def __init__(self, user: str, group):
self.user = str(user)
if type(group) == set:
self.groups = set([str(e) for e in list(group)])
elif type(group) == list:
self.groups = set([str(e) for e in group])
else:
self.groups = set([str(group)])
def rule_matches(rule: dict, table: Table, user: User) -> bool:
"""emulates trino rule matching semantics"""
if ("catalog" in rule) and (not re.fullmatch(rule["catalog"], table.catalog)):
return False
if ("schema" in rule) and (not re.fullmatch(rule["schema"], table.schema)):
return False
if ("table" in rule) and (not re.fullmatch(rule["table"], table.table)):
return False
if ("user" in rule) and (not re.fullmatch(rule["user"], user.user)):
return False
if "group" in rule:
x = [e for e in list(user.groups) if re.fullmatch(rule["group"], e)]
if len(x) == 0:
return False
return True
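# Example (added for illustration; the rule and identities are made up):
# the rule {"catalog": "dev", "group": "devs"} matches
# User("alice", "devs") against Table("dev", "proj1", "t") because the
# catalog regex fully matches and "devs" is one of the user's groups,
# while User("bob", []) does not match since no group satisfies the
# "group" pattern.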
def first_matching_rule(user: User, table: Table, rules: list) -> dict:
for rule in rules:
if rule_matches(rule, table, user):
return rule
return None
def rule_permissions(user: User, table: Table, rules: dict) -> tuple:
assert type(rules) == dict
assert "catalogs" in rules
assert "schemas" in rules
assert "tables" in rules
crule = first_matching_rule(user, table, rules["catalogs"])
assert type(crule) == dict
assert "allow" in crule
allow = crule["allow"]
srule = first_matching_rule(user, table, rules["schemas"])
assert type(srule) == dict
assert "owner" in srule
owner = srule["owner"]
trule = first_matching_rule(user, table, rules["tables"])
assert type(trule) == dict
assert "privileges" in trule
privs = trule["privileges"]
return (allow, owner, privs)
_admin = ["SELECT", "INSERT", "DELETE", "OWNERSHIP"]
_public = ["SELECT"]
def test_dsl_minimal():
# a minimal schema: declares one admin group, defaults public, and no other rules
dsl = yaml.load(
textwrap.dedent(
"""
admin:
- group: admins
public: true
catalogs: []
schemas: []
tables: []
"""
),
yaml.SafeLoader,
)
rules = dsl_to_rules(dsl, validate=True)
# test permissions of the admin group
perms = rule_permissions(User("x", "admins"), Table("x", "x", "x"), rules)
assert perms == ("all", True, _admin)
# test permissions of generic user
perms = rule_permissions(User("x", []), Table("x", "x", "x"), rules)
assert perms == ("read-only", False, _public)
def test_dsl_catalog():
dsl = yaml.load(
textwrap.dedent(
"""
admin:
- group: admins
public: true
catalogs:
- catalog: dev
public: false
schemas: []
tables: []
"""
),
yaml.SafeLoader,
)
rules = dsl_to_rules(dsl, validate=True)
# test permissions of the admin group
perms = rule_permissions(User("x", "admins"), Table("x", "x", "x"), rules)
assert perms == ("all", True, _admin)
# test permissions of generic user and non-dev catalog (global default)
perms = rule_permissions(User("x", []), Table("x", "x", "x"), rules)
assert perms == ("read-only", False, _public)
perms = rule_permissions(User("x", []), Table("dev", "x", "x"), rules)
assert perms == ("read-only", False, [])
def test_dsl_schema():
dsl = yaml.load(
textwrap.dedent(
"""
admin:
- group: admins
public: true
catalogs:
- catalog: dev
public: false
schemas:
- catalog: dev
schema: proj1
admin:
- group: devs
- user: usery
public: true
tables: []
"""
),
yaml.SafeLoader,
)
rules = dsl_to_rules(dsl, validate=True)
# test permissions of the admin group
perms = rule_permissions(User("x", "admins"), Table("x", "x", "x"), rules)
assert perms == ("all", True, _admin)
# test permissions of generic user and non-dev catalog (global default)
perms = rule_permissions(User("x", []), Table("x", "x", "x"), rules)
assert perms == ("read-only", False, _public)
# test permissions of the dev group on the dev catalog
perms = rule_permissions(User("x", "devs"), Table("dev", "x", "x"), rules)
assert perms == ("all", False, [])
# devs have admin in proj1 schema for all tables
perms = rule_permissions(User("x", "devs"), Table("dev", "proj1", "x"), rules)
assert perms == ("all", True, _admin)
perms = rule_permissions(User("usery", []), Table("dev", "proj1", "x"), rules)
assert perms == ("all", True, _admin)
# dev-catalog default is non-public (no privs)
perms = rule_permissions(User("x", "nondev"), Table("dev", "x", "x"), rules)
assert perms == ("read-only", False, [])
# inside dev.proj1 schema tables default to public
perms = rule_permissions(User("x", []), Table("dev", "proj1", "x"), rules)
assert perms == ("read-only", False, _public)
def test_dsl_table():
dsl = yaml.load(
textwrap.dedent(
"""
admin:
- group: admins
public: true
catalogs:
- catalog: dev
public: false
schemas:
- catalog: dev
schema: proj1
admin:
- group: devs
- user: usery
public: true
tables:
- catalog: dev
schema: proj1
table: priv1
admin:
- user: userz
public: false
"""
),
yaml.SafeLoader,
)
rules = dsl_to_rules(dsl, validate=True)
# test permissions of the admin group
perms = rule_permissions(User("x", "admins"), Table("x", "x", "x"), rules)
assert perms == ("all", True, _admin)
# global default should be readable
perms = rule_permissions(User("x", []), Table("x", "x", "x"), rules)
assert perms == ("read-only", False, _public)
# dev catalog default should be non-public
perms = rule_permissions(User("x", []), Table("dev", "x", "x"), rules)
assert perms == ("read-only", False, [])
# dev.proj1 schema default should be readable
perms = rule_permissions(User("x", []), Table("dev", "proj1", "x"), rules)
assert perms == ("read-only", False, _public)
# dev.proj1.priv1 should default to non-public
perms = rule_permissions(User("x", []), Table("dev", "proj1", "priv1"), rules)
assert perms == ("read-only", False, [])
# "usery" and "devs" group have schema admin:
perms = rule_permissions(User("x", "devs"), Table("dev", "proj1", "x"), rules)
assert perms == ("all", True, _admin)
perms = rule_permissions(User("usery", []), Table("dev", "proj1", "x"), rules)
assert perms == ("all", True, _admin)
# userz added as table admin for priv1
perms = rule_permissions(User("userz", []), Table("dev", "proj1", "priv1"), rules)
assert perms == ("all", False, _admin)
# but userz is not admin for any other table in proj1
perms = rule_permissions(User("userz", []), Table("dev", "proj1", "x"), rules)
assert perms == ("all", False, _public)
def test_dsl_table_acl():
dsl = yaml.load(
textwrap.dedent(
"""
admin:
- group: admins
public: true
catalogs:
- catalog: dev
public: false
schemas:
- catalog: dev
schema: proj1
admin:
- group: devs
- user: usery
public: true
tables:
- catalog: dev
schema: proj1
table: priv1
public:
filter:
- "population < 1000"
hide:
- column3
acl:
- id:
- user: usera
- user: userb
filter:
- "country = 'london'"
- "year < 2061"
hide:
- column1
- column2
"""
),
yaml.SafeLoader,
)
rules = dsl_to_rules(dsl, validate=True)
# test permissions of the admin group
perms = rule_permissions(User("x", "admins"), Table("x", "x", "x"), rules)
assert perms == ("all", True, _admin)
# global default should be readable
perms = rule_permissions(User("x", []), Table("x", "x", "x"), rules)
assert perms == ("read-only", False, _public)
# dev catalog default should be non-public
perms = rule_permissions(User("x", []), Table("dev", "x", "x"), rules)
assert perms == ("read-only", False, [])
# dev.proj1 schema default should be readable
perms = rule_permissions(User("x", []), Table("dev", "proj1", "x"), rules)
assert perms == ("read-only", False, _public)
# "usery" and "devs" group have schema admin:
perms = rule_permissions(User("x", "devs"), Table("dev", "proj1", "x"), rules)
assert perms == ("all", True, _admin)
perms = rule_permissions(User("usery", []), Table("dev", "proj1", "x"), rules)
assert perms == ("all", True, _admin)
for u in ["usera", "userb"]:
perms = rule_permissions(User(u, []), Table("dev", "proj1", "priv1"), rules)
assert perms == ("read-only", False, _public)
r = first_matching_rule(User(u, []), Table("dev", "proj1", "priv1"), rules["tables"])
assert "filter" in r
assert r["filter"] == "(country = 'london') and (year < 2061)"
assert "columns" in r
assert r["columns"] == [{"name": "column1", "allow": False}, {"name": "column2", "allow": False}]
# dev.proj1.priv1 should default to public
# but with additional row and column acl settings
perms = rule_permissions(User("x", []), Table("dev", "proj1", "priv1"), rules)
assert perms == ("read-only", False, _public)
r = first_matching_rule(User("x", []), Table("dev", "proj1", "priv1"), rules["tables"])
assert "filter" in r
assert r["filter"] == "(country = 'london') and (population < 1000) and (year < 2061)"
assert "columns" in r
assert r["columns"] == [
{"name": "column1", "allow": False},
{"name": "column2", "allow": False},
{"name": "column3", "allow": False},
]
| 32.485207 | 105 | 0.543443 | 530 | 0.04827 | 0 | 0 | 0 | 0 | 0 | 0 | 4,730 | 0.430783 |
f54e716dfa472cc32b79479172fc0cb1532d563d | 1,028 | py | Python | setup.py | henryk/byro-cnss | 77cc4d34a521879f9f225b473964b7384db306b1 | [
"Apache-2.0"
]
| null | null | null | setup.py | henryk/byro-cnss | 77cc4d34a521879f9f225b473964b7384db306b1 | [
"Apache-2.0"
]
| null | null | null | setup.py | henryk/byro-cnss | 77cc4d34a521879f9f225b473964b7384db306b1 | [
"Apache-2.0"
]
| null | null | null | import os
from distutils.command.build import build
from django.core import management
from setuptools import find_packages, setup
try:
with open(os.path.join(os.path.dirname(__file__), 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
except Exception:
long_description = ''
class CustomBuild(build):
def run(self):
management.call_command('compilemessages', verbosity=1)
build.run(self)
cmdclass = {
'build': CustomBuild
}
setup(
name='byro-cnss',
version='0.0.1',
description='Byro plugin for CNSS (Clausewitz-Netzwerk für Strategische Studien e.V.)',
long_description=long_description,
url='https://github.com/henryk/byro-cnss',
author='Henryk Plötz',
author_email='[email protected]',
license='Apache Software License',
install_requires=[],
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
cmdclass=cmdclass,
entry_points="""
[byro.plugin]
byro_cnss=byro_cnss:ByroPluginMeta
""",
)
| 23.363636 | 92 | 0.696498 | 132 | 0.128155 | 0 | 0 | 0 | 0 | 0 | 0 | 307 | 0.298058 |
f54f18f6eb1da6e577537fa0c7b336cc4d1057b5 | 2,181 | py | Python | utils/tensor_utils_test.py | zhuchen03/federated | 6bbcdcb856759aa29daa9a510e7d5f34f6915010 | [
"Apache-2.0"
]
| 2 | 2021-10-19T13:55:11.000Z | 2021-11-11T11:26:05.000Z | utils/tensor_utils_test.py | zhuchen03/federated | 6bbcdcb856759aa29daa9a510e7d5f34f6915010 | [
"Apache-2.0"
]
| 2 | 2021-11-10T20:22:35.000Z | 2022-02-10T04:44:40.000Z | utils/tensor_utils_test.py | zhuchen03/federated | 6bbcdcb856759aa29daa9a510e7d5f34f6915010 | [
"Apache-2.0"
]
| 1 | 2021-03-09T09:48:56.000Z | 2021-03-09T09:48:56.000Z | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from utils import tensor_utils
class TensorUtilsTest(tf.test.TestCase):
def test_zero_all_if_any_non_finite(self):
def expect_ok(structure):
with tf.Graph().as_default():
result, error = tensor_utils.zero_all_if_any_non_finite(structure)
with self.session() as sess:
result, error = sess.run((result, error))
try:
tf.nest.map_structure(np.testing.assert_allclose, result, structure)
except AssertionError:
self.fail('Expected to get input {} back, but instead got {}'.format(
structure, result))
self.assertEqual(error, 0)
expect_ok([])
expect_ok([(), {}])
expect_ok(1.1)
expect_ok([1.0, 0.0])
expect_ok([1.0, 2.0, {'a': 0.0, 'b': -3.0}])
def expect_zeros(structure, expected):
with tf.Graph().as_default():
result, error = tensor_utils.zero_all_if_any_non_finite(structure)
with self.session() as sess:
result, error = sess.run((result, error))
try:
tf.nest.map_structure(np.testing.assert_allclose, result, expected)
except AssertionError:
self.fail('Expected to get zeros, but instead got {}'.format(result))
self.assertEqual(error, 1)
expect_zeros(np.inf, 0.0)
expect_zeros((1.0, (2.0, np.nan)), (0.0, (0.0, 0.0)))
expect_zeros((1.0, (2.0, {
'a': 3.0,
'b': [[np.inf], [np.nan]]
})), (0.0, (0.0, {
'a': 0.0,
'b': [[0.0], [0.0]]
})))
if __name__ == '__main__':
tf.test.main()
| 32.552239 | 79 | 0.64099 | 1,457 | 0.668042 | 0 | 0 | 0 | 0 | 0 | 0 | 708 | 0.324622 |
f54f50b36cac1b6f41d6778991e01f0570bbafab | 3,426 | py | Python | autonmap/__main__.py | zeziba/AUTONMAP | 50a2ae5f0731bc919ccb8978c619d1432b447286 | [
"Apache-2.0"
]
| null | null | null | autonmap/__main__.py | zeziba/AUTONMAP | 50a2ae5f0731bc919ccb8978c619d1432b447286 | [
"Apache-2.0"
]
| null | null | null | autonmap/__main__.py | zeziba/AUTONMAP | 50a2ae5f0731bc919ccb8978c619d1432b447286 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
import logging.handlers
import sys
from sys import argv, modules
from os.path import join
from autonmap import cron_scheduler
from autonmap import launch_client
from autonmap import launch_server
from autonmap.server import server_config as sconfig
"""
This module allows autonmap to interact with the server and client process to
perform the tasks each is assigned.
"""
LOG_FILE = "/tmp/autonmap.log"
LOGGING_LEVEL = logging.INFO
logger = logging.getLogger(__name__)
logger.setLevel(LOGGING_LEVEL)
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILE, when='midnight', backupCount=3)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
class Log(object):
def __init__(self, log, level):
self.logger = log
self.level = level
def write(self, message):
if message.rstrip() != "":
self.logger.log(self.level, message.rstrip())
def flush(self):
pass
def main():
"""
Main routine
:return: None
"""
if len(argv) > 1:
print("Automated nMap Server/Client Manager")
if argv[1] == 'cron':
cron_scheduler.main()
elif argv[1] == "update":
if len(argv) == 3:
file_location = join(sconfig.get_base(), "work.txt")
if str(argv[2]).lower() == "delete":
with open(file_location, "w") as file:
pass # This empties the file of all contents
                else:
                    with open(argv[2], "r") as infile:
                        # "a+" keeps the existing work file so its entries can
                        # be merged in; "w+" would truncate it immediately and
                        # the second loop would never see the old subnets.
                        with open(file_location, "a+") as outfile:
                            subnets = set()
                            for in_line in infile:
                                subnets.add(in_line.strip())
                            outfile.seek(0)
                            for out_line in outfile:
                                subnets.add(out_line.strip())
                            subnets.discard("")
                            outfile.seek(0)
                            outfile.truncate()
                            for item in subnets:
                                outfile.write("{}\n".format(item))
elif len(argv) == 3:
if argv[2] in ['start', 'stop', 'update', 'report']:
if argv[1] == 'server':
sys.stdout = Log(log=logger, level=logging.INFO)
sys.stderr = Log(log=logger, level=logging.ERROR)
launch_server.main(argv[2])
elif argv[1] == 'client':
sys.stdout = Log(log=logger, level=logging.INFO)
sys.stderr = Log(log=logger, level=logging.ERROR)
launch_client.main(argv[2])
else:
print("Invalid arguments")
else:
print("Invalid arguments")
else:
print("Usage: {} {} {}".format("python3 -m autonmap",
"client|server|update", "start<client>|stop|report|update|"
"location<update>|delete<update>"))
print("Usage: {} {}".format("python3 -m autonmap", "cron"))
print("\t{} {}".format("python3 -m autonmap", "update ~/workfile.txt"))
print("Client script is located at: \n\t\t{}".format(modules[launch_client.__name__]))
print("The log is located in /tmp/autonmap.log")
if __name__ == "__main__":
main()
| 35.6875 | 98 | 0.537069 | 266 | 0.077642 | 0 | 0 | 0 | 0 | 0 | 0 | 781 | 0.227963 |
f54ff4d5dcb3a333a55f6c56d21b89f6d29ae597 | 6,166 | py | Python | src/logic_gradient.py | Einzberg/BattlesnakeFun | 4276144c3ccfab66e7c9df4717681e305861f76a | [
"MIT"
]
| null | null | null | src/logic_gradient.py | Einzberg/BattlesnakeFun | 4276144c3ccfab66e7c9df4717681e305861f76a | [
"MIT"
]
| null | null | null | src/logic_gradient.py | Einzberg/BattlesnakeFun | 4276144c3ccfab66e7c9df4717681e305861f76a | [
"MIT"
]
| null | null | null | # import random
# from typing import List, Dict
import numpy as np
# import matplotlib.pyplot as plt
def get_info() -> dict:
"""
This controls your Battlesnake appearance and author permissions.
For customization options, see https://docs.battlesnake.com/references/personalization
TIP: If you open your Battlesnake URL in browser you should see this data.
"""
return {
"apiversion": "1",
"author": "Mex", # TODO: Your Battlesnake Username
"color": "#888889", # TODO: Personalize
"head": "silly", # TODO: Personalize
"tail": "curled", # TODO: Personalize
}
# Globals
food_weight = 9
snake_weight = -9
snake_head_weight = -2
wall_weight = -9
board_centre = 1
board_x = None
board_y = None
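# The board is treated as a potential field: food adds positive Gaussian
# bumps, snakes and walls add negative weights, and the snake steers toward
# high-scoring cells.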
def gkern(l=10, scale=4):
"""\
creates gaussian kernel with side length `l` and a sigma of `sig`
"""
sig = (l-1)/3
ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))
kernel = np.outer(gauss, gauss)
return scale * kernel / np.max(kernel)
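# Illustrative check: gkern(11, 1) is an (11, 11) array whose centre value is
# exactly 1.0, since the kernel is normalised so its peak equals `scale`.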
def centre_grad(data: dict) -> np.array:
board_w = data["board"]["width"]
board_h = data["board"]["height"]
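    # NOTE: gkern returns a square kernel, so this implicitly assumes a
    # square board (standard Battlesnake boards are).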
gradient_board = gkern(max(board_w, board_h), board_centre)
return gradient_board
def populate_food(board: np.array, data: dict):
    # Stamp a Gaussian bump centred on each food pellet by slicing an
    # oversized kernel, so the peak can be shifted anywhere on the board.
    for food in data['board']['food']:
        food_x, food_y = food['x'], food['y']
        kern_size = max(board.shape[0], board.shape[1])
        kernel = gkern(kern_size*2 + 1, 1)
        mid = kern_size  # peak index of a (2*kern_size + 1)-wide kernel
        x_min = mid - food_x
        x_max = mid + board.shape[0] - food_x
        y_min = mid - food_y
        y_max = mid + board.shape[1] - food_y
        board += kernel[x_min:x_max, y_min:y_max]*food_weight
def populate_other_snakes(board: np.array, data: dict):
for snake in data['board']['snakes']:
snake_body = snake['body']
for ele in snake_body:
if ele == snake['head']:
if ele == data['you']['head']:
board[ele['x'], ele['y']] = snake_weight
elif snake['length'] < data['you']['length']:
continue
else:
board[ele['x'], ele['y']] = snake_weight
                # Cells this head can reach next turn are dangerous; clamp to
                # the board so heads on the edge don't index out of bounds
                # (or silently wrap around via negative indices).
                for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                    nx, ny = ele['x'] + dx, ele['y'] + dy
                    if 0 <= nx < board.shape[0] and 0 <= ny < board.shape[1]:
                        board[nx, ny] += snake_head_weight
else:
board[ele['x'], ele['y']] = snake_weight
def follow_global_max(head: dict, board: np.array) -> str:
global_max = np.unravel_index(np.argmax(board), board.shape)
directions = {
"up": (0,1),
"down": (0,-1),
"left": (-1,0),
"right": (1,0)
}
direction = ""
distance = 10000
for item in directions.items():
curr_dist = (head['x'] + item[1][0] - global_max[0])**2 + (head['y'] + item[1][1] - global_max[1])**2
print(curr_dist, item[0])
if curr_dist < distance:
distance = curr_dist
direction = item[0]
return direction
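# Note: this heuristic only minimises straight-line distance to the single
# brightest cell; it does not check that the chosen neighbouring cell is safe.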
def follow_grad(head: dict, board: np.array) -> str:
directions = {
"up": (0,1),
"down": (0,-1),
"left": (-1,0),
"right": (1,0)
}
direction = ""
    max_score = float('-inf')  # ensure some direction is always returned
for item in directions.items():
curr_score = board[head['x'] + item[1][0] + 1, head['y'] + item[1][1] + 1]
if curr_score > max_score:
max_score = curr_score
direction = item[0]
return direction
def choose_move(data: dict) -> str:
    board = centre_grad(data)
    populate_other_snakes(board, data)
    # Food must be stamped before padding so its coordinates line up with
    # the unpadded game grid.
    populate_food(board, data)
    # print(f'GRADIENT ARRAY: {board}')
    # Pad with a wall penalty; follow_grad indexes this padded board with a
    # +1 offset, while follow_global_max works on the unpadded interior view.
    board = np.pad(board, 1, 'constant', constant_values=wall_weight)
    direction = follow_global_max(data['you']['head'], board[1:-1, 1:-1])
    # direction = follow_grad(data['you']['head'], board)
    print(f'GOING THIS DIRECTION: {direction}')
    return direction
data = {
"turn": 14,
"board": {
"height": 11,
"width": 11,
"food": [
{"x": 5, "y": 5},
{"x": 9, "y": 0},
{"x": 2, "y": 6}
],
"hazards": [
{"x": 3, "y": 2}
],
"snakes": [
{
"id": "snake-508e96ac-94ad-11ea-bb37",
"name": "My Snake",
"health": 54,
"body": [
{"x": 0, "y": 0},
{"x": 1, "y": 0},
{"x": 2, "y": 0}
],
"latency": "111",
"head": {"x": 0, "y": 0},
"length": 3,
"shout": "why are we shouting??",
"squad": "",
"customizations":{
"color":"#FF0000",
"head":"pixel",
"tail":"pixel"
}
},
{
"id": "snake-b67f4906-94ae-11ea-bb37",
"name": "Another Snake",
"health": 16,
"body": [
{"x": 5, "y": 4},
{"x": 5, "y": 3},
{"x": 6, "y": 3},
{"x": 6, "y": 2}
],
"latency": "222",
"head": {"x": 5, "y": 4},
"length": 4,
"shout": "I'm not really sure...",
"squad": "",
"customizations":{
"color":"#26CF04",
"head":"silly",
"tail":"curled"
}
}
]
},
"you": {
"id": "snake-508e96ac-94ad-11ea-bb37",
"name": "My Snake",
"health": 54,
"body": [
{"x": 0, "y": 0},
{"x": 1, "y": 0},
{"x": 2, "y": 0}
],
"latency": "111",
"head": {"x": 0, "y": 0},
"length": 3,
"shout": "why are we shouting??",
"squad": "",
"customizations":{
"color":"#FF0000",
"head":"pixel",
"tail":"pixel"
}
}
}
if False:
board = centre_grad(data)
board_x, board_y = 11, 11
populate_other_snakes(board, data)
populate_food(board, data)
board = np.pad(board, 1, 'constant', constant_values=snake_weight)
# plt.imshow(np.rot90(np.fliplr(board)), interpolation='none', origin="lower")
# plt.show()
| 28.155251 | 105 | 0.509569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,846 | 0.299384 |
f55005f6eda0c8aeadd07c4aee7c84c8198766c5 | 11 | py | Python | src/__init__.py | Peefy/StatisticalLearningMethod.Python | 7324d51b58932052bc518b9e82f64b76f0c39bf0 | [
"Apache-2.0"
]
| 1 | 2018-10-05T08:20:50.000Z | 2018-10-05T08:20:50.000Z | src/__init__.py | Peefy/StatisticalLearningMethod.Python | 7324d51b58932052bc518b9e82f64b76f0c39bf0 | [
"Apache-2.0"
]
| null | null | null | src/__init__.py | Peefy/StatisticalLearningMethod.Python | 7324d51b58932052bc518b9e82f64b76f0c39bf0 | [
"Apache-2.0"
]
| null | null | null |
# pdf.244
| 3.666667 | 9 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.818182 |
f5538c72ced0bc74b5e82bee2c3ce5f0a35952cd | 11,836 | py | Python | nuclear/help/help.py | igrek51/glue | 6726ba977a21e58b354a5c97f68639f84184be7a | [
"MIT"
]
| 4 | 2019-07-04T20:41:06.000Z | 2020-04-23T18:17:33.000Z | nuclear/help/help.py | igrek51/cliglue | 6726ba977a21e58b354a5c97f68639f84184be7a | [
"MIT"
]
| null | null | null | nuclear/help/help.py | igrek51/cliglue | 6726ba977a21e58b354a5c97f68639f84184be7a | [
"MIT"
]
| null | null | null | import os
import sys
from dataclasses import dataclass, field
from typing import List, Set, Optional
from nuclear.builder.rule import PrimaryOptionRule, ParameterRule, FlagRule, CliRule, SubcommandRule, \
PositionalArgumentRule, ManyArgumentsRule, DictionaryRule, ValueRule
from nuclear.parser.context import RunContext
from nuclear.parser.keyword import format_var_names, format_var_name
from nuclear.parser.parser import Parser
from nuclear.parser.transform import filter_rules
from nuclear.parser.value import generate_value_choices
from nuclear.version import __version__
@dataclass
class _OptionHelp(object):
cmd: str
help: str
parent: '_OptionHelp' = None
rule: SubcommandRule = None
subrules: List[CliRule] = field(default_factory=lambda: [])
internal_options = {'--autocomplete', '--install-bash', '--install-autocomplete'}
def print_help(rules: List[CliRule], app_name: str, version: str, help: str, subargs: List[str], hide_internal: bool):
helps = generate_help(rules, app_name, version, help, subargs, hide_internal)
print('\n'.join(helps))
def print_usage(rules: List[CliRule]):
all_rules, available_subcommands, precommands = help_context(rules, [])
pos_arguments = filter_rules(all_rules, PositionalArgumentRule)
many_args = filter_rules(all_rules, ManyArgumentsRule)
has_commands = bool(filter_rules(available_subcommands, SubcommandRule))
command_name = shell_command_name()
app_bin_prefix = ' '.join([command_name] + precommands)
usage = generate_usage(app_bin_prefix, has_commands, have_rules_options(all_rules), many_args, pos_arguments)
how_to_help = f'Run "{command_name} --help" for more information.'
print('\n'.join([f'Usage: {usage}', how_to_help]))
def generate_help(rules: List[CliRule], app_name: str, version: str, help: str, subargs: List[str],
hide_internal: bool) -> List[str]:
all_rules, available_subcommands, precommands = help_context(rules, subargs)
return generate_subcommand_help(all_rules, app_name, version, help,
precommands, available_subcommands, hide_internal)
def help_context(rules, subargs):
available_subcommands = filter_rules(rules, SubcommandRule)
run_context: Optional[RunContext] = Parser(rules, dry=True).parse_args(subargs)
all_rules: List[CliRule] = run_context.active_rules
active_subcommands: List[SubcommandRule] = run_context.active_subcommands
precommands: List[str] = [_subcommand_short_name(rule) for rule in active_subcommands]
if active_subcommands:
available_subcommands = filter_rules(active_subcommands[-1].subrules, SubcommandRule)
return all_rules, available_subcommands, precommands
def generate_subcommand_help(
all_rules: List[CliRule],
app_name: str,
version: str,
help: str,
precommands: List[str],
subcommands: List[SubcommandRule],
hide_internal: bool,
) -> List[str]:
pos_arguments = filter_rules(all_rules, PositionalArgumentRule)
many_args = filter_rules(all_rules, ManyArgumentsRule)
pos_args_helps: List[_OptionHelp] = _generate_pos_args_helps(pos_arguments, many_args)
options: List[_OptionHelp] = _generate_options_helps(all_rules, hide_internal)
commands: List[_OptionHelp] = _generate_commands_helps(subcommands)
out = []
app_info = app_help_info(app_name, help, version)
if app_info:
out.append(app_info + '\n')
app_bin_prefix = ' '.join([shell_command_name()] + precommands)
out.append('Usage:')
out.append(generate_usage(app_bin_prefix, bool(commands), have_rules_options(all_rules), many_args, pos_arguments))
if pos_args_helps:
out.append('\nArguments:')
__helpers_output(pos_args_helps, out)
if options:
out.append('\nOptions:')
__helpers_output(options, out)
if commands:
out.append('\nCommands:')
__helpers_output(commands, out)
out.append(f'\nRun "{app_bin_prefix} COMMAND --help" for more information on a command.')
return out
def app_help_info(app_name: str, help: str, version: str) -> Optional[str]:
info = app_name_version(app_name, version)
return ' - '.join(filter(bool, [info, help]))
def app_name_version(app_name, version):
infos = []
if app_name:
infos += [app_name]
if version:
version = _normalized_version(version)
infos += [version]
if infos:
infos += [f'(nuclear v{__version__})']
return ' '.join(infos)
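# e.g. app_name_version('mytool', '1.2.3') gives 'mytool v1.2.3 (nuclear vX.Y.Z)'
# where X.Y.Z is the installed nuclear version.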
def generate_usage(app_bin_prefix, has_commands: bool, has_options: bool, many_args, pos_arguments) -> str:
usage_syntax: str = app_bin_prefix
if has_commands:
usage_syntax += ' [COMMAND]'
if has_options:
usage_syntax += ' [OPTIONS]'
usage_syntax += usage_positional_arguments(pos_arguments)
usage_syntax += usage_many_arguments(many_args)
return usage_syntax
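# e.g. generate_usage('myapp', True, True, [], []) -> 'myapp [COMMAND] [OPTIONS]'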
def __helpers_output(commands, out):
padding = _max_name_width(commands)
for helper in commands:
name_padded = helper.cmd.ljust(padding)
if helper.help:
for idx, line in enumerate(helper.help.splitlines()):
if idx == 0:
out.append(f' {name_padded} - {line}')
else:
out.append(' ' * (2 + padding + 3) + line)
else:
out.append(f' {name_padded}')
def print_version(app_name: str, version: str):
print(app_name_version(app_name, version))
def _normalized_version(version: str) -> str:
if version.startswith('v'):
return version
return f'v{version}'
def _max_name_width(helps: List[_OptionHelp]) -> int:
return max(map(lambda h: len(h.cmd), helps))
def _generate_pos_args_helps(
pos_arguments: List[PositionalArgumentRule],
many_args: List[ManyArgumentsRule]
) -> List[_OptionHelp]:
return [_pos_arg_help(rule) for rule in pos_arguments] + \
[_many_args_help(rule) for rule in many_args]
def _generate_options_helps(rules: List[CliRule], hide_internal: bool) -> List[_OptionHelp]:
# filter non-empty
return list(filter(lambda o: o, [_generate_option_help(rule, hide_internal) for rule in rules]))
def _generate_option_help(rule: CliRule, hide_internal: bool) -> Optional[_OptionHelp]:
if isinstance(rule, PrimaryOptionRule):
return _primary_option_help(rule, hide_internal)
elif isinstance(rule, FlagRule):
return _flag_help(rule)
elif isinstance(rule, ParameterRule):
return _parameter_help(rule)
elif isinstance(rule, DictionaryRule):
return _dictionary_help(rule)
return None
def _generate_commands_helps(rules: List[CliRule], parent: _OptionHelp = None, subrules: List[CliRule] = None
) -> List[_OptionHelp]:
commands: List[_OptionHelp] = []
for rule in filter_rules(rules, SubcommandRule):
subsubrules = (subrules or []) + rule.subrules
helper = _subcommand_help(rule, parent, subsubrules)
if rule.run or rule.help:
commands.append(helper)
commands.extend(_generate_commands_helps(rule.subrules, helper, subsubrules))
return commands
def _subcommand_help(rule: SubcommandRule, parent: _OptionHelp, subrules: List[CliRule]) -> _OptionHelp:
pos_args = filter_rules(subrules, PositionalArgumentRule)
many_args = filter_rules(subrules, ManyArgumentsRule)
cmd = _subcommand_prefix(parent) + '|'.join(sorted_keywords(rule.keywords))
cmd += usage_positional_arguments(pos_args)
cmd += usage_many_arguments(many_args)
return _OptionHelp(cmd, rule.help, parent=parent, rule=rule, subrules=subrules)
def _subcommand_prefix(helper: _OptionHelp) -> str:
if not helper:
return ''
return _subcommand_prefix(helper.parent) + '|'.join(sorted_keywords(helper.rule.keywords)) + ' '
def _primary_option_help(rule: PrimaryOptionRule, hide_internal: bool) -> Optional[_OptionHelp]:
if hide_internal:
for keyword in rule.keywords:
if keyword in internal_options:
return None
cmd = ', '.join(sorted_keywords(rule.keywords))
pos_args = filter_rules(rule.subrules, PositionalArgumentRule)
all_args = filter_rules(rule.subrules, ManyArgumentsRule)
cmd += usage_positional_arguments(pos_args)
cmd += usage_many_arguments(all_args)
return _OptionHelp(cmd, rule.help)
def _flag_help(rule: FlagRule) -> _OptionHelp:
cmd = ', '.join(sorted_keywords(rule.keywords))
return _OptionHelp(cmd, rule.help)
def _parameter_help(rule: ParameterRule) -> _OptionHelp:
cmd = ', '.join(sorted_keywords(rule.keywords)) + ' ' + _param_display_name(rule)
default_value = display_default_value(rule.default)
choices_help = display_choices_help(rule)
help_text = join_nonempty_lines(rule.help, default_value, choices_help)
return _OptionHelp(cmd, help_text)
def _dictionary_help(rule: DictionaryRule) -> _OptionHelp:
cmd = ', '.join(sorted_keywords(rule.keywords)) + ' KEY VALUE'
return _OptionHelp(cmd, rule.help)
def _pos_arg_help(rule: PositionalArgumentRule) -> _OptionHelp:
cmd = display_positional_argument(rule)
default_value = display_default_value(rule.default)
choices_help = display_choices_help(rule)
help_text = join_nonempty_lines(rule.help, default_value, choices_help)
return _OptionHelp(cmd, help_text)
def _many_args_help(rule: ManyArgumentsRule) -> _OptionHelp:
cmd = display_many_arguments(rule)
choices_help = display_choices_help(rule)
help_text = join_nonempty_lines(rule.help, choices_help)
return _OptionHelp(cmd, help_text)
def _param_display_name(rule: ParameterRule) -> str:
if rule.name:
return format_var_name(rule.name).upper()
else:
# get name from the longest keyword
names: Set[str] = format_var_names(rule.keywords)
return max(names, key=lambda n: len(n)).upper()
def _argument_var_name(rule: PositionalArgumentRule) -> str:
return format_var_name(rule.name).upper()
def _subcommand_short_name(rule: SubcommandRule) -> str:
return next(iter(rule.keywords))
def sorted_keywords(keywords: Set[str]) -> List[str]:
# shortest keywords first, then alphabetically
return sorted(keywords, key=lambda k: (len(k), k))
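# e.g. sorted_keywords({'--help', '-h'}) -> ['-h', '--help']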
def display_positional_argument(rule: PositionalArgumentRule) -> str:
var_name = _argument_var_name(rule)
if rule.required:
return f' {var_name}'
else:
return f' [{var_name}]'
def display_many_arguments(rule: ManyArgumentsRule) -> str:
arg_name = rule.name.upper()
if rule.count_min():
return f' {arg_name}...'
else:
return f' [{arg_name}...]'
def usage_positional_arguments(rules: List[PositionalArgumentRule]) -> str:
return ''.join([display_positional_argument(rule) for rule in rules])
def usage_many_arguments(rules: List[ManyArgumentsRule]) -> str:
return ''.join([display_many_arguments(rule) for rule in rules])
def shell_command_name():
_, command = os.path.split(sys.argv[0])
if command == '__main__.py':
return sys.modules['__main__'].__package__
return command
def have_rules_options(rules: List[CliRule]) -> bool:
return bool(filter_rules(rules, FlagRule, ParameterRule, DictionaryRule, PrimaryOptionRule))
def display_default_value(default) -> Optional[str]:
if default is None:
return None
return 'Default: ' + str(default)
def display_choices_help(rule: ValueRule) -> Optional[str]:
choices = generate_value_choices(rule)
if not choices or not rule.strict_choices:
return None
return 'Choices: ' + ', '.join(choices)
def join_nonempty_lines(*lines: str) -> str:
return '\n'.join(filter(lambda t: t is not None, lines))
| 35.22619 | 119 | 0.710037 | 182 | 0.015377 | 0 | 0 | 193 | 0.016306 | 0 | 0 | 667 | 0.056353 |
f553c00e89c0f5a71a1f1863c8dfb6394c78b550 | 1,997 | py | Python | Apps/Engines/Nuke/NukeTools_1.01/Python/LookAt.py | geoffroygivry/CyclopsVFX-Unity | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [
"MIT"
]
| 17 | 2017-06-27T04:14:42.000Z | 2022-03-07T03:37:44.000Z | Apps/Engines/Nuke/NukeTools_1.01/Python/LookAt.py | geoffroygivry/Cyclops-VFX | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [
"MIT"
]
| 2 | 2017-06-14T04:17:51.000Z | 2018-08-23T20:12:44.000Z | Apps/Engines/Nuke/NukeTools_1.01/Python/LookAt.py | geoffroygivry/CyclopsVFX-Unity | 6ab9ab122b6c3e6200e90d49a0c2bf774e53d985 | [
"MIT"
]
| 2 | 2019-03-18T06:18:33.000Z | 2019-08-14T21:07:53.000Z | #The MIT License (MIT)
#
#Copyright (c) 2015 Geoffroy Givry
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import nuke
LookAtName = "LookAt"
def panelLookAt():
p = nuke.Panel("Look At Panel")
p.addSingleLineInput("LookAt Name:", LookAtName)
p.addButton("Cancel")
p.addButton("OK")
    result = p.show()
    # Panel.show() returns the index of the pressed button; "OK" was added
    # second, so bail out on anything else (e.g. Cancel).
    if result != 1:
        return
    nameLookAt = p.value("LookAt Name:")
EXpX = 'degrees(atan2(%s.translate.y-translate.y,sqrt(pow(%s.translate.x-translate.x,2)+pow(%s.translate.z-translate.z,2))))' % (nameLookAt, nameLookAt, nameLookAt)
EXpY = '%s.translate.z-this.translate.z >= 0 ? 180+degrees(atan2(%s.translate.x-translate.x,%s.translate.z-translate.z)):180+degrees(atan2(%s.translate.x-translate.x,%s.translate.z-translate.z))' % (nameLookAt, nameLookAt, nameLookAt, nameLookAt, nameLookAt)
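    # EXpX pitches the node toward the target: atan2(height difference,
    # horizontal distance in the x/z plane). EXpY yaws around the y axis via
    # atan2(dx, dz); note that both branches of its ternary are identical.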
nuke.nodes.Axis(name=nameLookAt)
for n in nuke.selectedNodes():
n['rotate'].setExpression(EXpX, 0)
n['rotate'].setExpression(EXpY, 1) | 46.44186 | 263 | 0.725588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,487 | 0.744617 |
f554392a1fb675c44914e72f8067a655af6c342c | 464 | py | Python | graphql/main.py | py-in-the-sky/appengine-swapi | 824d770cd11e5510b2300d1e248a9474e3fde8c2 | [
"MIT"
]
| null | null | null | graphql/main.py | py-in-the-sky/appengine-swapi | 824d770cd11e5510b2300d1e248a9474e3fde8c2 | [
"MIT"
]
| null | null | null | graphql/main.py | py-in-the-sky/appengine-swapi | 824d770cd11e5510b2300d1e248a9474e3fde8c2 | [
"MIT"
]
| null | null | null | """
`main` is the top level module where AppEngine gets access
to your Flask application.
"""
from app import create_app
from config import config
from os import environ
if environ.get('SERVER_SOFTWARE', '').startswith('Development'):
app_config = config['development']
else:
app_config = config['production']
app = create_app(app_config)
# Note: We don't need to call run() since our application is
# embedded within the App Engine WSGI application server.
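#
# For local debugging outside App Engine you could start it directly
# (illustrative only):
#     app.run(host='127.0.0.1', port=8080, debug=True)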
| 22.095238 | 60 | 0.752155 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 265 | 0.571121 |