Dataset schema, one row per source file (⌀ marks columns that may be null):

| Column | Type and range |
|---|---|
| hexsha | stringlengths 40-40 |
| size | int64 5-2.06M |
| ext | stringclasses 10 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 3-248 |
| max_stars_repo_name | stringlengths 5-125 |
| max_stars_repo_head_hexsha | stringlengths 40-78 |
| max_stars_repo_licenses | listlengths 1-10 |
| max_stars_count | int64 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24-24 ⌀ |
| max_issues_repo_path | stringlengths 3-248 |
| max_issues_repo_name | stringlengths 5-125 |
| max_issues_repo_head_hexsha | stringlengths 40-78 |
| max_issues_repo_licenses | listlengths 1-10 |
| max_issues_count | int64 1-67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24-24 ⌀ |
| max_forks_repo_path | stringlengths 3-248 |
| max_forks_repo_name | stringlengths 5-125 |
| max_forks_repo_head_hexsha | stringlengths 40-78 |
| max_forks_repo_licenses | listlengths 1-10 |
| max_forks_count | int64 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24-24 ⌀ |
| content | stringlengths 5-2.06M |
| avg_line_length | float64 1-1.02M |
| max_line_length | int64 3-1.03M |
| alphanum_fraction | float64 0-1 |
| count_classes | int64 0-1.6M |
| score_classes | float64 0-1 |
| count_generators | int64 0-651k |
| score_generators | float64 0-1 |
| count_decorators | int64 0-990k |
| score_decorators | float64 0-1 |
| count_async_functions | int64 0-235k |
| score_async_functions | float64 0-1 |
| count_documentation | int64 0-1.04M |
| score_documentation | float64 0-1 |
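A minimal sketch of how a dump with this schema could be loaded and filtered. The use of pandas, the file name `files.parquet`, and the 0.25 threshold are assumptions for illustration only; the column names come from the schema above.

```python
# Illustrative only: load a file dump with the schema above and keep
# well-documented Python files. The parquet file name and the threshold
# are assumptions, not part of this dataset.
import pandas as pd

df = pd.read_parquet("files.parquet")  # assumed local export of the rows below

keep = df[(df["ext"] == "py") & (df["score_documentation"] > 0.25)]
for _, row in keep.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], len(row["content"]))
```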
410af5985b4b1c70714faefbc090b58a89a67ddd | 270 | py | Python | main/run_camera_calibrate.py | mcekwonu/pycamera | 0d2c9cb70e8cbc9012e252c06ff1827ae8544cf2 | [
"MIT"
]
| null | null | null | main/run_camera_calibrate.py | mcekwonu/pycamera | 0d2c9cb70e8cbc9012e252c06ff1827ae8544cf2 | [
"MIT"
]
| null | null | null | main/run_camera_calibrate.py | mcekwonu/pycamera | 0d2c9cb70e8cbc9012e252c06ff1827ae8544cf2 | [
"MIT"
]
| null | null | null | """Script to run camera calibration"""
from camera import Camera
camera = Camera(source_dir='/home/mce/Documents/bubble3D/calibration/Cam01', outfilename='Cam01',
target_dir='results/camera', verbose=True)
camera.calibrate()
camera.compute_undistort()
| 30 | 97 | 0.740741 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.403704 |
410b5247c45f5769ae5e7ac2ac703fb1bb25e83e | 1,994 | py | Python | Hub/Program/Other/Class/ZThreadSingle.py | MPZinke/SmartCurtain | ef9976d7a6b982bb044e6fd914fdea4756d5b5c4 | [
"MIT"
]
| null | null | null | Hub/Program/Other/Class/ZThreadSingle.py | MPZinke/SmartCurtain | ef9976d7a6b982bb044e6fd914fdea4756d5b5c4 | [
"MIT"
]
| 18 | 2020-06-21T02:36:52.000Z | 2022-03-14T04:17:56.000Z | Hub/Program/Other/Class/ZThreadSingle.py | MPZinke/SmartCurtain | ef9976d7a6b982bb044e6fd914fdea4756d5b5c4 | [
"MIT"
]
| 1 | 2020-01-19T02:24:38.000Z | 2020-01-19T02:24:38.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "MPZinke"
########################################################################################################################
# #
# created by: MPZinke #
# on 2021.09.14 #
# #
# DESCRIPTION: Created to separate the crazy logic between repeating and single occurrence threads. The flow of this #
# class is Sleep -> Action, whereas the repeating class is (Action -> Sleep) <- REPEAT. #
# BUGS: #
# FUTURE: #
# #
########################################################################################################################
from collections.abc import Callable;
from Other.Class.ZThread import ZThread;
from Other.Logger import log_error;
class ZThreadSingle(ZThread):
def __init__(self, name : str, loop_process : Callable, sleep_time : Callable):
ZThread.__init__(self, name, loop_process, sleep_time);
# Main loop that runs thread if activated.
# Check that it is supposed to do stuff, then sleep the thread
def _thread_loop(self) -> None:
try: # make it safe!!!
self.sleep(self._sleep_time());
self._loop_process();
except Exception as error:
try: log_error(error);
except: pass;
| 51.128205 | 120 | 0.336008 | 470 | 0.235707 | 0 | 0 | 0 | 0 | 0 | 0 | 1,496 | 0.750251 |
410eb00a7e9f189e5d58a886e9ad9987223272ea | 783 | py | Python | flasks/conver.py | edgells/python-commons | 38c0aa0ec10304a4147ea231c92c9e34da462052 | [
"MIT"
]
| null | null | null | flasks/conver.py | edgells/python-commons | 38c0aa0ec10304a4147ea231c92c9e34da462052 | [
"MIT"
]
| null | null | null | flasks/conver.py | edgells/python-commons | 38c0aa0ec10304a4147ea231c92c9e34da462052 | [
"MIT"
]
| null | null | null | from flask import Flask
from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
def __init__(self, url_map, *args):
super(RegexConverter, self).__init__(url_map)
self.regex = args[0]
def to_python(self, value):
"""
The matched value from the URL.
:param value:
:return:
"""
return int(value)
def to_url(self, value):
"""
Used by url_for to build the URL corresponding to this view.
:param value:
:return:
"""
pass
app = Flask(__name__)
app.config['DEBUG'] = True
app.url_map.converters['re'] = RegexConverter
@app.route('/user/<re("[0-9]{3}"):user_id>/')
def users(user_id):
return {'data': "user_id: %s" % user_id}
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8888)
| 19.575 | 53 | 0.583653 | 469 | 0.574051 | 0 | 0 | 110 | 0.134639 | 0 | 0 | 271 | 0.331701 |
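A quick way to exercise the converter defined above without running a server is Flask's built-in test client, assuming the module above is importable and a Flask version that supports dict return values (1.1+); the example paths are made up.

```python
# Hypothetical check of the regex converter via Flask's test client.
client = app.test_client()
print(client.get("/user/123/").status_code)  # "123" matches [0-9]{3} -> 200
print(client.get("/user/12/").status_code)   # too short for the pattern -> 404
```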
4111e0047e651eec2b041b914921035fe36454e5 | 773 | py | Python | trend_analyze/src/model/entity_url.py | popper2710/Trend_Analyze | 0c98bcd7986bdb2d2b9bdc8022bfa08ddf0e7b0f | [
"MIT"
]
| null | null | null | trend_analyze/src/model/entity_url.py | popper2710/Trend_Analyze | 0c98bcd7986bdb2d2b9bdc8022bfa08ddf0e7b0f | [
"MIT"
]
| 2 | 2020-09-26T14:58:33.000Z | 2021-03-31T20:01:40.000Z | trend_analyze/src/model/entity_url.py | popper2710/Trend_Analyze | 0c98bcd7986bdb2d2b9bdc8022bfa08ddf0e7b0f | [
"MIT"
]
| null | null | null | from pyfields import field
from trend_analyze.src.validate import Validate
from trend_analyze.config import *
class EntityUrl:
v = Validate().generate
url: str = field(default=DEFAULT_ENTITY_URL, validators=v(is_blank=True, max_len=150), check_type=True)
start: int = field(default=-1, check_type=True)
end: int = field(default=-1, check_type=True)
expanded_url: str = field(default="", validators=v(max_len=2083), check_type=True)
created_at: datetime = field(default=DEFAULT_CREATED_AT, check_type=True)
def to_vec(self) -> dict:
return {
"url": self.url,
"start": self.start,
"end": self.end,
"expanded_url": self.expanded_url,
"created_at": self.created_at,
}
| 32.208333 | 107 | 0.659767 | 659 | 0.852523 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.058215 |
41122da858230ceb4c96eb4a8c7375d59b77bc28 | 8,148 | py | Python | kotti/testing.py | mete0r/Kotti | e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | [
"Naumen",
"Condor-1.1",
"MS-PL"
]
| null | null | null | kotti/testing.py | mete0r/Kotti | e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | [
"Naumen",
"Condor-1.1",
"MS-PL"
]
| null | null | null | kotti/testing.py | mete0r/Kotti | e89103cc57d5d2af8d60eb8208ae9d04c068f6e7 | [
"Naumen",
"Condor-1.1",
"MS-PL"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Inheritance Diagram
-------------------
.. inheritance-diagram:: kotti.testing
"""
import os
from os.path import join, dirname
from unittest import TestCase
from pytest import mark
from pyramid import testing
from pyramid.events import NewResponse
from pyramid.security import ALL_PERMISSIONS
from zope.deprecation.deprecation import deprecate
import transaction
# re-enable deprecation warnings during test runs
# however, let the `ImportWarning` produced by Babel's
# `localedata.py` vs `localedata/` show up once...
from warnings import catch_warnings
with catch_warnings():
from babel import localedata
import compiler
localedata, compiler # make pyflakes happy... :p
# py.test markers (see http://pytest.org/latest/example/markers.html)
user = mark.user
BASE_URL = 'http://localhost:6543'
class Dummy(dict):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class DummyRequest(testing.DummyRequest):
is_xhr = False
POST = dict()
user = None
referrer = None
def is_response(self, ob):
return (hasattr(ob, 'app_iter') and hasattr(ob, 'headerlist') and
hasattr(ob, 'status'))
def asset(name):
import kotti
return open(join(dirname(kotti.__file__), 'tests', name), 'rb')
def includeme_login(config):
config.add_view(
login_view,
name='login',
renderer='kotti:templates/login.pt')
def includeme_layout(config):
# override edit master layout with view master layout
config.override_asset(
to_override='kotti:templates/edit/master.pt',
override_with='kotti:templates/view/master.pt')
def login_view(request):
return {}
def dummy_search(search_term, request):
return u"Not found. Sorry!"
def testing_db_url():
return os.environ.get('KOTTI_TEST_DB_STRING', 'sqlite://')
def _initTestingDB():
from sqlalchemy import create_engine
from kotti import get_settings
from kotti.resources import initialize_sql
database_url = testing_db_url()
get_settings()['sqlalchemy.url'] = database_url
session = initialize_sql(create_engine(database_url), drop_all=True)
return session
def _populator():
from kotti import DBSession
from kotti.resources import Document
from kotti.populate import populate
populate()
for doc in DBSession.query(Document)[1:]:
DBSession.delete(doc)
transaction.commit()
def _turn_warnings_into_errors(): # pragma: no cover
# turn all warnings into errors, but let the `ImportWarning`
# produced by Babel's `localedata.py` vs `localedata/` show up once...
from babel import localedata
localedata # make pyflakes happy... :p
from warnings import filterwarnings
filterwarnings("error")
def setUp(init_db=True, **kwargs):
# _turn_warnings_into_errors()
from kotti import _resolve_dotted
from kotti import conf_defaults
tearDown()
settings = conf_defaults.copy()
settings['kotti.secret'] = 'secret'
settings['kotti.secret2'] = 'secret2'
settings['kotti.populators'] = 'kotti.testing._populator'
settings.update(kwargs.get('settings', {}))
settings = _resolve_dotted(settings)
kwargs['settings'] = settings
config = testing.setUp(**kwargs)
config.add_default_renderers()
if init_db:
_initTestingDB()
transaction.begin()
return config
def tearDown():
from kotti import events
from kotti import security
from kotti.message import _inject_mailer
    # These should arguably use the configurator, so they don't need
# to be torn down separately:
events.clear()
security.reset()
_inject_mailer[:] = []
transaction.abort()
testing.tearDown()
class UnitTestBase(TestCase):
def setUp(self, **kwargs):
self.config = setUp(**kwargs)
def tearDown(self):
tearDown()
class EventTestBase(TestCase):
def setUp(self, **kwargs):
super(EventTestBase, self).setUp(**kwargs)
self.config.include('kotti.events')
# Functional ----
def _functional_includeme(config):
from kotti import DBSession
def expire(event):
DBSession.flush()
DBSession.expire_all()
config.add_subscriber(expire, NewResponse)
def _zope_testbrowser_pyquery(self):
from pyquery import PyQuery
return PyQuery(
self.contents.replace('xmlns="http://www.w3.org/1999/xhtml', ''))
def setUpFunctional(global_config=None, **settings):
from kotti import main
import wsgi_intercept.zope_testbrowser
from webtest import TestApp
tearDown()
_settings = {
'sqlalchemy.url': testing_db_url(),
'kotti.secret': 'secret',
'kotti.site_title': 'Website des Kottbusser Tors', # for mailing
'kotti.populators': 'kotti.testing._populator',
'mail.default_sender': 'kotti@localhost',
'pyramid.includes': 'kotti.testing._functional_includeme',
}
_settings.update(settings)
host, port = BASE_URL.split(':')[-2:]
app = main({}, **_settings)
wsgi_intercept.add_wsgi_intercept(host[2:], int(port), lambda: app)
Browser = wsgi_intercept.zope_testbrowser.WSGI_Browser
Browser.pyquery = property(_zope_testbrowser_pyquery)
return dict(
Browser=Browser,
browser=Browser(),
test_app=TestApp(app),
)
class FunctionalTestBase(TestCase):
BASE_URL = BASE_URL
def setUp(self, **kwargs):
self.__dict__.update(setUpFunctional(**kwargs))
def tearDown(self):
tearDown()
def login(self, login=u'admin', password=u'secret'):
return self.test_app.post(
'/@@login',
{'login': login, 'password': password, 'submit': 'submit'},
status=302,
)
@deprecate('login_testbrowser is deprecated as of Kotti 0.7. Please use '
'the `browser` funcarg in conjunction with the `@user` '
'decorator.')
def login_testbrowser(self, login=u'admin', password=u'secret'):
browser = self.Browser()
browser.open(BASE_URL + '/edit')
browser.getControl("Username or email").value = login
browser.getControl("Password").value = password
browser.getControl(name="submit").click()
return browser
class TestingRootFactory(dict):
__name__ = '' # root is required to have an empty name!
__parent__ = None
__acl__ = [('Allow', 'role:admin', ALL_PERMISSIONS)]
def __init__(self, request):
super(TestingRootFactory, self).__init__()
def dummy_view(context, request):
return {}
def include_testing_view(config):
config.add_view(
dummy_view,
context=TestingRootFactory,
renderer='kotti:tests/testing_view.pt',
)
config.add_view(
dummy_view,
name='secured',
permission='view',
context=TestingRootFactory,
renderer='kotti:tests/testing_view.pt',
)
def setUpFunctionalStrippedDownApp(global_config=None, **settings):
# An app that doesn't use Nodes at all
_settings = {
'kotti.base_includes': (
'kotti kotti.views kotti.views.login kotti.views.users'),
'kotti.use_tables': 'principals',
'kotti.populators': 'kotti.populate.populate_users',
'pyramid.includes': 'kotti.testing.include_testing_view',
'kotti.root_factory': 'kotti.testing.TestingRootFactory',
'kotti.site_title': 'My Stripped Down Kotti',
}
_settings.update(settings)
return setUpFunctional(global_config, **_settings)
def registerDummyMailer():
from pyramid_mailer.mailer import DummyMailer
from kotti.message import _inject_mailer
mailer = DummyMailer()
_inject_mailer.append(mailer)
return mailer
# set up deprecation warnings
from zope.deprecation.deprecation import deprecated
for item in UnitTestBase, EventTestBase, FunctionalTestBase, _initTestingDB:
name = getattr(item, '__name__', item)
deprecated(name, 'Unittest-style tests are deprecated as of Kotti 0.7. '
'Please use pytest function arguments instead.')
| 26.627451 | 78 | 0.676976 | 1,835 | 0.225209 | 0 | 0 | 509 | 0.062469 | 0 | 0 | 2,236 | 0.274423 |
4113d55c3875b03d32cbc830fabbfbb2cdd11046 | 694 | py | Python | leetcode/trees/level-order.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
]
| 8 | 2019-05-14T12:50:29.000Z | 2022-03-01T09:08:27.000Z | leetcode/trees/level-order.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
]
| 46 | 2019-03-24T20:59:29.000Z | 2019-04-09T16:28:43.000Z | leetcode/trees/level-order.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
]
| 1 | 2022-01-28T12:46:29.000Z | 2022-01-28T12:46:29.000Z | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
result = []
queue = [(root, 0)]
while queue:
node, level = queue.pop(0)
if len(result) <= level:
result.append([])
result[level].append(node.val)
if node.left:
queue.append((node.left, level + 1))
if node.right:
queue.append((node.right, level + 1))
return result
| 22.387097 | 60 | 0.487032 | 529 | 0.762248 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.224784 |
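A minimal driver for the traversal above, assuming `List` is imported from `typing` and `TreeNode` is defined before the `Solution` class, as on the original platform; the three-level tree is an arbitrary example.

```python
# Hypothetical usage of Solution.levelOrder; TreeNode mirrors the
# commented-out definition at the top of the snippet.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left, root.right = TreeNode(9), TreeNode(20)
root.right.left, root.right.right = TreeNode(15), TreeNode(7)

print(Solution().levelOrder(root))  # expected: [[3], [9, 20], [15, 7]]
```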
41141320d4d129bb735e6489daf0039fbb723f89 | 300 | py | Python | Python Files/count_down.py | gerryjenkinslb/cs22-slides-and-py-files | 9474f7a2e50d57afa13edc3b13c008f7295da747 | [
"MIT"
]
| 28 | 2019-07-05T04:00:45.000Z | 2022-02-16T09:43:50.000Z | Python Files/count_down.py | gerryjenkinslb/cs22-slides-and-py-files | 9474f7a2e50d57afa13edc3b13c008f7295da747 | [
"MIT"
]
| null | null | null | Python Files/count_down.py | gerryjenkinslb/cs22-slides-and-py-files | 9474f7a2e50d57afa13edc3b13c008f7295da747 | [
"MIT"
]
| 22 | 2018-10-24T04:42:05.000Z | 2022-02-04T08:17:27.000Z | # simple recursions
def count_down(n): # print n, n-1, n-2, ... , 3, 2, 1
print(n, end=" ")
if n > 1: # check for end case
count_down(n-1) # do smaller problem
print("-"*5, "count down from 10")
count_down(10)
print()
print("-"*5, "count down from 5")
count_down(5)
print()
| 20 | 54 | 0.576667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.47 |
4116fb09afbe45f7f25e769dc8cd6a9b1dcfa4fc | 1,513 | py | Python | src/activations.py | saman-codes/dldojo | 9fd828f1902ba3d46e9bb5f554ef37d07335b29e | [
"MIT"
]
| null | null | null | src/activations.py | saman-codes/dldojo | 9fd828f1902ba3d46e9bb5f554ef37d07335b29e | [
"MIT"
]
| null | null | null | src/activations.py | saman-codes/dldojo | 9fd828f1902ba3d46e9bb5f554ef37d07335b29e | [
"MIT"
]
| null | null | null | # Standard Python
import copy
# Thirdparty
import numpy as np
class Activation():
'''
Base class for an activation layer
Inspired by
https://github.com/eriklindernoren/ML-From-Scratch/blob/master/mlfromscratch/deep_learning/activation_functions.py
'''
def __call__(self, x):
return
def derivative(self, x):
return
class Relu(Activation):
def __call__(self, x):
return np.absolute(x * (x > 0))
def derivative(self, x):
return np.absolute(1. * (x > 0))
class LeakyRelu(Activation):
def __init__(self, mu=0.05):
self.mu = mu
return
def __call__(self, x):
return np.maximum(self.mu*x, x)
def derivative(self, x):
x[x>=0] = 1
x[x<1] = self.mu
return x
class Linear(Activation):
def __call__(self, x):
return x
def derivative(self, x):
return 1
class Sigmoid(Activation):
def __call__(self, x):
return 1./(1.+np.nan_to_num((np.exp(-x))))
def derivative(self, x):
return self.__call__(x)*(1.-self.__call__(x))
class Softmax(Activation):
def __call__(self, x):
# Using normalised x for numerical stability
norm_x = x - np.max(x, axis=0)
return np.exp(norm_x) / np.exp(norm_x).sum(axis=0, keepdims=True)
def derivative(self, x):
batch_jacobian = np.apply_along_axis(
lambda col: np.diag(col) - np.outer(col, col), 0, self.__call__(x))
return batch_jacobian
| 19.649351 | 118 | 0.604759 | 1,430 | 0.945142 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.171183 |
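A small numerical check of the Jacobian construction used in `Softmax.derivative` above (diag(s) - s sᵀ per column); the 3x2 input values are arbitrary and only numpy is assumed.

```python
# Illustrative check: columns of x are samples, matching the axis=0
# convention used by the Softmax class above.
import numpy as np

x = np.array([[1.0, 0.5],
              [2.0, 0.1],
              [0.5, 0.3]])  # (features=3, batch=2), arbitrary values

e = np.exp(x - x.max(axis=0))
s = e / e.sum(axis=0, keepdims=True)
jac = np.apply_along_axis(lambda col: np.diag(col) - np.outer(col, col), 0, s)

print(jac.shape)                          # (3, 3, 2): one Jacobian per sample
print(np.allclose(jac.sum(axis=1), 0.0))  # rows of each Jacobian sum to ~0
```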
411e7327dfc8f59a57e3065bd00dbadcb1b1f18c | 302 | py | Python | mkdir.py | FunsomMars/Timg | 216c994fd0b100996e72f4cda4eace369c8452ef | [
"MIT"
]
| null | null | null | mkdir.py | FunsomMars/Timg | 216c994fd0b100996e72f4cda4eace369c8452ef | [
"MIT"
]
| null | null | null | mkdir.py | FunsomMars/Timg | 216c994fd0b100996e72f4cda4eace369c8452ef | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019-07-23 22:47
# @Author : Simon Meng
# @Site :
# @File : mkdir.py
# @Software: PyCharm
import os
# Make a folder under the current path
def mkdir(path):
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
| 17.764706 | 38 | 0.609272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.612583 |
411fa137c5df36c387a70295ace27f0afc3352fe | 2,183 | py | Python | scripts/create-opencl-headers.py | molkoback/icemet-server | 9d7a29b38c711534923952d598fc37efff5db154 | [
"MIT"
]
| null | null | null | scripts/create-opencl-headers.py | molkoback/icemet-server | 9d7a29b38c711534923952d598fc37efff5db154 | [
"MIT"
]
| null | null | null | scripts/create-opencl-headers.py | molkoback/icemet-server | 9d7a29b38c711534923952d598fc37efff5db154 | [
"MIT"
]
| 1 | 2020-09-16T15:33:23.000Z | 2020-09-16T15:33:23.000Z | import os
import sys
header_file_fmt = "{name}_ocl.hpp"
header_string = (
"#ifndef {definition}_OCL_HPP\n"
"#define {definition}_OCL_HPP\n"
"#include <opencv2/core/ocl.hpp>\n"
"const cv::ocl::ProgramSource& {module}_{name}_ocl() {{\n"
"static cv::ocl::ProgramSource source(\"{module}\", \"{name}\", \"{kernel}\", \"\");\n"
"return source;\n"
"}}\n"
"#endif\n"
)
def clear_between(string, del1, del2):
pos1 = string.find(del1)
if pos1 < 0:
return string
pos2 = string[pos1:].find(del2) + pos1
if pos2 < 0:
return string
return string.replace(string[pos1:pos2+len(del2)], "")
def clear_all(string, del1, del2):
while True:
cleared = clear_between(string, del1, del2)
if string == cleared:
return string
string = cleared
def clear_repeating(string, tok):
while True:
cleared = string.replace(tok+tok, tok)
if string == cleared:
return string
string = cleared
def compress(code):
code = clear_all(code, "/*", "*/")
code = clear_all(code, "//", "\n")
code = code.replace("\n", "\\n")
code = code.replace("\t", "")
code = code.replace("\"", "\\\"")
code = clear_repeating(code, " ")
code = clear_repeating(code, "\\n")
return code
def create_header_file(kernel_path, header_path):
with open(kernel_path) as fp:
kernel = compress(fp.read())
base = os.path.splitext(os.path.basename(kernel_path))[0]
module, name = base.split("_")
data = header_string.format(
definition=base.upper(),
module=module,
name=name,
kernel=kernel
)
with open(header_path, "w") as fp:
fp.write(data)
def create_headers(kernel_dir, header_dir):
for kernel_file in os.listdir(kernel_dir):
kernel_path = os.path.join(kernel_dir, kernel_file)
if os.path.isfile(kernel_path) and kernel_file.endswith(".cl"):
header_file = header_file_fmt.format(name=os.path.splitext(kernel_file)[0])
header_path = os.path.join(header_dir, header_file)
create_header_file(kernel_path, header_path)
print("-- Created {}".format(header_file))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: {} <kernel_dir> <header_dir>".format(sys.argv[0]))
sys.exit(1)
os.makedirs(sys.argv[2], exist_ok=True)
create_headers(sys.argv[1], sys.argv[2])
| 27.987179 | 87 | 0.685295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.19148 |
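A small illustration of what `compress` above produces for a kernel source string, assuming the functions in the script are importable; the two-line kernel is made up.

```python
# Hypothetical input/output for compress(): the line comment is stripped and
# the newline is escaped so the kernel can live inside a C string literal.
src = '// simple kernel\n__kernel void add(__global float* a) { a[0] += 1.0f; }\n'
print(compress(src))
# -> __kernel void add(__global float* a) { a[0] += 1.0f; }\n
```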
4120f12f58edfe39a3cbad96c6f37cc20266c8ae | 251 | py | Python | main.py | aleattene/lotto-game | 6871699c44c988f926db986668524c002d3560f2 | [
"MIT"
]
| null | null | null | main.py | aleattene/lotto-game | 6871699c44c988f926db986668524c002d3560f2 | [
"MIT"
]
| null | null | null | main.py | aleattene/lotto-game | 6871699c44c988f926db986668524c002d3560f2 | [
"MIT"
]
| null | null | null |
from lotto_game.lotto_game import LottoGame
def main():
# Play Tickets
LottoGame.acquire_tickets()
# Do Extraction
LottoGame.do_extraction()
# Check Results
LottoGame.check_results()
if __name__ == "__main__":
main()
| 14.764706 | 43 | 0.681275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.215139 |
412189bdca83add7a6eee8aca45c35007f4cbdb4 | 256 | py | Python | models/mail_message.py | billhepeng/wx_tools | 64369531bd76a935eff547c50ff68150a240849d | [
"Apache-2.0"
]
| 1 | 2021-01-19T02:49:14.000Z | 2021-01-19T02:49:14.000Z | models/mail_message.py | billhepeng/wx_tools | 64369531bd76a935eff547c50ff68150a240849d | [
"Apache-2.0"
]
| null | null | null | models/mail_message.py | billhepeng/wx_tools | 64369531bd76a935eff547c50ff68150a240849d | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class MailMessage(models.Model):
_inherit = 'mail.message'
weixin_id = fields.Char('微信ID', required=False)
| 21.333333 | 74 | 0.703125 | 119 | 0.457692 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.465385 |
41229bdd678deb184613795447a9a74eef973ea7 | 2,361 | py | Python | src/TheLanguage/Parser/Statements/BinaryStatementParserInfo.py | davidbrownell/DavidBrownell_TheLanguage | 07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98 | [
"BSL-1.0"
]
| null | null | null | src/TheLanguage/Parser/Statements/BinaryStatementParserInfo.py | davidbrownell/DavidBrownell_TheLanguage | 07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98 | [
"BSL-1.0"
]
| null | null | null | src/TheLanguage/Parser/Statements/BinaryStatementParserInfo.py | davidbrownell/DavidBrownell_TheLanguage | 07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98 | [
"BSL-1.0"
]
| 1 | 2021-06-18T18:58:57.000Z | 2021-06-18T18:58:57.000Z | # ----------------------------------------------------------------------
# |
# | BinaryStatementParserInfo.py
# |
# | David Brownell <[email protected]>
# | 2021-10-12 13:55:27
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Contains the BinaryStatementParserInfo"""
import os
from enum import auto, Enum
from dataclasses import dataclass
import CommonEnvironment
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from .StatementParserInfo import StatementParserInfo
from ..Expressions.ExpressionParserInfo import ExpressionParserInfo
from ..Names.NameParserInfo import NameParserInfo
# ----------------------------------------------------------------------
class OperatorType(Enum):
# Mathematical
AddInplace = auto()
SubtractInplace = auto()
MultiplyInplace = auto()
PowerInplace = auto()
DivideInplace = auto()
DivideFloorInplace = auto()
ModuloInplace = auto()
# Bit Manipulation
BitShiftLeftInplace = auto()
BitShiftRightInplace = auto()
BitXorInplace = auto()
BitAndInplace = auto()
BitOrInplace = auto()
# ----------------------------------------------------------------------
@dataclass(frozen=True, repr=False)
class BinaryStatementParserInfo(StatementParserInfo):
Name: NameParserInfo
Operator: OperatorType
Expression: ExpressionParserInfo
| 36.890625 | 79 | 0.454892 | 864 | 0.365947 | 0 | 0 | 182 | 0.077086 | 0 | 0 | 911 | 0.385853 |
4122eec27606ee355d75c58e79851c7226e97614 | 10,947 | py | Python | deprecated/finder_0.1.py | acic2015/findr | ac3061cb056cfe6a151c4096d04bce0d03545032 | [
"MIT"
]
| 7 | 2015-11-24T04:44:55.000Z | 2018-02-08T02:13:14.000Z | deprecated/finder_0.1.py | acic2015/findr | ac3061cb056cfe6a151c4096d04bce0d03545032 | [
"MIT"
]
| 9 | 2015-11-24T17:43:13.000Z | 2017-09-15T19:37:10.000Z | deprecated/finder_0.1.py | acic2015/findr | ac3061cb056cfe6a151c4096d04bce0d03545032 | [
"MIT"
]
| 4 | 2015-12-15T03:39:40.000Z | 2021-07-20T11:28:09.000Z | __author__ = 'Daniel Kapellusch'
import astropy.io.fits as fits
import os
import csv
import json
import sys
import multiprocessing as mp #necessary imports. Note: this is written in python 2.
from os import path
import ConfigParser
from os import system
#necessary imports. Note: this is written in python 2.
global path, max_processes,file_shifts,darkmaster,darksub,fitscent
def main(argv):
if not argv:
print "findr_0.1.py, path, config_file name"
global imgpath, max_processes,file_shifts,darkmaster,darksub,fitscent
imgpath = argv[0] # get path and cfg file name from passed args
config_file = argv[1]
print "Loading Configuration File..."
config = ConfigParser.ConfigParser() # open config file as input file with config parser
config.read(config_file)
max_processes = config.get("findr","max_processes") # read cfg and get applicable fields
file_shifts = config.get("findr","fileshifts")
darkmaster = config.get("findr","darkmaster_path")
darksub = config.get("findr","darksub_path")
fitscent = config.get("findr","fitscent_path")
darklist_fn, masterdark_fn, norm_fn = "darks.list", "mastedark.fits","norm.dat"
fits_lst = [path+"/"+fit for fit in os.listdir(path) if fit.endswith(".fits")] # get files in dir if they are .fits
with fits.open(fits_lst[0]) as fits_file:
items = list(set([str(header_field) for header_field in fits_file[0].header.keys()]+["FILENAME"])) # get fieldnames from first fits file
pool = mp.Pool(processes=None) # setup multiprocessing pool
ls = pool.map(get_metadata_and_sort,fits_lst) #asynchronously gather metadata
sorted_dic = sort_list(ls) # sort metadata into dictionary of lists based on VIMTYPE
make_tsv(ls,items) #generate tsv of metadata
total_dic = {item["FILENAME"]:item for item in ls} # make
build_json(total_dic) #create json from list of metadata
cleaned_dic = clean_dic(sorted_dic,total_dic) # remove science files from metadata dictionary if AOLOOPST is OPEN
runDarkmaster(cleaned_dic,darklist_fn,masterdark_fn,norm_fn) # run master dark with
cent_dsub_files = subtractAndCenter(cleaned_dic,masterdark_fn,file_shifts) # run subtractAndCenter
#TODO Klip-reduce
return(sorted_dic) #return a dictionary of lists of filenames sorted by type
def get_metadata_and_sort(image):
print("Building Total_Dic")
hdulist = fits.open(image) # open each fits file in the list
header = hdulist[0].header #get all the metadata from the fits file hdulist
hdulist.close()
header["FILENAME"] = path.basename(image)
temp = str(str(header["COMMENT"]).encode('ascii', 'ignore')) #encode in ascii as unicode doesn't play nice
header = {key: value for key, value in header.items() #remove double comment field
if key is not "COMMENT"}
header["COMMENT"] = temp.replace("\n"," ") #put comments back in
return(header)
def make_tsv(header,items):
print("Outputting metadata.tsv")
with open('metadata.tsv',"wb") as csvfile: #create a file called metadata.tsv for the output
writer = csv.DictWriter(csvfile,fieldnames=items,delimiter= "\t") #set up the writer, header fields, and delimiter
writer.writeheader() # write the headers to the file
[writer.writerow({k:str(image[k]) for k in items}) for image in header]
def build_json(total_dic):
print("Outputting metadata.json")
with open("metadata.json",'w') as jsonfile: #builds json file of metadata not sorted by VIMTYPE
json.dump(total_dic,jsonfile, separators=(',',':'),indent=4)
def sort_list(ls):
print("Sorting list into sorted_dic")
#sort filenames into dictionary by VIMTYPE
dic = {"SCIENCE":[],"DARK":[]}
[dic["SCIENCE"].append(i["FILENAME"]) if i["VIMTYPE"] == "SCIENCE" else dic["DARK"].append(i["FILENAME"]) for i in ls]
return(dic)
def clean_dic(sorted_dic,total_dic):
print("Cleaning dic")
cleaned_dic = {'SCIENCE':[],"DARK":sorted_dic["DARK"]}
for image in sorted_dic["SCIENCE"]: #Search dictionary built by my other script
if total_dic[image]["AOLOOPST"] == "CLOSED":
cleaned_dic["SCIENCE"].append(image) #store names of good files
return(cleaned_dic) #return those names
def writeListCfg(lst, cfgname):
"""
Write out a config file from a list.
- Entries: 'listItem\n'
:param lst: List to be written as a config file.
:param cfgname: Filename or path/to/filename for config file.
:return: Config filename or path/to/filename
"""
cfg_out = open(cfgname, 'w')
for e in lst:
cfg_out.write(str(e) + '\n')
cfg_out.close()
return cfgname
def writeDictCfg(dct, cfgname):
"""
Write out a config file from a dictionary.
- Entries: 'key=value\n'
:param dct: Dictionary to be written as a config file.
:param cfgname: Filename or path/to/filename for config file.
:return: Config filename or path/to/filename
"""
cfg_out = open(cfgname, 'w')
for k, v in dct.iteritems():
cfg_out.write('%s=%s\n' % (str(k), str(v)))
cfg_out.close()
return cfgname
def runDarkmaster(image_dict, darklist_filename, masterdark_filename, norm_filename,
bot_xo=None, bot_xf=None, bot_yo=None, bot_yf=None,
top_xo=None, top_xf=None, top_yo=None, top_yf=None,
width=None, height=None,
config=None, medianNorm=False, medianDark=False):
print("Running DarkMaster")
global path, darkmaster
# Write dark images to config file.
darks = [path+'/'+image for image in image_dict['DARK']]
writeListCfg(darks, darklist_filename)
# Fill out required parameters
options = '--fileListFile=%s --darkFileName=%s --normFileName=%s' % (darklist_filename,
masterdark_filename,
norm_filename)
# Fill out bottom/top normalization coordinates, if present.
if bot_xo and bot_xf and bot_yo and bot_yf and top_xo and top_xf and top_yo and top_yf:
options += ' --bot_xo=%s --bot_xf=%s --bot_yo=%s --bot_yf=%s' % (str(bot_xo), str(bot_xf),
str(bot_yo), str(bot_yf))
options += ' --top_xo=%s --top_xf=%s --top_yo=%s --top_yf=%s' % (str(top_xo), str(top_xf),
str(top_yo), str(top_yf))
# Fill out height/width of centered normalization region (overrides normalization coordinates), if present.
if width and height:
options += ' --width=%s --height=%s' % (str(width), str(height))
# Add median options, if present
if medianNorm:
options += ' --medianNorm'
if medianDark:
options += ' --medianDark'
# Build & call darkmaster command.
cmd = darkmaster + ' ' + options
print cmd
system(cmd)
return 1
def prependToFilename(filename, prepending):
"""
Prepend Text to Filename.
:param filename: Filename or path/to/filename to be modified.
:param prepending: String to prepend to filename.
:return: Modified filename or path/to/filename.
"""
b = os.path.basename(filename)
n = prepending + b
return filename.replace(b, n)
def spawnDsubCmd(science_img, dark_img, norm_bot=None, norm_top=None):
"""
Spawn a darksub command.
:param science_img: Science image filename or path/to/filename.
:param dark_img: Master dark filename or path/to/filename.
:param norm_bot: Multiplicative scaling to apply to the bottom amplifier (optional).
:param norm_top: Multiplicative scaling to apply to the top amplifier (optional).
:return: darksub_command, subtracted_fiilename
"""
dsub_out = prependToFilename(science_img, 'dsub_')
dsub_opts = '--inputFile=%s --darkFile=%s --outputFile=%s' % (science_img, dark_img, dsub_out)
if norm_bot:
dsub_opts += ' --norm_bot=%s' % str(norm_bot)
if norm_top:
dsub_opts += ' --norm_top=%s' % str(norm_top)
dsub_cmd = darksub + ' ' + dsub_opts
return dsub_cmd, dsub_out
def spawnCentCmd(subtracted_img, xshift, yshift):
"""
Spawn a fitscent command.
:param subtracted_img: Dark subtracted science image.
:param xshift: X shift to apply to image.
:param yshift: Y shift to apply to image.
:return: fitscent_command, centered_filename
"""
cent_out = prependToFilename(subtracted_img, 'cent_')
cent_opts = '--input=%s --x=%s --y=%s --output=%s' % (subtracted_img, str(xshift), str(yshift), cent_out)
cent_cmd = fitscent + ' ' + cent_opts
return cent_cmd, cent_out
def loadShifts(shifts_file):
shifts = {}
with open(shifts_file, 'r') as s:
for l in s:
c = l.split()
shifts[c[0]] = {'x': c[1], 'y': c[2]}
return shifts
def getNorms(img): # TODO
"""
:param img: Image to obtain normalization s for.
:return:
"""
top = ''
bot = ''
return top, bot
def getShifts(img, fileshifts): # TODOr
"""
:param img: image to get shift values
:return: xshift, yshift
"""
try:
xs = fileshifts[img]['x']
ys = fileshifts[img]['y']
return xs, ys
except KeyError:
print "Warning (getShifts): %s not found in fileshifts" % str(img)
return 0, 0
def runProcess(call):
os.system(call)
return 1
def subtractAndCenter(image_dict, masterdark, shifts_file):
global max_processes
print("Subtracting and Centering")
# Build list of science images to process.
sciences = image_dict['SCIENCE']
# Load shift values from file to memory.
fileshifts = loadShifts(shifts_file)
# Define necessary variables.
scmds = []
souts = []
ccmds = []
couts = []
# Build up commands for each science image.
for img in sciences:
# Get norm and shift values.
tnorm, bnorm = getNorms(img)
xshift, yshift = getShifts(img, fileshifts)
# Build subtraction task.
ds_cmd, ds_out = spawnDsubCmd(img, masterdark, norm_bot=bnorm, norm_top=tnorm)
# subtractions[img] = {'cmd': ds_cmd, 'out': ds_out}
scmds.append(ds_cmd)
souts.append(ds_out)
# Build centering task.
cn_cmd, cn_out = spawnCentCmd(ds_out, xshift=xshift, yshift=yshift)
# centerings[img] = {'cmd': cn_cmd, 'out': cn_out}
ccmds.append(cn_cmd)
couts.append(cn_out)
# Execute subtraction tasks (parallel).
sub_pool = mp.Pool(processes=max_processes)
sub_pool.map(runProcess, scmds)
# Execute centering tasks (parallel).
cent_pool = mp.Pool(processes=max_processes)
cent_pool.map(runProcess, ccmds)
# Return list of final filenames.
return couts
if __name__ == "__main__":
print(main(sys.argv[1:])) | 37.108475 | 145 | 0.652325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,556 | 0.416187 |
4122f41a65d52a80ce0e4e61b3b52bf36d00d875 | 3,143 | py | Python | concerned-coyotes/earlyinternet/news/tests.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
]
| 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | concerned-coyotes/earlyinternet/news/tests.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
]
| 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | concerned-coyotes/earlyinternet/news/tests.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
]
| 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | import datetime
import random
from django.test import TestCase
from django.utils.dateparse import parse_datetime
from .models import Article
class ArticleTestCase(TestCase):
def setUp(self) -> None:
self.article = Article.objects.create(
source="HackerNews",
author="Guido van Rossum",
title="Why Python is such a nice language",
description="...",
content="...",
url="http://python.org/",
published_at=datetime.datetime(2020, 1, 1, 12, 0)
)
def test_representation(self):
""" Test if Article.__str__ works correctly """
self.assertEqual(
str(self.article),
"Why Python is such a nice language 2020-01-01T12:00:00"
)
def test_article_manager_create_article(self):
"""
Test if Article.objects.create_article works correctly
:return:
"""
article = {
'source': {'id': 'news-com-au', 'name': 'News.com.au'},
'author': 'unknown',
'title': 'F1 British Grand Prix live: updates, results, starting grid, Vettel reacts to Ferrari sabotage '
'questions',
'description': 'The British Grand Prix has ended in incredible drama as the last lap went down to the '
'wire with Lewis Hamilton winning after his tyre blew on the last lap.',
'url': 'https://www.news.com.au/sport/motorsport/formula-one/live-updates-from-the-2020-british-grand'
'-prix/live-coverage/ba297f46d4e91321c092db9d3d5d2e1f',
'urlToImage': 'https://content.api.news/v3/images/bin/2554ff2213b5c8a54e9809d310e697db',
'publishedAt': '2020-08-02T22:04:07Z',
'content': '...'
}
created = Article.objects.create_article(article)
self.assertEqual(article['source']['name'], created.source)
self.assertEqual('unknown', created.author)
self.assertEqual(article['title'], created.title)
self.assertEqual(article['description'], created.description)
self.assertEqual(article['url'], created.url)
self.assertEqual(parse_datetime(article['publishedAt']), created.published_at)
self.assertEqual('...', created.content)
def test_article_manager_get_latest(self):
""" Test Article.objects.get_latest """
# create 10 articles
articles = [self.article]
for i in range(9):
year = random.randrange(1900, 2020)
month = random.randrange(1, 12)
day = random.randrange(1, 28)
hour = random.randrange(1, 24)
article = Article.objects.create(
source="", author="", title=str(i), description="", content="", url="http://example.org/",
published_at=datetime.datetime(year, month, day, hour)
)
articles.append(article)
# sort articles
articles.sort(key=lambda x: x.published_at, reverse=True)
self.assertEqual(
articles[:4],
list(Article.objects.get_latest(4))
)
| 39.2875 | 118 | 0.598473 | 2,997 | 0.953548 | 0 | 0 | 0 | 0 | 0 | 0 | 1,099 | 0.349666 |
4122fe432a52c30c10d6907870148cde9432de71 | 2,648 | py | Python | src/shake/sentence_nlp.py | makergabriel/gearmood | cd0e205e6e22f9f4b33d18d93e5bef5a39b8763e | [
"MIT"
]
| null | null | null | src/shake/sentence_nlp.py | makergabriel/gearmood | cd0e205e6e22f9f4b33d18d93e5bef5a39b8763e | [
"MIT"
]
| null | null | null | src/shake/sentence_nlp.py | makergabriel/gearmood | cd0e205e6e22f9f4b33d18d93e5bef5a39b8763e | [
"MIT"
]
| null | null | null | import nltk
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet
# TODO move cut words to config
CUT_WORDS = (
"drop",
"cut",
"leave",
"lose",
"trim",
"shed",
"cast",
"unload",
"strike",
"skip",
"throw",
"shake",
"shave",
"ditch",
)
# phrases to include
# shave some weight
# phrases to ignore
# leave no trace
# A verb could be categorized to any of the following codes
VERB_CODES = {
"VB", # Verb, base form
"VBD", # Verb, past tense
"VBG", # Verb, gerund or present participle
"VBN", # Verb, past participle
"VBP", # Verb, non-3rd person singular present
"VBZ", # Verb, 3rd person singular present
}
class Sentence:
def __init__(self):
self.cut_words = CUT_WORDS
#config = env_config.EnvConfig()
def parse_candidate_text(self, text):
parsed_text = {}
cut_word_objs = []
# take a closer look at which cut word is used in the sentence and how it's used
for cut_word in self.cut_words:
if cut_word in text:
# check if the cut_word is used as a verb
result = nltk.pos_tag(text)
cut_word_obj = {
"value": cut_word,
"pos_tag": [word_obj[1] for word_obj in result if word_obj[0] == cut_word],
}
cut_word_objs.append(cut_word_obj)
parsed_text["words"] = cut_word_objs
return parsed_text
# refactor to attach the sentence to the comment id
def parse_comments(self, text, word_list):
# sent_tokenize doesn't stop at new lines/carriage returns
# some comments use just the phrase with a punctuation mark
paragraphs = [p for p in text.split("\n") if p]
sentences = []
for paragraph in paragraphs:
sentences.extend(sent_tokenize(paragraph))
cut_sentences = []
# let's look for our cut words in the tokenized sentences
for sentence in sentences:
# get the words from the sentence to avoid partial matches
tokens = word_tokenize(sentence)
tokens = [w.lower() for w in tokens]
# get the text from the tokenized words
text = nltk.Text(tokens)
# only dive into tokenized sentences that have any of our cut words
if any(x in text for x in word_list):
parsed_text = self.parse_candidate_text(text)
parsed_text["sentence"] = sentence
cut_sentences.append(parsed_text)
return cut_sentences
| 31.52381 | 95 | 0.60423 | 1,894 | 0.715257 | 0 | 0 | 0 | 0 | 0 | 0 | 1,028 | 0.388218 |
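A minimal driver for `Sentence.parse_comments` above; the sample text is invented, and the `nltk.download` calls assume the tokenizer and tagger data are not already installed.

```python
# Hypothetical usage of the parser above; downloads are one-time per environment.
import nltk
nltk.download("punkt")
nltk.download("averaged_perceptron_tagger")

text = "This pack is heavy.\nYou could shave some weight by ditching the chair."
for hit in Sentence().parse_comments(text, CUT_WORDS):
    print(hit["sentence"], "->", [w["value"] for w in hit["words"]])
```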
412303d786bf1234fa947a471b024bc73e098561 | 173 | py | Python | Python/Programming Fundamentals/Exams/72. Ad Astra Second.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
]
| null | null | null | Python/Programming Fundamentals/Exams/72. Ad Astra Second.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
]
| null | null | null | Python/Programming Fundamentals/Exams/72. Ad Astra Second.py | teodoramilcheva/softuni-software-engineering | 98dc9faa66f42570f6538fd7ef186d2bd1d39bff | [
"MIT"
]
| null | null | null | Python 3.9.2 (v3.9.2:1a79785e3e, Feb 19 2021, 09:06:10)
[Clang 6.0 (clang-600.0.57)] on darwin
Type "help", "copyright", "credits" or "license()" for more information.
>>> | 43.25 | 72 | 0.66474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.213873 |
412484a70f53006f985b8a1791e3e361afab8182 | 2,144 | py | Python | tests/test_iam/test_iam_groups.py | mrucci/moto | 076a6a7055ad18908b5661e599648c40b251cdc1 | [
"Apache-2.0"
]
| 1 | 2021-03-06T22:01:41.000Z | 2021-03-06T22:01:41.000Z | tests/test_iam/test_iam_groups.py | mrucci/moto | 076a6a7055ad18908b5661e599648c40b251cdc1 | [
"Apache-2.0"
]
| 2 | 2016-07-01T03:43:37.000Z | 2016-07-18T19:38:06.000Z | tests/test_iam/test_iam_groups.py | zenefits/moto | 8341c722a8e06decf23fd4b5e67de612accebb80 | [
"Apache-2.0"
]
| 1 | 2017-10-19T00:53:28.000Z | 2017-10-19T00:53:28.000Z | from __future__ import unicode_literals
import boto
import sure # noqa
from nose.tools import assert_raises
from boto.exception import BotoServerError
from moto import mock_iam
@mock_iam()
def test_create_group():
conn = boto.connect_iam()
conn.create_group('my-group')
with assert_raises(BotoServerError):
conn.create_group('my-group')
@mock_iam()
def test_get_group():
conn = boto.connect_iam()
conn.create_group('my-group')
conn.get_group('my-group')
with assert_raises(BotoServerError):
conn.get_group('not-group')
@mock_iam()
def test_get_all_groups():
conn = boto.connect_iam()
conn.create_group('my-group1')
conn.create_group('my-group2')
groups = conn.get_all_groups()['list_groups_response']['list_groups_result']['groups']
groups.should.have.length_of(2)
@mock_iam()
def test_add_user_to_group():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.add_user_to_group('my-group', 'my-user')
conn.create_group('my-group')
with assert_raises(BotoServerError):
conn.add_user_to_group('my-group', 'my-user')
conn.create_user('my-user')
conn.add_user_to_group('my-group', 'my-user')
@mock_iam()
def test_remove_user_from_group():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.remove_user_from_group('my-group', 'my-user')
conn.create_group('my-group')
conn.create_user('my-user')
with assert_raises(BotoServerError):
conn.remove_user_from_group('my-group', 'my-user')
conn.add_user_to_group('my-group', 'my-user')
conn.remove_user_from_group('my-group', 'my-user')
@mock_iam()
def test_get_groups_for_user():
conn = boto.connect_iam()
conn.create_group('my-group1')
conn.create_group('my-group2')
conn.create_group('other-group')
conn.create_user('my-user')
conn.add_user_to_group('my-group1', 'my-user')
conn.add_user_to_group('my-group2', 'my-user')
groups = conn.get_groups_for_user('my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups']
groups.should.have.length_of(2)
| 29.369863 | 122 | 0.712687 | 0 | 0 | 0 | 0 | 1,947 | 0.908116 | 0 | 0 | 461 | 0.215019 |
41252221870a25e5a2cfca108df770ea4c662895 | 2,955 | py | Python | test/test_storage_v1beta1_api.py | Arvinhub/client-python | d67df30f635231d68dc4c20b9b7e234c616c1e6a | [
"Apache-2.0"
]
| 1 | 2021-06-16T02:57:18.000Z | 2021-06-16T02:57:18.000Z | test/test_storage_v1beta1_api.py | Arvinhub/client-python | d67df30f635231d68dc4c20b9b7e234c616c1e6a | [
"Apache-2.0"
]
| null | null | null | test/test_storage_v1beta1_api.py | Arvinhub/client-python | d67df30f635231d68dc4c20b9b7e234c616c1e6a | [
"Apache-2.0"
]
| null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: unversioned
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import k8sclient
from k8sclient.rest import ApiException
from k8sclient.apis.storage_v1beta1_api import StorageV1beta1Api
class TestStorageV1beta1Api(unittest.TestCase):
""" StorageV1beta1Api unit test stubs """
def setUp(self):
self.api = k8sclient.apis.storage_v1beta1_api.StorageV1beta1Api()
def tearDown(self):
pass
def test_create_storage_v1beta1_storage_class(self):
"""
Test case for create_storage_v1beta1_storage_class
"""
pass
def test_delete_storage_v1beta1_collection_storage_class(self):
"""
Test case for delete_storage_v1beta1_collection_storage_class
"""
pass
def test_delete_storage_v1beta1_storage_class(self):
"""
Test case for delete_storage_v1beta1_storage_class
"""
pass
def test_get_storage_v1beta1_api_resources(self):
"""
Test case for get_storage_v1beta1_api_resources
"""
pass
def test_list_storage_v1beta1_storage_class(self):
"""
Test case for list_storage_v1beta1_storage_class
"""
pass
def test_patch_storage_v1beta1_storage_class(self):
"""
Test case for patch_storage_v1beta1_storage_class
"""
pass
def test_read_storage_v1beta1_storage_class(self):
"""
Test case for read_storage_v1beta1_storage_class
"""
pass
def test_replace_storage_v1beta1_storage_class(self):
"""
Test case for replace_storage_v1beta1_storage_class
"""
pass
def test_watch_storage_v1beta1_storage_class(self):
"""
Test case for watch_storage_v1beta1_storage_class
"""
pass
def test_watch_storage_v1beta1_storage_class_list(self):
"""
Test case for watch_storage_v1beta1_storage_class_list
"""
pass
if __name__ == '__main__':
unittest.main()
| 23.085938 | 105 | 0.666328 | 1,881 | 0.636548 | 0 | 0 | 0 | 0 | 0 | 0 | 1,716 | 0.580711 |
4125c4ef4416a704e2a8626b154e255f03a002cf | 433 | py | Python | rampwf/utils/tests/test_sanitize.py | DimitriPapadopoulos/ramp-workflow | c235e80b81fc8d8a5e0c175df50a55cc58dd78aa | [
"BSD-3-Clause"
]
| 66 | 2017-08-31T08:48:45.000Z | 2022-03-21T16:05:31.000Z | rampwf/utils/tests/test_sanitize.py | DimitriPapadopoulos/ramp-workflow | c235e80b81fc8d8a5e0c175df50a55cc58dd78aa | [
"BSD-3-Clause"
]
| 265 | 2017-06-02T19:22:38.000Z | 2022-03-31T13:08:00.000Z | rampwf/utils/tests/test_sanitize.py | DimitriPapadopoulos/ramp-workflow | c235e80b81fc8d8a5e0c175df50a55cc58dd78aa | [
"BSD-3-Clause"
]
| 44 | 2017-06-03T15:35:58.000Z | 2022-03-31T12:46:42.000Z | import pytest
from rampwf.utils.sanitize import _sanitize_input
def test_sanitize_input():
_sanitize_input('Harmess code')
msg = "forbidden key word open detected"
with pytest.raises(RuntimeError, match=msg):
_sanitize_input("with open('test.txt', 'wr') as fh")
msg = "forbidden key word scandir detected"
with pytest.raises(RuntimeError, match=msg):
_sanitize_input("for _ in os.scandir()")
| 27.0625 | 60 | 0.709007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.330254 |
4127cc56a9b643adacb0505a74b957d4c74ed758 | 118 | py | Python | issues/apps.py | 6aika/o3-6a-kkhprp | de0373733a0f4a936a86f6a19b28ca2e577beb71 | [
"MIT"
]
| 6 | 2016-07-08T08:50:51.000Z | 2018-06-06T09:58:43.000Z | issues/apps.py | 6aika/issue-reporting | de0373733a0f4a936a86f6a19b28ca2e577beb71 | [
"MIT"
]
| 50 | 2016-04-19T12:22:08.000Z | 2021-09-22T17:39:33.000Z | issues/apps.py | 6aika/o3-6a-kkhprp | de0373733a0f4a936a86f6a19b28ca2e577beb71 | [
"MIT"
]
| 5 | 2016-07-08T08:50:56.000Z | 2019-07-06T11:34:42.000Z | from django.apps import AppConfig
class IssuesAppConfig(AppConfig):
name = 'issues'
verbose_name = 'Issues'
| 16.857143 | 33 | 0.728814 | 81 | 0.686441 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.135593 |
412aadfb4c71d1e45e7e11134561c9b5c2fc6eda | 2,018 | py | Python | FileNamePurifier/FileNamePurifier.py | dbpiper/FileNamePurifier | 620088ea3be1b8874609fa769cfb8e6b636d5e8b | [
"MIT"
]
| null | null | null | FileNamePurifier/FileNamePurifier.py | dbpiper/FileNamePurifier | 620088ea3be1b8874609fa769cfb8e6b636d5e8b | [
"MIT"
]
| null | null | null | FileNamePurifier/FileNamePurifier.py | dbpiper/FileNamePurifier | 620088ea3be1b8874609fa769cfb8e6b636d5e8b | [
"MIT"
]
| null | null | null | from Parser import Parser
from LexicalAnalyzer import LexicalAnalyzer
class FileNamePurifier:
def __init__(self, stringAppendToFront, stringAppendToEnd, removeFirstInstanceOfStringsInList, removeAllInstancesOfStringsInList,
substringsToPreserve, oldSeperators, seperatorToUse, breakUpByBraces,
breakUpByParens,
breakUpByBrackets, breakUpByCamelCase, camelCaseOldSeparator, camelCaseNewSeparator):
self.stringAppendToFront = stringAppendToFront
self.stringAppendToEnd = stringAppendToEnd
self.removeFirstInstanceOfStringsInList = removeFirstInstanceOfStringsInList
self.removeAllInstancesOfStringsInList = removeAllInstancesOfStringsInList
self.substringsToPreserve = substringsToPreserve
self.oldSeperators = oldSeperators
self.seperatorToUse = seperatorToUse
self.breakUpByBraces = breakUpByBraces
self.breakUpByParens = breakUpByParens
self.breakUpByBrackets = breakUpByBrackets
self.breakUpByCamelCase = breakUpByCamelCase
self.camelCaseOldSeparator = camelCaseOldSeparator
self.camelCaseNewSeparator = camelCaseNewSeparator
def CreateParserWithString(self, stringToParse):
parser = Parser(self.stringAppendToFront, self.stringAppendToEnd,
self.removeFirstInstanceOfStringsInList,
self.removeAllInstancesOfStringsInList,
self.substringsToPreserve, self.oldSeperators, self.seperatorToUse, self.breakUpByBraces,
self.breakUpByParens,
self.breakUpByBrackets, self.breakUpByCamelCase,
self.camelCaseOldSeparator, self.camelCaseNewSeparator,
stringToParse);
return parser;
def PurifyString(self, stringToPurify):
return self.CreateParserWithString(stringToPurify).outputString | 46.930233 | 133 | 0.693756 | 1,941 | 0.961843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
412b47d093592288c113a1eac3194f68134c0446 | 11,406 | py | Python | data/transforms.py | raja21068/Federated-Learning-For-Medical-Images | aa30ce9d8106fd4039188fc56fa99bdc9f46f0e0 | [
"MIT"
]
| 27 | 2021-03-05T05:56:35.000Z | 2022-03-30T03:15:43.000Z | data/transforms.py | DiahannWu/FL-MRCM | 946c981a044452333791b7da26609c0874da292c | [
"MIT"
]
| 8 | 2021-03-08T10:41:19.000Z | 2021-12-30T04:53:21.000Z | data/transforms.py | DiahannWu/FL-MRCM | 946c981a044452333791b7da26609c0874da292c | [
"MIT"
]
| 5 | 2021-03-28T14:02:30.000Z | 2022-01-11T08:31:42.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def to_tensor(data):
"""
Convert numpy array to PyTorch tensor. For complex arrays, the real and imaginary parts
are stacked along the last dimension.
Args:
data (np.array): Input numpy array
Returns:
torch.Tensor: PyTorch version of data
"""
if np.iscomplexobj(data):
data = np.stack((data.real, data.imag), axis=-1)
return torch.from_numpy(data)
def to_numpy(data):
"""
Convert PyTorch tensor to numpy array. For complex tensor with two channels, the complex numpy arrays are used.
Args:
data (torch.Tensor): Input torch tensor
Returns:
np.array numpy arrays
"""
if data.shape[-1] == 2:
out = np.zeros(data.shape[:-1], dtype=np.complex64)
real = data[..., 0].numpy()
imag = data[..., 1].numpy()
out.real = real
out.imag = imag
else:
out = data.numpy()
return out
def apply_mask(data, mask_func, seed=None):
"""
Subsample given k-space by multiplying with a mask.
Args:
data (torch.Tensor): The input k-space data. This should have at least 3 dimensions, where
dimensions -3 and -2 are the spatial dimensions, and the final dimension has size
2 (for complex values).
mask_func (callable): A function that takes a shape (tuple of ints) and a random
number seed and returns a mask.
seed (int or 1-d array_like, optional): Seed for the random number generator.
Returns:
(tuple): tuple containing:
masked data (torch.Tensor): Subsampled k-space data
mask (torch.Tensor): The generated mask
"""
shape = np.array(data.shape)
shape[:-3] = 1
mask = mask_func(shape, seed)
return data * mask, mask
def fft2(data, normalized=True):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.fft(data, 2, normalized=normalized)
data = fftshift(data, dim=(-3, -2))
return data
def rfft2(data):
"""
Apply centered 2 dimensional Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The FFT of the input.
"""
data = ifftshift(data, dim=(-2, -1))
data = torch.rfft(data, 2, normalized=True, onesided=False)
data = fftshift(data, dim=(-3, -2))
return data
def ifft2(data, normalized=True):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
assert data.size(-1) == 2
data = ifftshift(data, dim=(-3, -2))
data = torch.ifft(data, 2, normalized=normalized)
data = fftshift(data, dim=(-3, -2))
return data
def irfft2(data):
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
-3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
assumed to be batch dimensions.
Returns:
torch.Tensor: The IFFT of the input.
"""
data = ifftshift(data, dim=(-3, -2))
data = torch.irfft(data, 2, normalized=True, onesided=False)
data = fftshift(data, dim=(-2, -1))
return data
def complex_to_mag_phase(data):
"""
:param data (torch.Tensor): A complex valued tensor, where the size of the third last dimension should be 2
:return: Mag and Phase (torch.Tensor): tensor of same size as input
"""
assert data.size(-3) == 2
mag = (data ** 2).sum(dim=-3).sqrt()
phase = torch.atan2(data[:, 1, :, :], data[:, 0, :, :])
return torch.stack((mag, phase), dim=-3)
def mag_phase_to_complex(data):
"""
:param data (torch.Tensor): Mag and Phase (torch.Tensor):
:return: A complex valued tensor, where the size of the third last dimension is 2
"""
assert data.size(-3) == 2
real = data[:, 0, :, :] * torch.cos(data[:, 1, :, :])
imag = data[:, 0, :, :] * torch.sin(data[:, 1, :, :])
return torch.stack((real, imag), dim=-3)
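# Sketch: both helpers above expect the complex channel at dim -3, e.g. (N, 2, H, W),
# and are inverses of each other up to floating point error:
#
#   x = torch.randn(8, 2, 64, 64)
#   x_rec = mag_phase_to_complex(complex_to_mag_phase(x))
#   assert torch.allclose(x, x_rec, atol=1e-4)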
def partial_fourier(data):
"""
:param data:
:return:
"""
def complex_abs(data):
"""
Compute the absolute value of a complex valued input tensor.
Args:
data (torch.Tensor): A complex valued tensor, where the size of the final dimension
should be 2.
Returns:
torch.Tensor: Absolute value of data
"""
assert data.size(-1) == 2 or data.size(-3) == 2
return (data ** 2).sum(dim=-1).sqrt() if data.size(-1) == 2 else (data ** 2).sum(dim=-3).sqrt()
def root_sum_of_squares(data, dim=0):
"""
Compute the Root Sum of Squares (RSS) transform along a given dimension of a tensor.
Args:
data (torch.Tensor): The input tensor
dim (int): The dimensions along which to apply the RSS transform
Returns:
torch.Tensor: The RSS value
"""
return torch.sqrt((data ** 2).sum(dim))
def center_crop(data, shape):
"""
Apply a center crop to the input real image or batch of real images.
Args:
data (torch.Tensor): The input tensor to be center cropped. It should have at
least 2 dimensions and the cropping is applied along the last two dimensions.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-2]
assert 0 < shape[1] <= data.shape[-1]
w_from = (data.shape[-2] - shape[0]) // 2
h_from = (data.shape[-1] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to]
def complex_center_crop(data, shape):
"""
Apply a center crop to the input image or batch of complex images.
Args:
data (torch.Tensor): The complex input tensor to be center cropped. It should
have at least 3 dimensions and the cropping is applied along dimensions
-3 and -2 and the last dimensions should have a size of 2.
shape (int, int): The output shape. The shape should be smaller than the
corresponding dimensions of data.
Returns:
torch.Tensor: The center cropped image
"""
assert 0 < shape[0] <= data.shape[-3]
assert 0 < shape[1] <= data.shape[-2]
w_from = (data.shape[-3] - shape[0]) // 2
h_from = (data.shape[-2] - shape[1]) // 2
w_to = w_from + shape[0]
h_to = h_from + shape[1]
return data[..., w_from:w_to, h_from:h_to, :]
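# Sketch: center-cropping real and complex tensors with the helpers above
# (shapes are arbitrary examples):
#
#   image = torch.randn(640, 368)
#   center_crop(image, (320, 320)).shape            # -> (320, 320)
#   kspace = torch.randn(640, 368, 2)
#   complex_center_crop(kspace, (320, 320)).shape   # -> (320, 320, 2)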
def normalize(data, mean, stddev, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
Args:
data (torch.Tensor): Input data to be normalized
mean (float): Mean value
stddev (float): Standard deviation
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return (data - mean) / (stddev + eps)
def normalize_instance(data, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from the data itself.
Args:
data (torch.Tensor): Input data to be normalized
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
mean = data.mean()
std = data.std()
return normalize(data, mean, std, eps), mean, std
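# Sketch: per-example normalization and how to undo it:
#
#   image = torch.randn(320, 320)
#   image_norm, mean, std = normalize_instance(image, eps=1e-11)
#   image_back = image_norm * (std + 1e-11) + mean   # inverse of the transform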
def normalize_volume(data, mean, std, eps=0.):
"""
Normalize the given tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are provided and computed from volume.
Args:
data (torch.Tensor): Input data to be normalized
mean: mean of whole volume
std: std of whole volume
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized tensor
"""
return normalize(data, mean, std, eps), mean, std
def normalize_complex(data, eps=0.):
"""
Normalize the given complex tensor using:
(data - mean) / (stddev + eps)
where mean and stddev are computed from magnitude of data.
Note that data is centered by complex mean so that the result centered data have average zero magnitude.
Args:
data (torch.Tensor): Input data to be normalized (*, 2)
mean: mean of image magnitude
std: std of image magnitude
eps (float): Added to stddev to prevent dividing by zero
Returns:
torch.Tensor: Normalized complex tensor with 2 channels (*, 2)
"""
mag = complex_abs(data)
mag_mean = mag.mean()
mag_std = mag.std()
temp = mag_mean/mag
mean_real = data[..., 0] * temp
mean_imag = data[..., 1] * temp
mean_complex = torch.stack((mean_real, mean_imag), dim=-1)
stddev = mag_std
return (data - mean_complex) / (stddev + eps), mag_mean, stddev
# Helper functions
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
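# Sketch: roll/fftshift/ifftshift mirror their NumPy counterparts, e.g.:
#
#   x = torch.arange(10.).reshape(2, 5)
#   assert np.allclose(fftshift(x).numpy(), np.fft.fftshift(x.numpy()))
#   assert np.allclose(ifftshift(x).numpy(), np.fft.ifftshift(x.numpy()))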
| 29.703125 | 115 | 0.608276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,827 | 0.598545 |
412b523c6ab5df841fbace6c8ecfb1e65fe6d301 | 171 | py | Python | tests/test_entries_to_mt.py | Hagihara-A/migrate-exblog | f5df20e07e74bc1bb14888c143bc43b2d775f666 | [
"MIT"
]
| null | null | null | tests/test_entries_to_mt.py | Hagihara-A/migrate-exblog | f5df20e07e74bc1bb14888c143bc43b2d775f666 | [
"MIT"
]
| 1 | 2019-01-07T14:34:14.000Z | 2019-01-07T14:34:14.000Z | tests/test_entries_to_mt.py | Hagihara-A/scrape-excite-blog | f5df20e07e74bc1bb14888c143bc43b2d775f666 | [
"MIT"
]
| null | null | null | import doctest
from migrate_exblog import entries_to_mt
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(entries_to_mt))
return tests
| 19 | 55 | 0.795322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
412d18a2cbe30949e9cef10400c6fc6b33fdbee8 | 97 | py | Python | deeppavlov/utils/server/__init__.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
]
| 5,893 | 2018-02-01T18:13:20.000Z | 2022-03-31T19:22:21.000Z | deeppavlov/utils/server/__init__.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
]
| 749 | 2018-01-31T11:36:02.000Z | 2022-03-30T07:24:22.000Z | deeppavlov/utils/server/__init__.py | xbodx/DeepPavlov | 4b60bf162df4294b8b0db3b72786cdd699c674fa | [
"Apache-2.0"
]
| 1,155 | 2018-02-01T10:52:15.000Z | 2022-03-29T02:12:15.000Z | from .server import get_server_params, get_ssl_params, redirect_root_to_docs, start_model_server
| 48.5 | 96 | 0.886598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5aba0aa3a1bda30d3d5e14338fb55d72ab3b386 | 1,883 | py | Python | b5/lib/state.py | team23/b5 | 90f45e86966eeb7a259667bbe06a5555648d012d | [
"BSD-3-Clause"
]
| 14 | 2018-11-24T23:33:35.000Z | 2022-02-04T23:46:49.000Z | b5/lib/state.py | team23/b5 | 90f45e86966eeb7a259667bbe06a5555648d012d | [
"BSD-3-Clause"
]
| 3 | 2020-02-10T11:05:11.000Z | 2020-03-04T08:42:11.000Z | b5/lib/state.py | team23/b5 | 90f45e86966eeb7a259667bbe06a5555648d012d | [
"BSD-3-Clause"
]
| 1 | 2020-02-11T19:45:13.000Z | 2020-02-11T19:45:13.000Z | import os
import tempfile
from types import TracebackType
from typing import Any, BinaryIO, Optional, TextIO, Type, Union
import yaml
class StoredState:
def __init__(self, state: "State") -> None:
self.state = state
if self.state.stored_name is not None:
raise RuntimeError('You may only store the state once')
self.file_handle = tempfile.NamedTemporaryFile(suffix='b5-state', mode='w', encoding='utf-8', delete=False)
self.state.stored_name = self.name
yaml.dump({
key: getattr(self.state, key)
for key in state.KEYS
}, self.file_handle, default_flow_style=False)
self.file_handle.close()
def close(self) -> None:
os.unlink(self.file_handle.name)
self.state.stored_name = None
def __enter__(self) -> "StoredState":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.close()
@property
def name(self) -> str:
return self.file_handle.name
class State:
KEYS = ('project_path', 'run_path', 'taskfiles', 'configfiles', 'config', 'args', 'stored_name')
taskfiles = []
configfiles = []
args = {}
def __init__(self, **kwargs: Any) -> None:
for key in self.KEYS:
if not hasattr(self, key):
setattr(self, key, None)
for key in kwargs:
if key not in self.KEYS:
raise RuntimeError('Key %s is not a valid state attribute' % key)
setattr(self, key, kwargs[key])
def stored(self) -> StoredState:
return StoredState(self)
@classmethod
def load(cls, file_handle: Union[BinaryIO, TextIO]) -> "State":
return cls(**yaml.safe_load(file_handle))
| 28.969231 | 115 | 0.60701 | 1,742 | 0.925119 | 0 | 0 | 203 | 0.107807 | 0 | 0 | 196 | 0.104089 |
f5ac35c88920717e7f434d347b3a61d75f1b9fd5 | 2,711 | py | Python | lines_ext.py | subhrajit02/handwritten-digit-recognision | 239a4bd1283393865d2655b91ad4674ce8450882 | [
"MIT"
]
| null | null | null | lines_ext.py | subhrajit02/handwritten-digit-recognision | 239a4bd1283393865d2655b91ad4674ce8450882 | [
"MIT"
]
| null | null | null | lines_ext.py | subhrajit02/handwritten-digit-recognision | 239a4bd1283393865d2655b91ad4674ce8450882 | [
"MIT"
]
| null | null | null | import numpy as np
import cv2
def rem_multi_lines(lines, thresh):
"""
to remove the multiple lines with close proximity
:param lines: initial list with all the lines(multiple in place of singular)
:param thresh: dist between two lines for them to be considered as same
:return: final list with singular lines in place of multiple
"""
a = []
i = 0
    lines.append([800, 0])  # sentinel far from all real lines so the last group is flushed below
out = []
# this loop collects lines with close proximity in a list (a) and then appends that
# complete list in a common list called out.
while i < len(lines) - 1:
if lines[i] not in a:
a.append(lines[i])
if abs(lines[i + 1][0] - lines[i][0]) < thresh:
a.append(lines[i + 1])
else:
out.append(a)
a = []
i += 1
# print(out)
final = []
for i in out:
a = np.array(i)
final.append(np.average(a, axis=0))
# print(final)
for i in final.copy():
if i[0] < 0:
final.remove(i)
return final
def draw_r_theta_lines(img, lines, color):
"""
draw lines on image which are of (r, theta) form
:param img: image to draw the lines on
:param lines: list of lines on the form (r, theta)
:param color: color of lines
:return:
"""
for rho, theta in lines:
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * a)
cv2.line(img, (x1, y1), (x2, y2), color, 2)
def lines_ext(img, hough_thresh, multilines_thresh):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 45, 10)
line_image = img.copy()
lines = cv2.HoughLines(edges, 1, np.pi / 180, hough_thresh)
lines = lines.reshape(lines.shape[0], 2)
draw_r_theta_lines(line_image, lines, (0, 0, 255))
lines = sorted(lines, key=lambda x: x[0])
cv2.imshow("lines", line_image)
cv2.waitKey(0)
l1 = list(lines)
l2 = []
for i in l1:
l2.append(list(i))
v_lines = []
h_lines = []
for i in l2:
if round(i[1]) == 0:
v_lines.append(i)
elif round(i[1]) > 0.5:
h_lines.append(i)
# print('v:', v_lines)
# print('h:', h_lines)
v_lines = rem_multi_lines(v_lines, multilines_thresh)
h_lines = rem_multi_lines(h_lines, multilines_thresh)
final = v_lines + h_lines
draw_r_theta_lines(line_image, final, (0, 255, 0))
cv2.imshow("lines1", line_image)
cv2.waitKey(0)
return v_lines, h_lines
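# Usage sketch (the image path and both thresholds are assumptions to tune per image):
#
#   img = cv2.imread('grid.png')
#   v_lines, h_lines = lines_ext(img, hough_thresh=200, multilines_thresh=20)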
| 25.101852 | 88 | 0.560679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 729 | 0.268904 |
f5acb14365decf5cb2d85dfdb8cc3ac0e9ffe41f | 1,553 | py | Python | examples/wmt/tools/scorer/nlm.py | godweiyang/ParaGen | 9665d1244ea38a41fc06b4e0a7f6411985e2221f | [
"Apache-2.0"
]
| 50 | 2022-01-18T07:25:46.000Z | 2022-03-14T13:06:18.000Z | examples/wmt/tools/scorer/nlm.py | JiangtaoFeng/ParaGen | 509334bf16e3674e009bb9dc37ecc38ae3b5c977 | [
"Apache-2.0"
]
| 2 | 2022-01-19T09:36:42.000Z | 2022-02-23T07:16:02.000Z | examples/wmt/tools/scorer/nlm.py | JiangtaoFeng/ParaGen | 509334bf16e3674e009bb9dc37ecc38ae3b5c977 | [
"Apache-2.0"
]
| 6 | 2022-01-19T09:28:53.000Z | 2022-03-10T10:20:08.000Z | # Before running this command, you should firstly run:
# pip install fairseq
# pip install fastBPE
# wget https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.gz
# tar zxvf wmt19.en.tar.gz
import argparse
from itertools import islice
import numpy as np
from fairseq.models.transformer_lm import TransformerLanguageModel
parser = argparse.ArgumentParser()
parser.add_argument('--hypo_filename', metavar='N', type=str, help='hypo_filename')
parser.add_argument('--out_filename', metavar='N', type=str, help='out_filename')
# parser.add_argument('--num_candidates', type=int, help="num_candidates")
args, unknown = parser.parse_known_args()
en_lm = TransformerLanguageModel.from_pretrained('wmt19.en', 'model.pt', tokenizer='moses', bpe='fastbpe')
en_lm.cuda()
num_processed = 0
ppl = []
batch_num = 1000
with open(args.hypo_filename, 'r') as f, open(args.out_filename, 'w') as out:
while True:
n_lines = list(map(lambda x: x.strip(), islice(f, batch_num)))
if len(n_lines) == 0:
break
for ele in en_lm.score(n_lines, beam=1):
ppl.append(float(ele['positional_scores'].mean().neg().exp().item()))
num_processed += batch_num
print(f"Processed {num_processed}")
ppl = np.array(ppl)
ppl = np.nan_to_num(ppl, nan=np.nanmax(ppl))
# scores = 1 - ppl/ppl.max()
# for ele in zip(ppl.tolist(), scores.tolist()):
# out.write(f"{np.log(ele[0])}, {ele[0]}, {ele[1]}\n")
ppl = np.array(ppl)
for ele in ppl.tolist():
out.write(f"{np.log(ele)}\n")
| 36.116279 | 106 | 0.676755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 576 | 0.370895 |
f5accc4b43ec1556256e37986ed9a579a786c19a | 2,742 | py | Python | aioli_openapi/service.py | jimorie/aioli-openapi | 5a5ea6471d332adc8361ad39af7421e4686811fd | [
"MIT"
]
| null | null | null | aioli_openapi/service.py | jimorie/aioli-openapi | 5a5ea6471d332adc8361ad39af7421e4686811fd | [
"MIT"
]
| null | null | null | aioli_openapi/service.py | jimorie/aioli-openapi | 5a5ea6471d332adc8361ad39af7421e4686811fd | [
"MIT"
]
| null | null | null | import warnings
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from aioli.service import BaseService
from aioli.controller import BaseHttpController
from aioli.exceptions import NoMatchFound
class OpenApiService(BaseService):
_specs = {}
def oas_schema(self, pkg):
spec = APISpec(
title=pkg.meta["name"].capitalize(),
version=pkg.meta["version"],
openapi_version=self.config["oas_version"],
plugins=[MarshmallowPlugin()],
)
for ctrl in pkg.controllers:
if not isinstance(ctrl, BaseHttpController):
continue
routes = {}
for func, handler in ctrl.handlers:
if not handler.status:
warnings.warn(f"No @returns for {func}, cannot generate OAS3 schema for this handler")
break
abspath = handler.path_full
method = handler.method.lower()
if abspath not in routes:
routes[abspath] = {}
if method not in routes[abspath]:
routes[abspath][method] = dict(
responses={},
parameters=[]
)
route = routes[abspath][method]
responses = route["responses"]
parameters = route["parameters"]
for location, schema_cls in handler.schemas:
if location == "response":
if not schema_cls:
content = {}
else:
content = {"application/json": {"schema": schema_cls}}
responses[handler.status] = dict(
description=None,
content=content
)
elif location in ["path", "query", "header"]:
if not schema_cls:
continue
parameters.append({
"in": location,
"schema": schema_cls
})
spec.path(handler.path_full, operations=routes[abspath])
return spec.to_dict()
async def on_startup(self):
for pkg in self.app.registry.imported:
if not pkg.config["path"]:
continue
self._specs[pkg.meta["name"]] = self.oas_schema(pkg)
async def get_schemas(self, **query):
return self._specs
async def get_schema(self, name):
if name not in self._specs:
raise NoMatchFound
return self._specs[name]
| 31.159091 | 106 | 0.496718 | 2,511 | 0.915755 | 0 | 0 | 0 | 0 | 402 | 0.146608 | 203 | 0.074034 |
f5ae655bb41bdfdac3cd957f9a322f3eb321c3ad | 124 | py | Python | wrangle_scripts/wrangle_data.py | es-g/dash | 443b04593e66f7f2dcea325937eee4683f4c7a13 | [
"MIT"
]
| null | null | null | wrangle_scripts/wrangle_data.py | es-g/dash | 443b04593e66f7f2dcea325937eee4683f4c7a13 | [
"MIT"
]
| null | null | null | wrangle_scripts/wrangle_data.py | es-g/dash | 443b04593e66f7f2dcea325937eee4683f4c7a13 | [
"MIT"
]
| null | null | null | import pandas as pd
import plotly.graph_objs as go
def load_data():
df = pd.read_csv("data/Data.csv")
return df
| 12.4 | 37 | 0.685484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.120968 |
f5ae7c5fd10eb5c3a55627538569669fa5235f04 | 399 | py | Python | cpdb/popup/factories.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
]
| 25 | 2018-07-20T22:31:40.000Z | 2021-07-15T16:58:41.000Z | cpdb/popup/factories.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
]
| 13 | 2018-06-18T23:08:47.000Z | 2022-02-10T07:38:25.000Z | cpdb/popup/factories.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
]
| 6 | 2018-05-17T21:59:43.000Z | 2020-11-17T00:30:26.000Z | import factory
from faker import Faker
from popup.models import Popup
fake = Faker()
class PopupFactory(factory.django.DjangoModelFactory):
class Meta:
model = Popup
name = factory.LazyFunction(lambda: fake.word())
page = factory.LazyFunction(lambda: fake.word())
title = factory.LazyFunction(lambda: fake.word())
text = factory.LazyFunction(lambda: fake.text(512))
| 23.470588 | 55 | 0.719298 | 309 | 0.774436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5afafed15f47453d454c043799fdd7a4422ab1b | 1,863 | py | Python | src_old/tests/delete_migrations.py | rishikesh67/django-tenant-oracle-schemas | 918a64e842b678fc506eadbb4d7e51b0b38ab0a2 | [
"MIT"
]
| null | null | null | src_old/tests/delete_migrations.py | rishikesh67/django-tenant-oracle-schemas | 918a64e842b678fc506eadbb4d7e51b0b38ab0a2 | [
"MIT"
]
| 8 | 2019-12-04T23:26:11.000Z | 2022-02-10T09:42:18.000Z | src/tests/delete_migrations.py | rishikesh67/django-tenant-oracle-schemas | 918a64e842b678fc506eadbb4d7e51b0b38ab0a2 | [
"MIT"
]
| 2 | 2019-06-26T05:31:16.000Z | 2019-07-01T12:22:50.000Z | import os
import glob
import shutil
import logging
# logging.basicConfig(level=logging.DEBUG)
# DEBUG:root:Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/tenants/models.py
# logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
# 2019-06-24 16:19:29,898 Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
# logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG, datefmt='%d/%m/%Y %H:%M:%S %p')
# 24/06/2019 04:23:31 PM Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG, datefmt='[%d/%m/%Y %H:%M:%S %p] =>')
# 24/06/2019 16:24:02 PM Skipping file /Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/src/manage.py
def delete_migrations(
dir_path='/Users/hygull/Projects/Python3/DjangoTenantOracleSchemas/django-tenant-oracle-schemas/',
migrations=True,
pycaches=False,
**kwargs
):
dir_path = os.path.join(os.path.abspath(dir_path))
logging.info(dir_path)
if os.path.isdir(dir_path):
files = os.listdir(dir_path)
for file in files:
abspath = os.path.join(dir_path, file)
if os.path.isdir(abspath):
logging.debug('file ---> {0} {1}'.format(file, pycaches))
if (migrations and file == 'migrations') or (pycaches and file == "__pycache__"):
logging.debug('Found migration as ' + abspath)
shutil.rmtree(abspath)
logging.debug(abspath + ' is removed')
else:
logging.debug('Iteration over -> ' + abspath)
delete_migrations(abspath, pycaches, migrations, **kwargs)
else:
logging.debug('Skipping file ' + abspath)
else:
logging.debug('Path is not a directory')
| 38.8125 | 139 | 0.7343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,054 | 0.565754 |
f5b0b5d5e4ce7c8e9669a43f27a5226a60590d4f | 6,075 | py | Python | qa2nli/converters/processors.py | nli-for-qa/conversion | 588de7fbbcdeb9698fe888b6e3ece7dfadf25238 | [
"MIT"
]
| null | null | null | qa2nli/converters/processors.py | nli-for-qa/conversion | 588de7fbbcdeb9698fe888b6e3ece7dfadf25238 | [
"MIT"
]
| null | null | null | qa2nli/converters/processors.py | nli-for-qa/conversion | 588de7fbbcdeb9698fe888b6e3ece7dfadf25238 | [
"MIT"
]
| 1 | 2021-07-04T01:59:56.000Z | 2021-07-04T01:59:56.000Z | from typing import Callable, List, Union, Optional, Dict, Tuple
import re
import spacy
import logging
import math
from enum import Enum
logger = logging.getLogger(__name__)
def remove_excess_space(inp: str) -> str:
return ' '.join(inp.split()).strip()
def get_spacy_model(model: str) -> spacy.language.Language:
try:
spacy_model = spacy.load(model)
except OSError:
logger.warning(
f"Spacy models '{model}' not found. Downloading and installing.")
spacy.cli.download(model)
# Import the downloaded model module directly and load from there
spacy_model_module = __import__(model)
spacy_model = spacy_model_module.load()
return spacy_model
class PreprocessorBase:
"""Override the __call__ method in inherited class to change functionallity"""
def __call__(self, q: str, o: str) -> Tuple[str, Dict]:
""" Very basic preprocessor which concats question and option.
        Handles fill-in-the-blank type questions.
"""
if '_' in q: # FITB
h = q.replace('_', o)
else:
h = q + ' ' + o
h = remove_excess_space(h)
meta = {'question': q, 'option': o}
return h, meta
Preprocessor = PreprocessorBase
dots = re.compile(r"[\.\'\"\?, ]{2,}[\w ]*")
def remove_dots(inp: str) -> str:
return dots.sub('.', inp)
class ConversionIssue(Enum):
NONE = 'none'
TOO_SHORT = 'too_short'
TOO_LONG = 'too_long'
COULD_NOT_FIX = 'could_not_fix'
INVALID_QUESTION = 'invalid_question'
INVALID_OPTION = 'invalid_option'
MISSING_INFORMATION = 'missing_info'
UNGRAMTICAL_RESULT = 'ungramatical_result'
UNKNOWN = 'unknown'
def __str__(self) -> str:
return self.value
class PostprocessorBase:
def __init__(self,
lower_length_ratio: Optional[float] = None,
upper_length_ratio: float = 1.3) -> None:
self.lower_length_ratio = lower_length_ratio
self.upper_length_ratio = upper_length_ratio
def __call__(self, inp: str, meta: Dict) -> Tuple[str, Dict]:
# if the list does not exists add an empty
meta['conversion_issues'] = meta.get('conversion_issues', [])
return inp, meta
def _length_check(self, output: str, question: str,
option: str) -> ConversionIssue:
total_ratio = (len(output) / (len(question) + len(option)))
if total_ratio > self.upper_length_ratio:
# too long. Cut the output
return ConversionIssue.TOO_LONG
elif self.lower_length_ratio is None and len(output) < len(option):
return ConversionIssue.TOO_SHORT
elif self.lower_length_ratio is not None:
if total_ratio < self.lower_length_ratio:
return ConversionIssue.TOO_SHORT
return ConversionIssue.NONE
class Postprocessor(PostprocessorBase):
def __init__(self,
sentence_splitter: str = 'period',
cleaner: str = None,
lower_length_ratio: float = None,
upper_length_ratio: float = 1.3) -> None:
self.sentence_splitter = sentence_splitter
if cleaner == 'remove_dots':
self.cleaner: Callable[[str], str] = remove_dots
else:
self.cleaner = lambda x: x
if sentence_splitter == 'spacy':
self.spacy_nlp = get_spacy_model('en_core_web_sm')
else:
self.spacy_nlp = None
super().__init__(
lower_length_ratio=lower_length_ratio,
upper_length_ratio=upper_length_ratio)
def _fix_too_short(self, all_sentences: List[str],
meta: Dict) -> Tuple[str, bool]:
next_ = 1
could_not_fix = False
current_output = all_sentences[0]
# add sentences till legth is not too short
max_tries = min(5, len(all_sentences))
length_issue = ConversionIssue.TOO_SHORT
if max_tries == 1:
could_not_fix = True
while length_issue == ConversionIssue.TOO_SHORT and (
not could_not_fix):
current_output = current_output + f" {all_sentences[next_]}"
length_issue = self._length_check(current_output, meta['question'],
meta['option'])
next_ += 1
if next_ >= max_tries:
could_not_fix = True
break
return current_output, could_not_fix
def __call__(self, inp: str, meta: Dict) -> Tuple[str, Dict]:
cleaned = self.cleaner(inp)
if self.sentence_splitter == 'spacy':
sentences = [
s.text.strip() for s in list(self.spacy_nlp(cleaned).sents)
]
first_sent = (sentences[0]).strip()
elif self.sentence_splitter == 'period':
sentences = cleaned.split('.')
first_sent = sentences[0]
meta['all_sentences'] = sentences
output = first_sent
issues_encountered = []
length_issue = self._length_check(output, meta['question'],
meta['option'])
if length_issue == ConversionIssue.TOO_SHORT:
issues_encountered.append(length_issue)
output, could_not_fix = self._fix_too_short(sentences, meta)
if could_not_fix:
issues_encountered.append(ConversionIssue.COULD_NOT_FIX)
# check again
length_issue = self._length_check(output, meta['question'],
meta['option'])
if length_issue == ConversionIssue.TOO_LONG:
issues_encountered.append(length_issue)
output = output[:int(
math.ceil(self.upper_length_ratio *
(len(meta['question']) + len(meta['option']))))]
meta['conversion_issues'] = [
str(issue) for issue in issues_encountered
]
output = remove_excess_space(output)
return output, meta
| 31.806283 | 82 | 0.597366 | 5,200 | 0.855967 | 0 | 0 | 0 | 0 | 0 | 0 | 877 | 0.144362 |
f5b0c54a48711381cd579c3094b7c9b18f185760 | 2,106 | py | Python | trphysx/data_utils/dataset_cylinder.py | zabaras/transformer-physx | eb28d09957641cc594b3e5acf4ace2e4dc193584 | [
"MIT"
]
| 33 | 2020-10-15T06:43:36.000Z | 2022-03-24T10:46:12.000Z | trphysx/data_utils/dataset_cylinder.py | zabaras/transformer-physx | eb28d09957641cc594b3e5acf4ace2e4dc193584 | [
"MIT"
]
| 2 | 2021-05-18T14:31:38.000Z | 2021-07-30T18:18:50.000Z | trphysx/data_utils/dataset_cylinder.py | zabaras/transformer-physx | eb28d09957641cc594b3e5acf4ace2e4dc193584 | [
"MIT"
]
| 6 | 2020-12-01T05:54:01.000Z | 2022-03-25T21:22:09.000Z | """
=====
Distributed by: Notre Dame SCAI Lab (MIT Liscense)
- Associated publication:
url: https://arxiv.org/abs/2010.03957
doi:
github: https://github.com/zabaras/transformer-physx
=====
"""
import logging
import h5py
import torch
from .dataset_phys import PhysicalDataset
from ..embedding.embedding_model import EmbeddingModel
logger = logging.getLogger(__name__)
class CylinderDataset(PhysicalDataset):
"""Dataset for 2D flow around a cylinder numerical example
"""
def embed_data(self, h5_file: h5py.File, embedder: EmbeddingModel) -> None:
"""Embeds cylinder flow data into a 1D vector representation for the transformer.
Args:
h5_file (h5py.File): HDF5 file object of raw data
embedder (EmbeddingModel): Embedding neural network
"""
# Iterate through stored time-series
samples = 0
embedder.eval()
for key in h5_file.keys():
ux = torch.Tensor(h5_file[key + '/ux'])
uy = torch.Tensor(h5_file[key + '/uy'])
p = torch.Tensor(h5_file[key + '/p'])
data_series = torch.stack([ux, uy, p], dim=1).to(embedder.devices[0])
visc = (2.0 / float(key))*torch.ones(ux.size(0), 1).to(embedder.devices[0])
with torch.no_grad():
embedded_series = embedder.embed(data_series, visc).cpu()
# Stride over time-series
for i in range(0, data_series.size(0) - self.block_size + 1, self.stride): # Truncate in block of block_size
data_series0 = embedded_series[i: i + self.block_size] # .repeat(1, 4)
self.examples.append(data_series0)
if self.eval:
self.states.append(data_series[i: i + self.block_size].cpu())
samples = samples + 1
if (self.ndata > 0 and samples >= self.ndata): # If we have enough time-series samples break loop
break
logger.info(
'Collected {:d} time-series from hdf5 file. Total of {:d} time-series.'.format(samples, len(self.examples))
)
| 37.607143 | 121 | 0.61396 | 1,735 | 0.823837 | 0 | 0 | 0 | 0 | 0 | 0 | 737 | 0.349953 |
f5b4beb61d529163a339e65d180ea7a983c8e73d | 359 | py | Python | HLTrigger/Configuration/python/HLT_75e33/paths/L1T_SingleTkMuon_22_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
]
| 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/paths/L1T_SingleTkMuon_22_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
]
| 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/paths/L1T_SingleTkMuon_22_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
]
| 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z | import FWCore.ParameterSet.Config as cms
#from ..modules.hltL1TkMuons_cfi import *
from ..modules.hltL1TkSingleMuFiltered22_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTEndSequence_cfi import *
L1T_SingleTkMuon_22 = cms.Path(
HLTBeginSequence +
# hltL1TkMuons +
hltL1TkSingleMuFiltered22 +
HLTEndSequence
)
| 25.642857 | 53 | 0.788301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.167131 |
f5b575448dfd3070de7e8cc30de61a51b143522f | 927 | py | Python | strategies/forest.py | aladics/DeepBugHunter | 564f2417eafc50e99de60d5d6c0a1b4193d1bf8b | [
"Apache-2.0"
]
| 6 | 2019-03-01T13:17:09.000Z | 2022-03-07T04:07:04.000Z | strategies/forest.py | aladics/DeepBugHunter | 564f2417eafc50e99de60d5d6c0a1b4193d1bf8b | [
"Apache-2.0"
]
| null | null | null | strategies/forest.py | aladics/DeepBugHunter | 564f2417eafc50e99de60d5d6c0a1b4193d1bf8b | [
"Apache-2.0"
]
| 2 | 2020-08-02T07:36:00.000Z | 2021-01-13T15:04:00.000Z | import os
import math
import argparse
import dbh_util as util
from sklearn.ensemble import RandomForestClassifier
parser = argparse.ArgumentParser()
parser.add_argument('--n-estimators', type=int, default=10, help='The number of trees in the forest')
parser.add_argument('--max-depth', type=int, default=5, help='Max decision tree leaf node depth')
parser.add_argument('--criterion', default='gini', help='Split quality criterion, "gini" or "entropy"')
#
# Random Forest approach
#
def predict(classifier, test, args, sargs_str, threshold=None):
sargs = util.parse(parser, sargs_str.split())
preds = classifier.predict(test[0])
if threshold is not None:
preds = [1 if x >= threshold else 0 for x in preds]
return preds
def learn(train, dev, test, args, sargs_str):
sargs = util.parse(parser, sargs_str.split())
return util.sklearn_wrapper(train, dev, test, RandomForestClassifier(**sargs))
| 34.333333 | 103 | 0.73247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.204962 |
f5b7476abd3046a860b7d297b7e32e4ae0dcc3db | 9,476 | py | Python | vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py | openstack/vitrage-tempest-plugin | 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | [
"Apache-2.0"
]
| 6 | 2018-08-02T12:11:09.000Z | 2019-03-05T11:45:09.000Z | vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py | openstack/vitrage-tempest-plugin | 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | [
"Apache-2.0"
]
| null | null | null | vitrage_tempest_plugin/tests/e2e/test_overlapping_actions.py | openstack/vitrage-tempest-plugin | 69acc7f3ea26f8c3a652cdf9d1fd842dbf9af58f | [
"Apache-2.0"
]
| 1 | 2018-08-22T12:29:54.000Z | 2018-08-22T12:29:54.000Z | # Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from vitrage_tempest_plugin.tests.base import IsEmpty
from vitrage_tempest_plugin.tests.common.constants import DOCTOR_DATASOURCE
from vitrage_tempest_plugin.tests.common.constants import EntityCategory
from vitrage_tempest_plugin.tests.common.constants import VertexProperties \
as VProps
from vitrage_tempest_plugin.tests.common.constants import VITRAGE_DATASOURCE
from vitrage_tempest_plugin.tests.common import general_utils as g_utils
from vitrage_tempest_plugin.tests.common.tempest_clients import TempestClients
from vitrage_tempest_plugin.tests.common import vitrage_utils as v_utils
from vitrage_tempest_plugin.tests.e2e.test_actions_base import TestActionsBase
from vitrage_tempest_plugin.tests import utils
LOG = logging.getLogger(__name__)
TRIGGER_ALARM_1 = 'e2e.test_overlapping_actions.trigger.alarm1'
TRIGGER_ALARM_2 = 'e2e.test_overlapping_actions.trigger.alarm2'
TRIGGER_ALARM_3 = 'e2e.test_overlapping_actions.trigger.alarm3'
TRIGGER_ALARM_4 = 'e2e.test_overlapping_actions.trigger.alarm4'
DEDUCED = 'e2e.test_overlapping_actions.deduced.alarm'
TRIGGER_ALARM_1_PROPS = {
VProps.NAME: TRIGGER_ALARM_1,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: DOCTOR_DATASOURCE,
}
TRIGGER_ALARM_2_PROPS = {
VProps.NAME: TRIGGER_ALARM_2,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: DOCTOR_DATASOURCE,
}
DEDUCED_PROPS = {
VProps.NAME: DEDUCED,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_TYPE: VITRAGE_DATASOURCE,
}
class TestOverlappingActions(TestActionsBase):
@classmethod
def setUpClass(cls):
super(TestOverlappingActions, cls).setUpClass()
cls._template = v_utils.add_template(
'e2e_test_overlapping_actions.yaml')
@classmethod
def tearDownClass(cls):
if cls._template is not None:
v_utils.delete_template(cls._template['uuid'])
@utils.tempest_logger
def test_overlapping_action_set_state(self):
try:
# Do - first
self._trigger_do_action(TRIGGER_ALARM_1)
curr_host = v_utils.get_first_host()
self.assertEqual(
'ERROR',
curr_host.get(VProps.VITRAGE_AGGREGATED_STATE),
'state should change after set_state action')
# Do - second
self._trigger_do_action(TRIGGER_ALARM_2)
curr_host = v_utils.get_first_host()
self.assertEqual(
'ERROR',
curr_host.get(VProps.VITRAGE_AGGREGATED_STATE),
'state should remain unchanged')
# Undo - first
self._trigger_undo_action(TRIGGER_ALARM_1)
curr_host = v_utils.get_first_host()
self.assertEqual(
'ERROR',
curr_host.get(VProps.VITRAGE_AGGREGATED_STATE),
'state should remain unchanged')
# Undo - second
self._trigger_undo_action(TRIGGER_ALARM_2)
curr_host = v_utils.get_first_host()
self.assertEqual(
self.orig_host.get(VProps.VITRAGE_AGGREGATED_STATE),
curr_host.get(VProps.VITRAGE_AGGREGATED_STATE),
'state should change after undo set_state action')
finally:
self._trigger_undo_action(TRIGGER_ALARM_1)
self._trigger_undo_action(TRIGGER_ALARM_2)
@utils.tempest_logger
def test_overlapping_action_mark_down(self):
try:
host_name = self.orig_host.get(VProps.NAME)
# Do - first
self._trigger_do_action(TRIGGER_ALARM_3)
nova_service = TempestClients.nova().services.list(
host=host_name, binary='nova-compute')[0]
self.assertEqual("down", nova_service.state)
# Do - second
self._trigger_do_action(TRIGGER_ALARM_4)
nova_service = TempestClients.nova().services.list(
host=host_name, binary='nova-compute')[0]
self.assertEqual("down", nova_service.state)
# Undo - first
self._trigger_undo_action(TRIGGER_ALARM_3)
nova_service = TempestClients.nova().services.list(
host=host_name, binary='nova-compute')[0]
self.assertEqual("down", nova_service.state)
# Undo - second
self._trigger_undo_action(TRIGGER_ALARM_4)
nova_service = TempestClients.nova().services.list(
host=host_name, binary='nova-compute')[0]
self.assertEqual("up", nova_service.state)
finally:
self._trigger_undo_action(TRIGGER_ALARM_3)
self._trigger_undo_action(TRIGGER_ALARM_4)
# nova.host datasource may take up to snapshot_interval to update
time.sleep(130)
@utils.tempest_logger
def test_overlapping_action_deduce_alarm(self):
try:
host_id = self.orig_host.get(VProps.VITRAGE_ID)
# Do - first
self._trigger_do_action(TRIGGER_ALARM_1)
self._check_deduced(1, DEDUCED_PROPS, host_id)
# Do - second
self._trigger_do_action(TRIGGER_ALARM_2)
self._check_deduced(1, DEDUCED_PROPS, host_id)
# Undo - first
self._trigger_undo_action(TRIGGER_ALARM_1)
self._check_deduced(1, DEDUCED_PROPS, host_id)
# Undo - second
self._trigger_undo_action(TRIGGER_ALARM_2)
self._check_deduced(0, DEDUCED_PROPS, host_id)
finally:
self._trigger_undo_action(TRIGGER_ALARM_1)
self._trigger_undo_action(TRIGGER_ALARM_2)
@utils.tempest_logger
def test_overlapping_action_add_causal_relationship(self):
try:
# ---- Do first & second ----
self._trigger_do_action(TRIGGER_ALARM_1)
self._trigger_do_action(TRIGGER_ALARM_2)
alarms = self.vitrage_client.alarm.list(
vitrage_id=self.orig_host.get(VProps.VITRAGE_ID),
all_tenants=True)
deduced = g_utils.first_match(alarms, **DEDUCED_PROPS)
trigger1 = g_utils.first_match(alarms, **TRIGGER_ALARM_1_PROPS)
trigger2 = g_utils.first_match(alarms, **TRIGGER_ALARM_2_PROPS)
# Get Rca for the deduced
rca = self.vitrage_client.rca.get(deduced[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger1, trigger2], DEDUCED_PROPS)
# Get Rca for trigger 1
rca = self.vitrage_client.rca.get(trigger1[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger1], TRIGGER_ALARM_1_PROPS)
# Get Rca for trigger 2
rca = self.vitrage_client.rca.get(trigger2[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger2], TRIGGER_ALARM_2_PROPS)
# ---- Undo - first ----
self._trigger_undo_action(TRIGGER_ALARM_1)
alarms = self.vitrage_client.alarm.list(
vitrage_id=self.orig_host.get(VProps.VITRAGE_ID),
all_tenants=True)
deduced = g_utils.first_match(alarms, **DEDUCED_PROPS)
trigger2 = g_utils.first_match(alarms, **TRIGGER_ALARM_2_PROPS)
# Get Rca for the deduced
rca = self.vitrage_client.rca.get(deduced[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger2], DEDUCED_PROPS)
# Get Rca for trigger 2
rca = self.vitrage_client.rca.get(trigger2[VProps.VITRAGE_ID],
all_tenants=True)
self._check_rca(rca, [deduced, trigger2], TRIGGER_ALARM_2_PROPS)
# ---- Undo - second ----
self._trigger_undo_action(TRIGGER_ALARM_2)
alarms = self.vitrage_client.alarm.list(
vitrage_id=self.orig_host.get(VProps.VITRAGE_ID),
all_tenants=True)
self.assertThat(
g_utils.all_matches(alarms, **TRIGGER_ALARM_1_PROPS),
IsEmpty(),
'trigger alarm 1 should have been removed')
self.assertThat(
g_utils.all_matches(alarms, **TRIGGER_ALARM_2_PROPS),
IsEmpty(),
'trigger alarm 2 should have been removed')
self.assertThat(
g_utils.all_matches(alarms, **DEDUCED_PROPS),
IsEmpty(),
'deduced alarm should have been removed')
finally:
self._trigger_undo_action(TRIGGER_ALARM_1)
self._trigger_undo_action(TRIGGER_ALARM_2)
| 40.495726 | 78 | 0.646264 | 7,323 | 0.772794 | 0 | 0 | 7,241 | 0.764141 | 0 | 0 | 1,626 | 0.171591 |
f5b80f86d6e5672de1791e2d08c1fbaf96195a02 | 4,137 | py | Python | clone_tests/clone_compilation_errors.py | dcz-purism/glib | eccd097166cdf7dfea9be17869868d45f8ef4ef6 | [
"MIT-0",
"MIT"
]
| null | null | null | clone_tests/clone_compilation_errors.py | dcz-purism/glib | eccd097166cdf7dfea9be17869868d45f8ef4ef6 | [
"MIT-0",
"MIT"
]
| null | null | null | clone_tests/clone_compilation_errors.py | dcz-purism/glib | eccd097166cdf7dfea9be17869868d45f8ef4ef6 | [
"MIT-0",
"MIT"
]
| null | null | null | import json
import os
import subprocess
import sys
TEST_FILENAME = "tmp_py_file"
TEST_FOLDER = "clone_tests"
TESTS = [
("clone!( => move || {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(|| {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(|a, b| {})",
"If you have nothing to clone, no need to use this macro!"),
("clone!(@strong self => move |x| {})",
"Can't use `self` as variable name. Try storing it in a temporary variable or rename it using `as`."),
("clone!(@strong self.v => move |x| {})",
"Field accesses are not allowed as is, you must rename it!"),
("clone!(@weak v => @default-return false, || {})",
"Closure needs to be \"moved\" so please add `move` before closure"),
("clone!(@weak v => @default-return false, |bla| {})",
"Closure needs to be \"moved\" so please add `move` before closure"),
("clone!(@weak v => default-return false, move || {})",
"Missing `@` before `default-return`"),
("clone!(@weak v => @default-return false move || {})",
"Missing comma after `@default-return`'s value"),
("clone!(@yolo v => move || {})",
"Unknown keyword, only `weak` and `strong` are allowed"),
("clone!(v => move || {})",
"You need to specify if this is a weak or a strong clone."),
]
def convert_to_string(s):
if s.__class__.__name__ == 'bytes':
return s.decode('utf-8')
return s
def exec_command(command):
child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = child.communicate()
return (child.returncode == 0, convert_to_string(stdout), convert_to_string(stderr))
def run_test(code, expected_str):
with open("{}/{}.rs".format(TEST_FOLDER, TEST_FILENAME), 'w') as f:
f.write('extern crate glib;use glib::clone;use std::rc::Rc;fn main(){{let v = Rc::new(1);{};}}'.format(code))
code, stdout, stderr = exec_command([
"bash",
"-c",
"cd {} && cargo build --message-format json".format(TEST_FOLDER),
])
os.remove("{}/{}.rs".format(TEST_FOLDER, TEST_FILENAME))
if code is True:
return "This isn't supposed to compile!"
parts = stdout.split('}\n{')
compiler_message = None
for (pos, part) in enumerate(parts):
try:
if pos > 0:
part = "{" + part
if pos + 1 < len(parts):
part += "}"
x = json.loads(part)
if (x["reason"] != "compiler-message"
or x["message"]["message"] == "aborting due to previous error"):
continue
compiler_message = x["message"]["message"]
break
except Exception:
continue
if compiler_message is None:
return "Weird issue: no compiler-message found..."
if expected_str not in compiler_message:
return "`{}` not found in `{}`".format(expected_str, compiler_message)
return None
def run_tests():
print("About to start the tests on the clone! macro.")
print("It might be slow to run the first one since cargo has to build dependencies...")
print("")
errors = 0
with open('{}/Cargo.toml'.format(TEST_FOLDER), 'w') as f:
f.write("""[package]
name = "test"
version = "0.0.1"
authors = ["gtk-rs developers"]
[dependencies]
glib = {{ path = ".." }}
[[bin]]
name = "{0}"
path = "{0}.rs"
""".format(TEST_FILENAME))
for (code, expected_str) in TESTS:
sys.stdout.write('Running `{}`...'.format(code))
sys.stdout.flush()
err = run_test(code, expected_str)
if err is not None:
print(" FAILED\n{}".format(err))
errors += 1
else:
print(" OK")
print("Ran {} tests, got {} failure{}".format(len(TESTS), errors, "s" if errors > 1 else ""))
os.remove("{}/Cargo.toml".format(TEST_FOLDER))
os.remove("{}/Cargo.lock".format(TEST_FOLDER))
exec_command(['bash', '-c', 'rm -r {}/target'.format(TEST_FOLDER)])
return errors
if __name__ == "__main__":
sys.exit(run_tests())
| 35.358974 | 117 | 0.578922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,916 | 0.463138 |
f5b9371efb3fb18aace487077f47abfd7957e4b2 | 2,437 | py | Python | tests/test_tags.py | wbcsmarteezgithub/django-snakeoil | ae1a8dab9e14194e48963101ff3349f45aee0ccf | [
"BSD-2-Clause"
]
| 1 | 2020-07-03T15:52:25.000Z | 2020-07-03T15:52:25.000Z | tests/test_tags.py | wbcsmarteezgithub/django-snakeoil | ae1a8dab9e14194e48963101ff3349f45aee0ccf | [
"BSD-2-Clause"
]
| null | null | null | tests/test_tags.py | wbcsmarteezgithub/django-snakeoil | ae1a8dab9e14194e48963101ff3349f45aee0ccf | [
"BSD-2-Clause"
]
| null | null | null | from __future__ import unicode_literals
from django.http import HttpRequest
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase
from snakeoil.models import SeoUrl
from .models import TestModel
class GetSeoDataTagTests(TestCase):
def test_invalid_syntax(self):
request = HttpRequest()
request.path = '/'
with self.assertRaises(TemplateSyntaxError):
Template(
'{% load snakeoil %}'
'{% get_seo_data spam %}'
'{{ seo.head_title }}'
'{{ seo.meta_description }}'
).render(Context({'request': request}))
def test_no_data(self):
request = HttpRequest()
request.path = '/'
out = Template(
'{% load snakeoil %}'
'{% get_seo_data %}'
'{{ seo.head_title }}'
'{{ seo.meta_description }}'
).render(Context({'request': request}))
self.assertEqual(out, '')
def test_data_from_url(self):
SeoUrl.objects.create(url='/', head_title='spam',
meta_description='eggs')
request = HttpRequest()
request.path = '/'
out = Template(
'{% load snakeoil %}'
'{% get_seo_data %}'
'{{ seo.head_title }}'
'{{ seo.meta_description }}'
).render(Context({'request': request}))
self.assertEqual(out, 'spameggs')
def test_as_parameter(self):
SeoUrl.objects.create(url='/', head_title='spam',
meta_description='eggs')
request = HttpRequest()
request.path = '/'
out = Template(
'{% load snakeoil %}'
'{% get_seo_data as spam %}'
'{{ spam.head_title }}'
'{{ spam.meta_description }}'
).render(Context({'request': request}))
self.assertEqual(out, 'spameggs')
def test_data_from_model(self):
obj = TestModel.objects.create(head_title='spam',
meta_description='eggs')
request = HttpRequest()
request.path = '/'
out = Template(
'{% load snakeoil %}'
'{% get_seo_data %}'
'{{ seo.head_title }}'
'{{ seo.meta_description }}'
).render(Context({'request': request, 'obj': obj}))
self.assertEqual(out, 'spameggs')
| 29.719512 | 66 | 0.531801 | 2,190 | 0.898646 | 0 | 0 | 0 | 0 | 0 | 0 | 609 | 0.249897 |
f5b9906a08803c2fec8e92b95456e8a8ee69c95c | 50 | py | Python | src/runner/__init__.py | Tung-I/nips2019_template | a1fcf35b7633d192d2706a533731cb8c457ac230 | [
"MIT"
]
| 11 | 2020-08-09T08:08:56.000Z | 2022-01-18T14:25:22.000Z | src/runner/__init__.py | Tung-I/nips2019_template | a1fcf35b7633d192d2706a533731cb8c457ac230 | [
"MIT"
]
| 2 | 2021-09-13T09:48:41.000Z | 2021-11-08T14:20:58.000Z | src/runner/__init__.py | Tung-I/nips2019_template | a1fcf35b7633d192d2706a533731cb8c457ac230 | [
"MIT"
]
| 4 | 2020-08-30T14:13:35.000Z | 2021-09-14T09:26:55.000Z | from .trainers import *
from .predictors import *
| 16.666667 | 25 | 0.76 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5ba98b5a8a467c1237f20ea32bee34cf54cde58 | 420 | py | Python | test/nn/conv/test_gravnet_conv.py | shrey-bansal/pytorch_geometric | 17108a08066b0a73530544d01719b186f2625ef2 | [
"MIT"
]
| 2 | 2020-09-08T15:22:08.000Z | 2020-09-08T15:22:09.000Z | test/nn/conv/test_gravnet_conv.py | shrey-bansal/pytorch_geometric | 17108a08066b0a73530544d01719b186f2625ef2 | [
"MIT"
]
| null | null | null | test/nn/conv/test_gravnet_conv.py | shrey-bansal/pytorch_geometric | 17108a08066b0a73530544d01719b186f2625ef2 | [
"MIT"
]
| 1 | 2021-07-06T06:50:21.000Z | 2021-07-06T06:50:21.000Z | import torch
from torch_geometric.nn import GravNetConv
def test_gravnet_conv():
num_nodes, in_channels, out_channels = 20, 16, 32
x = torch.randn((num_nodes, in_channels))
conv = GravNetConv(in_channels, out_channels, space_dimensions=4,
propagate_dimensions=8, k=12)
assert conv.__repr__() == 'GravNetConv(16, 32, k=12)'
assert conv(x).size() == (num_nodes, out_channels)
| 32.307692 | 69 | 0.688095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.064286 |
f5baf25c3fc1ee4bca1c0e0df333ed41bd65f476 | 2,216 | py | Python | base/CrossPlotter.py | pulsatrixwx/PulsatrixWx | aae6ac36e2460dcf7f4a592d709139cd0d6a2e91 | [
"MIT"
]
| 3 | 2016-03-27T00:21:46.000Z | 2018-06-01T09:20:57.000Z | base/CrossPlotter.py | pulsatrixwx/PulsatrixWx | aae6ac36e2460dcf7f4a592d709139cd0d6a2e91 | [
"MIT"
]
| null | null | null | base/CrossPlotter.py | pulsatrixwx/PulsatrixWx | aae6ac36e2460dcf7f4a592d709139cd0d6a2e91 | [
"MIT"
]
| null | null | null | from datetime import datetime
from hootpy import HootPy
class CrossPlotter(HootPy):
"""
CrossPlotter
Purpose: Handles the plotting of cross section products.
Started: 14 June 2010 by Tim Supinie ([email protected])
Completed: [not yet]
Modified: [not yet]
"""
def __init__(self, config):
"""
__init__()
Purpose: Constructor for the CrossPlotter class.
Parameters: config [type=dictionary]
Dictionary containing configuration parameters for the run.
"""
super(CrossPlotter, self).__init__(config)
return
def loadData(self):
"""
loadData() [public]
Purpose: Handles the loading in of data.
Parameters: [none]
Returns: [nothing]
"""
return
def plot(self):
"""
plot() [public]
Purpose: Plot cross section products. For model products, plots products for all forecast hours.
Parameters: [none]
Returns: [nothing]
"""
if self._forecast_hours is None:
# Plot cross section here ...
self._finalizeCrossSection(None)
else:
for fh in self._forecast_hours:
# Plot the cross section here ...
self._finalizeCrossSection(fh)
return
def _finalizeCrossSection(self, forecast_hour):
"""
_finalizeCrossSection() [protected]
Purpose: Add final things to the profile, such as the background,
title, valid time, and image border, and then save the image.
Parameters: forecast_hour [type=int]
Forecast hour for model products (pass in None for an observed product).
Returns: [nothing]
"""
# Finish creating the product. Should be last.
self._finalizeProduct(forecast_hour)
return
if __name__ == "__main__":
cfg = {
'forecast_hours':[0, 3, 6, 9, 12],
'product_title':"NAM Forecast Cross Section KDRT-KGRB",
'image_file_name':"nam_fcross_KDRT-KGRB_f%02d.png"
}
hpc = CrossPlotter(cfg)
hpc.loadData()
hpc.plot()
| 28.410256 | 108 | 0.581679 | 1,882 | 0.849278 | 0 | 0 | 0 | 0 | 0 | 0 | 1,418 | 0.639892 |
f5bb1ebe52102d71c8810bac844699880019ddf3 | 3,072 | py | Python | management/commands/syncldap.py | LUH-CHI/chiffee | 78ec85d36a6c757e5f56113089f1b56fdb0ed494 | [
"MIT"
]
| 1 | 2018-03-22T09:53:06.000Z | 2018-03-22T09:53:06.000Z | management/commands/syncldap.py | LUH-CHI/chiffee | 78ec85d36a6c757e5f56113089f1b56fdb0ed494 | [
"MIT"
]
| 4 | 2019-04-01T08:44:40.000Z | 2020-02-07T17:44:16.000Z | management/commands/syncldap.py | LUH-CHI/chiffee | 78ec85d36a6c757e5f56113089f1b56fdb0ed494 | [
"MIT"
]
| 4 | 2018-05-04T12:01:50.000Z | 2019-10-11T09:47:33.000Z | import logging
import ldap
from django.conf import settings
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from django_auth_ldap.backend import LDAPBackend
from chiffee.models import User
logger = logging.getLogger('syncldap')
# This command synchronizes local database with the LDAP server.
# New LDAP user -> new user in the local database.
# Deleted LDAP user -> local user is set to inactive.
class Command(BaseCommand):
help = 'Syncing local users with LDAP... '
def handle(self, *args, **options):
self.populate_db()
self.find_inactive_user()
# Find all users in LDAP and add them to the database if needed.
def populate_db(self):
connection = ldap.initialize(settings.AUTH_LDAP_SERVER_URI)
connection.simple_bind_s(settings.AUTH_LDAP_BIND_DN,
settings.AUTH_LDAP_BIND_PASSWORD)
filter_ = '(&(uid=*))' # Customize this if necessary.
ldap_users = connection.search_s(settings.BASE_DN,
ldap.SCOPE_SUBTREE,
filter_)
connection.unbind()
for ldap_user in ldap_users:
username = ldap_user[1]['uid'][0].decode('UTF-8')
if not User.objects.filter(username=username).exists():
logger.info('Adding new user %s...' % username)
user = LDAPBackend().populate_user(
ldap_user[1]['uid'][0].decode('UTF-8'))
user.is_active = True
# Add a single group to the user.
# When group information is not stored as part of the user info,
# code needs to be modified.
try:
groups = ldap_user[1]['group']
except KeyError:
logger.info(
'User could not be added to a group and won\'t be able to '
'purchase anything.')
continue
groups = [g.decode('UTF-8') for g in groups]
self.add_user_to_group(user, groups)
user.save()
# A user should belong to only one group.
# Group priority: professors > employees > students
def add_user_to_group(self, user, groups):
if 'professors' in groups:
group_name = 'professors'
elif 'employees' in groups:
group_name = 'employees'
else:
group_name = 'students'
group = Group.objects.get(name=group_name)
if len(user.groups.all()) == 0:
group.user_set.add(user)
else:
user.groups.clear()
group.user_set.add(user)
# Mark all users with no LDAP entry inactive.
def find_inactive_user(self):
for user in User.objects.filter(is_active=True):
ldap_user = LDAPBackend().populate_user(user.username)
if ldap_user is None and not user.is_superuser:
logger.info('User %s set to inactive.' % user)
user.is_active = False
user.save()
| 36.571429 | 79 | 0.595378 | 2,619 | 0.852539 | 0 | 0 | 0 | 0 | 0 | 0 | 802 | 0.261068 |
f5bc7050656c4c3afee2238a72f86661143054d5 | 598 | py | Python | pysal/spreg/__init__.py | cubensys/pysal | 8d50990f6e6603ba79ae1a887a20a1e3a0734e51 | [
"MIT",
"BSD-3-Clause"
]
| null | null | null | pysal/spreg/__init__.py | cubensys/pysal | 8d50990f6e6603ba79ae1a887a20a1e3a0734e51 | [
"MIT",
"BSD-3-Clause"
]
| null | null | null | pysal/spreg/__init__.py | cubensys/pysal | 8d50990f6e6603ba79ae1a887a20a1e3a0734e51 | [
"MIT",
"BSD-3-Clause"
]
| 1 | 2021-07-19T01:46:17.000Z | 2021-07-19T01:46:17.000Z | from ols import *
from diagnostics import *
from diagnostics_sp import *
from user_output import *
from twosls import *
from twosls_sp import *
from error_sp import *
from error_sp_het import *
from error_sp_hom import *
from ols_regimes import *
from twosls_regimes import *
from twosls_sp_regimes import *
from error_sp_regimes import *
from error_sp_het_regimes import *
from error_sp_hom_regimes import *
from probit import *
from ml_lag import *
from ml_lag_regimes import *
from ml_error import *
from ml_error_regimes import *
from sur import *
from sur_error import *
from sur_lag import *
| 24.916667 | 34 | 0.807692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5bdaf65264833d8c298cbab96f3a7c910693f18 | 209 | py | Python | tests/conftest.py | lambertsbennett/Encountertk | 708aedb38cb1689da8d2f39c68bd8694c64a79da | [
"MIT"
]
| null | null | null | tests/conftest.py | lambertsbennett/Encountertk | 708aedb38cb1689da8d2f39c68bd8694c64a79da | [
"MIT"
]
| null | null | null | tests/conftest.py | lambertsbennett/Encountertk | 708aedb38cb1689da8d2f39c68bd8694c64a79da | [
"MIT"
]
| null | null | null | from pytest import fixture
from encountertk.e_model import EncounterModel, ps_encounter, mean_vol_encountered
@fixture(scope='function')
def EModel():
return EncounterModel(kernel=1,pop2c=[1],pop1c=[1])
| 26.125 | 82 | 0.789474 | 0 | 0 | 0 | 0 | 96 | 0.45933 | 0 | 0 | 10 | 0.047847 |
f5beb267f6635aef6117ff273b49cdca310125ca | 367 | py | Python | jp.atcoder/abc045/abc045_b/8983851.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
]
| 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc045/abc045_b/8983851.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
]
| 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc045/abc045_b/8983851.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
]
| null | null | null | import sys
from collections import deque
a, b, c = sys.stdin.read().split()
def main():
deck = dict([("a", deque(a)), ("b", deque(b)), ("c", deque(c))])
p = "a"
while True:
if deck[p]:
p = deck[p].popleft()
else:
return p.upper()
if __name__ == "__main__":
ans = main()
print(ans)
| 17.47619 | 69 | 0.46594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.059946 |
f5bed273a043f28510a7c31520baff8cb6ddab43 | 16,504 | py | Python | src/pipelines/azureml/lightgbm_training.py | microsoft/lightgbm-benchmark | 286668d698d9d166857f924ecb775d5de224d489 | [
"MIT"
]
| 13 | 2021-08-20T01:03:51.000Z | 2022-02-12T05:34:46.000Z | src/pipelines/azureml/lightgbm_training.py | microsoft/lightgbm-benchmark | 286668d698d9d166857f924ecb775d5de224d489 | [
"MIT"
]
| 199 | 2021-08-21T21:18:53.000Z | 2022-03-27T23:08:44.000Z | src/pipelines/azureml/lightgbm_training.py | microsoft/lightgbm-benchmark | 286668d698d9d166857f924ecb775d5de224d489 | [
"MIT"
]
| 4 | 2021-08-20T06:53:26.000Z | 2022-01-24T22:22:39.000Z | """
Runs LightGBM using distributed (mpi) training.
to execute:
> python src/pipelines/azureml/lightgbm_training.py --exp-config conf/experiments/lightgbm_training/cpu.yaml
"""
# pylint: disable=no-member
# NOTE: because it raises 'dict' has no 'outputs' member in dsl.pipeline construction
import os
import sys
import json
import logging
import argparse
# config management
from dataclasses import dataclass
from omegaconf import OmegaConf, MISSING
from typing import Optional, Any, List
# AzureML
from azure.ml.component import Component
from azure.ml.component import dsl
from azure.ml.component.environment import Docker
# when running this script directly, needed to import common
LIGHTGBM_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SCRIPTS_SOURCES_ROOT = os.path.join(LIGHTGBM_REPO_ROOT, 'src')
if SCRIPTS_SOURCES_ROOT not in sys.path:
logging.info(f"Adding {SCRIPTS_SOURCES_ROOT} to path")
sys.path.append(str(SCRIPTS_SOURCES_ROOT))
from common.tasks import training_task, training_variant
from common.sweep import SweepParameterParser
from common.aml import load_dataset_from_data_input_spec
from common.aml import apply_sweep_settings
from common.pipelines import (
parse_pipeline_config,
azureml_connect,
pipeline_submit,
COMPONENTS_ROOT
)
### CONFIG DATACLASS ###
# Step 1 : to configure your pipeline, add all your fields inside a
# properly defined dataclass, pipeline_cli_main will figure out how
# to read that config from a given yaml file + hydra override commands
@dataclass
class lightgbm_training_config: # pylint: disable=invalid-name
""" Config object constructed as a dataclass.
NOTE: the name of this class will be used as namespace in your config yaml file.
"""
# NOTE: all those values are REQUIRED in your yaml config file
benchmark_name: str = MISSING
# INPUT DATASETS
tasks: List[training_task] = MISSING
# TRAINING PARAMS
reference: training_variant = MISSING
# free changing parameters on top of reference
variants: Optional[Any] = None
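# Example (illustrative sketch): the YAML passed via --exp-config might be laid out as below.
# The exact nested fields come from training_task / training_variant in common.tasks, so the
# inner keys shown here are assumptions, not the authoritative schema.
#
#   lightgbm_training_config:
#     benchmark_name: "benchmark-dev"
#     tasks:
#       - train: {...}          # data input spec
#         test: {...}
#         task_key: "synthetic-regression"
#     reference:
#       training: {num_iterations: 100, num_leaves: 31}
#       runtime: {nodes: 1, processes: 1}
#     variants:
#       - training: {num_leaves: 63}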
### PIPELINE COMPONENTS ###
# Step 2 : your pipeline consists in assembling components
# load those components from local yaml specifications
# use COMPONENTS_ROOT as base folder
lightgbm_train_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "training", "lightgbm_python", "spec.yaml"))
lightgbm_train_sweep_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "training", "lightgbm_python", "sweep_spec.yaml"))
partition_data_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "partition_data", "spec.yaml"))
lightgbm_data2bin_module = Component.from_yaml(yaml_file=os.path.join(COMPONENTS_ROOT, "data_processing", "lightgbm_data2bin", "spec.yaml"))
### PIPELINE SPECIFIC CODE ###
def process_sweep_parameters(params_dict, sweep_algorithm):
"""Parses config and spots sweepable paraneters
Args:
params_dict (dict): configuration object (see get_config_class())
sweep_algorithm (str): random, grid, bayesian
Returns:
tunable_params (dict)
"""
# the class below automates parsing of sweepable parameters
sweep_param_parser = SweepParameterParser(
tunable_parameters=[
# those are keys and their default values
"num_iterations",
"num_leaves",
"min_data_in_leaf",
"learning_rate",
"max_bin",
"feature_fraction"
],
cli_prefix=None, # this is not argparse
parameter_sampling=sweep_algorithm
)
# provide config as a dictionary to the parser
sweep_parameters = {
"num_iterations": params_dict['num_iterations'],
"num_leaves": params_dict['num_leaves'],
"min_data_in_leaf": params_dict['min_data_in_leaf'],
"learning_rate": params_dict['learning_rate'],
"max_bin": params_dict['max_bin'],
"feature_fraction": params_dict['feature_fraction'],
}
# parser gonna parse
sweep_param_parser.parse_from_dict(sweep_parameters)
# and return params as we want them
tunable_params = sweep_param_parser.get_tunable_params()
fixed_params = sweep_param_parser.get_fixed_params()
    # return dictionaries to be fed as params into our pipeline
return tunable_params, fixed_params
### TRAINING PIPELINE ###
# Step 3: your pipeline consists in creating a python function
# decorated with @dsl.pipeline.
# You can create as many subgraphs as you want,
# but `pipeline_cli_main` will need one pipeline function
# taking a single config argument, not a pipeline parameter.
# Here you should create an instance of a pipeline function (using your custom config dataclass)
@dsl.pipeline(
name="lightgbm_training", # pythonic name
description="LightGBM distributed training (mpi)",
non_pipeline_parameters=['config', 'benchmark_custom_properties']
)
def lightgbm_training_pipeline_function(config,
benchmark_custom_properties,
train_dataset,
test_dataset):
"""Pipeline function for this graph.
Args:
TODO
Returns:
dict[str->PipelineOutputData]: a dictionary of your pipeline outputs
for instance to be consumed by other graphs
"""
# create list of all variants params
training_variants_params = [
config.lightgbm_training_config.reference
]
# if there's any variant specified
if config.lightgbm_training_config.variants:
# create distinct training params for each variant
for variant_index, training_variant_config in enumerate(config.lightgbm_training_config.variants):
# create a specific dict of params for the variant
variant_config = OmegaConf.merge(config.lightgbm_training_config.reference, training_variant_config)
training_variants_params.append(variant_config)
# for each variant, check if sweep needs to be applied
for variant_index, variant_params in enumerate(training_variants_params):
############
### DATA ###
############
# if we're using multinode, add partitioning
if variant_params.data.auto_partitioning and (variant_params.training.tree_learner == "data" or variant_params.training.tree_learner == "voting"):
# if using data parallel, train data has to be partitioned first
if (variant_params.runtime.nodes * variant_params.runtime.processes) > 1:
partition_data_step = partition_data_module(
input_data=train_dataset,
mode="roundrobin",
number=(variant_params.runtime.nodes * variant_params.runtime.processes),
header=variant_params.data.header,
verbose=variant_params.training.verbose
)
partition_data_step.runsettings.configure(target=config.compute.linux_cpu)
partitioned_train_data = partition_data_step.outputs.output_data
else:
# for other modes, train data has to be one file
partitioned_train_data = train_dataset
else:
# for other modes, train data has to be one file
partitioned_train_data = train_dataset
# convert into binary files
if variant_params.data.pre_convert_to_binary:
convert_data2bin_step = lightgbm_data2bin_module(
train=partitioned_train_data,
test=test_dataset,
header=variant_params.data.header,
label_column=variant_params.data.label_column,
group_column=variant_params.data.group_column,
max_bin=variant_params.training.max_bin,
custom_params=json.dumps(dict(variant_params.training.custom_params or {})),
verbose=variant_params.training.verbose
)
convert_data2bin_step.runsettings.configure(target=config.compute.linux_cpu)
prepared_train_data = convert_data2bin_step.outputs.output_train
prepared_test_data = convert_data2bin_step.outputs.output_test
else:
prepared_train_data = partitioned_train_data
prepared_test_data = test_dataset
################
### TRAINING ###
################
# copy params into dict for flexibility
training_params = dict(variant_params.training)
# add all data-related params
training_params['header'] = variant_params.data.header
training_params['label_column'] = variant_params.data.label_column
training_params['group_column'] = variant_params.data.group_column
# extract and construct "sweepable" params
if variant_params.sweep:
tunable_params, fixed_params = process_sweep_parameters(
variant_params.training,
variant_params.sweep.algorithm
)
# test if we have sweepable parameters in the learning params
if len(tunable_params) > 0:
use_sweep = True
training_params.update(tunable_params)
else:
use_sweep = False
else:
use_sweep = False
# create custom properties and serialize to pass as argument
variant_custom_properties = {
'variant_index': variant_index,
'framework': "lightgbm",
'framework_build': variant_params.runtime.build,
}
variant_custom_properties.update(benchmark_custom_properties)
training_params['custom_properties'] = json.dumps(variant_custom_properties)
# serialize custom_params to pass as argument
training_params['custom_params'] = json.dumps(dict(variant_params.training.custom_params or {}))
# some debug outputs to expose variant parameters
print(f"*** lightgbm variant#{variant_index}: {training_params}")
# figuring out target (cpu/gpu)
training_target = variant_params.runtime.target
if not training_target:
if (variant_params.training.device_type == 'gpu' or variant_params.training.device_type == 'cuda'):
training_target = config.compute.linux_gpu
else:
training_target = config.compute.linux_cpu
if use_sweep:
# sweep training
if variant_params.sweep.primary_metric is None:
variant_params.sweep.primary_metric=f"node_0/valid_0.{variant_params.training.metric}"
lightgbm_train_step = lightgbm_train_sweep_module(
train = prepared_train_data,
test = prepared_test_data,
**training_params
)
# apply runsettings
lightgbm_train_step.runsettings.target=training_target
lightgbm_train_step.runsettings.resource_layout.node_count = variant_params.runtime.nodes
lightgbm_train_step.runsettings.resource_layout.process_count_per_node = variant_params.runtime.processes
# apply settings from our custom yaml config
apply_sweep_settings(lightgbm_train_step, variant_params.sweep)
else:
# regular training, no sweep
lightgbm_train_step = lightgbm_train_module(
train = prepared_train_data,
test = prepared_test_data,
**training_params
)
# apply runsettings
lightgbm_train_step.runsettings.target=training_target
lightgbm_train_step.runsettings.resource_layout.node_count = variant_params.runtime.nodes
lightgbm_train_step.runsettings.resource_layout.process_count_per_node = variant_params.runtime.processes
###############
### RUNTIME ###
###############
# # optional: override docker (ex: to test custom builds)
if 'build' in variant_params.runtime and variant_params.runtime.build:
custom_docker = Docker(file=os.path.join(LIGHTGBM_REPO_ROOT, variant_params.runtime.build))
lightgbm_train_step.runsettings.environment.configure(
docker=custom_docker
)
##############
### OUTPUT ###
##############
# add some relevant comments on the component
lightgbm_train_step.comment = " -- ".join(
[
f"variant #{variant_index}",
# add more
]
)
# optional: save output model
if variant_params.output and variant_params.output.register_model:
# "{register_model_prefix}-{task_key}-{num_iterations}trees-{num_leaves}leaves-{register_model_suffix}"
model_basename = "{num_iterations}trees-{num_leaves}leaves".format(
num_iterations=variant_params.training.num_iterations,
num_leaves=variant_params.training.num_leaves
)
# prepend task_key if given
if benchmark_custom_properties.get('benchmark_task_key', None):
model_basename = benchmark_custom_properties['benchmark_task_key'] + "-" + model_basename
# prepend prefix if given
if variant_params.output.register_model_prefix:
model_basename = variant_params.output.register_model_prefix + "-" + model_basename
# append suffix if given
if variant_params.output.register_model_suffix:
model_basename += "-" + variant_params.output.register_model_suffix
print(f"*** Will output model at {model_basename}")
# auto-register output with model basename
lightgbm_train_step.outputs.model.register_as(
name=model_basename,
create_new_version=True
)
# return {key: output}'
return {}
# creating an overall pipeline using pipeline_function for each task given
@dsl.pipeline(
name="training_all_tasks",
non_pipeline_parameters=['workspace', 'config']
)
def training_all_tasks(workspace, config):
# loop on all training tasks
for training_task in config.lightgbm_training_config.tasks:
# load the given train dataset
train_data = load_dataset_from_data_input_spec(workspace, training_task.train)
test_data = load_dataset_from_data_input_spec(workspace, training_task.test)
# create custom properties for this task
# they will be passed on to each job as tags
benchmark_custom_properties = {
'benchmark_name' : config.lightgbm_training_config.benchmark_name,
'benchmark_task_key' : training_task.task_key
}
# call pipeline_function as a subgraph here
training_task_subgraph_step = lightgbm_training_pipeline_function(
# NOTE: benchmark_custom_properties is not an actual pipeline input, just passed to the python code
config=config,
benchmark_custom_properties=benchmark_custom_properties,
train_dataset=train_data,
test_dataset=test_data
)
# add some relevant comments on the subgraph
training_task_subgraph_step.comment = " -- ".join([
"LightGBM training pipeline",
f"benchmark name: {config.lightgbm_training_config.benchmark_name}",
f"benchmark task key: {training_task.task_key}"
])
### MAIN BLOCK ###
# Step 4: implement main block using helper functions
def main():
# use parse helper function to get arguments from CLI
config = parse_pipeline_config(lightgbm_training_config)
# you'll need a workspace object to connect
workspace = azureml_connect(config)
# run the pipeline function with the given arguments
pipeline_instance = training_all_tasks(workspace, config)
# generate a nice markdown description
experiment_description="\n".join([
"Training on all specified tasks (see yaml below).",
"```yaml""",
"data_generation_config:",
OmegaConf.to_yaml(config.lightgbm_training_config),
"```"
])
# validate/submit the pipeline (if run.submit=True)
pipeline_submit(
workspace,
config,
pipeline_instance,
experiment_description=experiment_description
)
if __name__ == "__main__":
main()
| 39.961259 | 154 | 0.673534 | 522 | 0.031629 | 0 | 0 | 11,241 | 0.681108 | 0 | 0 | 5,811 | 0.352096 |
f5bf990b580312d748c5534bd056ce7638df5fe7 | 3,319 | py | Python | twinfield/metadata.py | zypp-io/twinfield | b4306e79f514ae691584c2d47ce072a3619469b8 | [
"Apache-2.0"
]
| 4 | 2020-12-20T23:02:33.000Z | 2022-01-13T19:40:13.000Z | twinfield/metadata.py | zypp-io/twinfield | b4306e79f514ae691584c2d47ce072a3619469b8 | [
"Apache-2.0"
]
| 9 | 2020-12-18T07:27:07.000Z | 2022-02-17T09:23:51.000Z | twinfield/metadata.py | zypp-io/twinfield | b4306e79f514ae691584c2d47ce072a3619469b8 | [
"Apache-2.0"
]
| null | null | null | from xml.etree import ElementTree as Et
import pandas as pd
import requests
from twinfield.core import Base
from twinfield.exceptions import ServerError
from twinfield.messages import METADATA_XML
class Metadata(Base):
def __init__(self, access_token: str, code: str, company: str):
"""
This class is for building the Browse SOAP requests for getting metadata of browse codes
Parameters
----------
access_token: str
access_token obtained from TwinfieldLogin class.
code: str
specific browsecode of which we want to get the metadata
company: str
            specifies the office code of the request
"""
super().__init__()
self.browsecode = code
self.access_token = access_token
self.company = company
def create_metadata_query(self) -> str:
"""
Returns
-------
columns: str
combination of fields and filters, that together make up for the <columns> section in
the XML template.
"""
metadata_request = f"""<read>
<type>browse</type>
<code>{self.browsecode}</code>
</read>"""
return metadata_request
def body(self) -> str:
"""
Returns
-------
body: str
            the full XML SOAP message for the request. The body is built up from a base template,
            string-formatted with the current session_id, the module requested and the columns.
"""
xml = self.create_metadata_query()
body = METADATA_XML.format(self.access_token, self.company, xml)
return body
def parse_metadata_response(self, response: requests.Response) -> pd.DataFrame:
"""
Parameters
----------
response
Response object containing the twinfield server response
Returns
-------
df: pd.DataFrame
dataframe of metadata
"""
root = Et.fromstring(response.text)
body = root.find("env:Body", self.namespaces)
if body.find("env:Fault", self.namespaces):
raise ServerError()
data = body.find("tw:ProcessXmlStringResponse/tw:ProcessXmlStringResult", self.namespaces)
data = Et.fromstring(data.text)
col = data.find("columns")
rec = list()
for records in col:
ttl = dict()
for record in records:
ttl[record.tag] = record.text
rec.append(ttl)
df = pd.DataFrame(rec)
return df
def send_request(self, cluster) -> pd.DataFrame:
"""
Parameters
----------
cluster: cluster obtained from TwinfieldApi class
Returns
-------
df: pd.DataFrame
dataframe containing the records.
"""
body = self.body()
response = requests.post(
url=f"{cluster}/webservices/processxml.asmx?wsdl",
headers={"Content-Type": "text/xml", "Accept-Charset": "utf-8"},
data=body,
)
metadata = self.parse_metadata_response(response)
metadata.loc[metadata.label.isna(), "label"] = metadata.field
metadata.set_index("field", inplace=True)
return metadata
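# Example usage (illustrative sketch; obtaining the access token and cluster URL is handled
# elsewhere, e.g. by the TwinfieldLogin/TwinfieldApi classes, and the code/company values
# below are placeholders):
#
#   meta = Metadata(access_token=token, code="100", company="NL001")
#   df = meta.send_request(cluster)   # pd.DataFrame of browse-field metadata, indexed by field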
| 27.658333 | 98 | 0.577584 | 3,117 | 0.939138 | 0 | 0 | 0 | 0 | 0 | 0 | 1,599 | 0.481772 |
f5c0bee32dd9418b4866fcc07b3ab0eea9c2d30b | 172 | py | Python | sru_lm/load_data/__init__.py | Fast-LM-WordEvalRu/SRU-LM | dd69d6c7b7b6c0164e83a874aee5e6f6766070d5 | [
"Apache-2.0"
]
| null | null | null | sru_lm/load_data/__init__.py | Fast-LM-WordEvalRu/SRU-LM | dd69d6c7b7b6c0164e83a874aee5e6f6766070d5 | [
"Apache-2.0"
]
| null | null | null | sru_lm/load_data/__init__.py | Fast-LM-WordEvalRu/SRU-LM | dd69d6c7b7b6c0164e83a874aee5e6f6766070d5 | [
"Apache-2.0"
]
| 2 | 2019-11-06T13:07:30.000Z | 2020-02-04T11:21:19.000Z | # Author: Artem Skiba
# Created: 20/01/2020
from .dataset import FastDataset
from .dataloader import get_dataloader
__all__ = [
'FastDataset', 'get_dataloader'
]
| 17.2 | 38 | 0.726744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.436047 |
f5c48e8b3a21158680b98773692e8c83b730ba87 | 5,053 | py | Python | libs/complex2epz.py | ledummy/CoMPlEx | f315df7a1b13cfcbdafd9879ff93a974f2e2c38b | [
"MIT"
]
| null | null | null | libs/complex2epz.py | ledummy/CoMPlEx | f315df7a1b13cfcbdafd9879ff93a974f2e2c38b | [
"MIT"
]
| 1 | 2020-04-08T12:55:50.000Z | 2020-04-08T12:55:50.000Z | libs/complex2epz.py | ledummy/CoMPlEx | f315df7a1b13cfcbdafd9879ff93a974f2e2c38b | [
"MIT"
]
| 1 | 2020-04-08T12:44:47.000Z | 2020-04-08T12:44:47.000Z | INIT = 1
REST = ['START_MODSAFE',[0,0]]
NEUTRAL = ['START_MODSAFE',[1,INIT]]
FDBK = ['START_MODSAFE',[2,INIT]]
LIN = ['START_MODSAFE',[3,INIT]]
SIN = ['START_MODSAFE',[4,INIT]]
TYPES = {'Vconst':LIN,'Fconst':FDBK,'Zconst':NEUTRAL}
try:
import epz as tempEpz
import inspect
    keys, _, _, _ = inspect.getargspec(tempEpz.CMD.__init__)
if 'tag' not in keys:
from libs.epz import epz as tempEpz
epz = tempEpz
except:
from libs.epz import epz
# N set the triggers. The triggers are, in order, adc (deflection), dac (z position), time
# 1 = used, 0 = not used
#Triggers
# K = set adc (deflection) stop trigger (Volts)
# L = set dac (z position) stop trigger (Volts)
# M = set time stop trigger in microseconds
# P = set the setpoint for the feedback (-1, +1)
# Q = set the proportional gain for the feedback (0.0 to 1.0)
# R = set the integral gain for the feedback (0.0 to 1.0)
# S = set the differential gain for the feedback (0.0 to 1.0)
# B = set DAC output (Volts)
# D = set the piezo speed (Volt/s)
# C = set the piezo speed sign
'''
SET_DACSTEP:D
SET_NUMT6TRIG:T
SET_TIMETRIG:M
SET_DAC_SOFT:B
SET_DAC_HARD:U
SET_TRIGGERS:N
SET_ZTRIG:L
SET_FTRIG:K
SET_TIM8PER:8
SET_SETPOINT:P
SET_PGAIN:Q
SET_IGAIN:R
SET_DGAIN:S
START_MODSAFE:O
SET_DACMODE:F
SET_TESTPIN:H
INIT_SPI2:I
SET_RAMPSIGN:C
SET_USECIRCBUFF:G
SET_MODEDBG:E
SET_DACTO0:J
SET_DAC_2OR4:A
SWITCH_SPI2:g
KILL:k
'''
class Interpreter(object):
def __init__(self,env,device=None,tag='CMD'):
if device is not None:
env.device = device
self.cmd = epz.CMD(env,tag=tag)
## Start the SPI communication
def startDev(self):
self.cmd.send('SWITCH_SPI2',1)
## Close the communication between the PIC and the raspberry PI
def stopDev(self):
self.cmd.send('SWITCH_SPI2',0)
    ## Turns the DSPIC circular buffer on
def circulaBufferOn(self):
self.cmd.send('SET_USECIRCBUFF',1)
    ## Turns the DSPIC circular buffer off
def circulaBufferOff(self):
self.cmd.send('SET_USECIRCBUFF',0)
## Set the unipolar DAC mode
def goUnipolar(self):
self.cmd.send('SET_DACMODE',0)
## Set the bipolar DAC mode
def goBipolar(self):
self.cmd.send('SET_DACMODE',1)
## Kill the epizmq process on the target raspberry PI
def killDev(self):
self.cmd.send('KILL')
## Set the Z piezo position
# @param value The new wanted z position in Volt
def setZ(self,value):
self.cmd.send('SET_Z',value)
## Set the speed at which the piezo has to move
# @param dacStep The number of steps to perform every 'T6' microseconds
    # @param t6TickNum The number of 'T6' ticks you have to wait before taking another step
    def setZramp(self,dacStep,t6TickNum):
        self.cmd.send('SET_DACSTEP',dacStep)
        self.cmd.send('SET_NUMT6TRIG',t6TickNum)
## Set the speed sign
# @param value The wanted speed sign (0 = positive, 1 = negative)
def setZrampSign(self,value):
self.cmd.send('SET_RAMPSIGN',value)
## Set the PI feedback integral gain
# @param value The new integral gain
def setI(self,value):
self.cmd.send('SET_IGAIN',value)
## Set the PI feedback proportional gain
# @param value The new proportional gain
def setP(self,value):
self.cmd.send('SET_PGAIN',value)
## Set the PI feedback set point
# @param value The new set point in Volt
def setSetPoint(self,value):
self.cmd.send('SET_SETPOINT',value)
## Set the deflection stop trigger
# @param value The stop trigger value in Volt for the deflection
    # @param sign 0 = greater than, 1 = less than
def setDeflStopTrig(self,value,sign):
self.cmd.send('SET_FTRIG',[value,sign])
## Set the z position stop trigger
# @param value The stop trigger value in Volt for the z position
    # @param sign 0 = greater than, 1 = less than
def setZposStopTrig(self,value,sign):
self.cmd.send('SET_ZTRIG',[value,sign])
## Set the time stop trigger
# @param value The time stop trigger value in microseconds
    # @param sign 0 = greater than, 1 = less than
def setTimeStopTrig(self,value,sign):
self.cmd.send('SET_TIMETRIG',[value,sign])
## Set which trigger you want to use
# @param t 1 = time trigger in use, 0 = time trigger not in use
# @param z 1 = z trigger in use, 0 = z trigger not in use
# @param d 1 = deflection trigger in use, 0 = deflection trigger not in use
def setTriggersSwitch(self,t,z,d):
self.cmd.send('SET_TRIGGERS',[d,z,t])
## Start a chosen type of segment, determined by "type"
# @param type The type of segment that has to be started
def startSegment(self,type):
self.cmd.send(*TYPES[type])
## Turns on the feedback
def feedbackOn(self):
self.cmd.send('START_MODSAFE',[2,0])
def setSine(self):
pass
    ## Brings the system to the "rest" state
def goToRest(self):
self.cmd.send(*REST)
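# Example usage (illustrative sketch; `env` stands for an already-configured epz environment
# object, which is set up elsewhere in CoMPlEx and not shown here):
#
#   interp = Interpreter(env, device='afm0')
#   interp.startDev()                    # open the SPI link
#   interp.setZrampSign(0)               # positive ramp direction
#   interp.setZramp(1, 10)               # one DAC step every 10 'T6' ticks
#   interp.setTriggersSwitch(t=1, z=0, d=1)
#   interp.startSegment('Vconst')        # constant-velocity (LIN) segment
#   interp.goToRest()
#   interp.stopDev()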
| 23.723005 | 90 | 0.660993 | 3,645 | 0.721354 | 0 | 0 | 0 | 0 | 0 | 0 | 2,954 | 0.584603 |
f5c4f96d849731c4a186b3fef06e21bef4391f32 | 1,177 | py | Python | test/device/test_brakes.py | uOstar/barista | ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1 | [
"MIT"
]
| 4 | 2017-11-05T19:37:23.000Z | 2018-06-18T13:18:11.000Z | test/device/test_brakes.py | uOstar/barista | ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1 | [
"MIT"
]
| 24 | 2017-11-05T19:22:08.000Z | 2018-06-14T13:50:39.000Z | test/device/test_brakes.py | uorocketry/barista | ab62ec6320fb9b5e9c305f23be7fc7e828c25ab1 | [
"MIT"
]
| 1 | 2022-03-25T04:01:25.000Z | 2022-03-25T04:01:25.000Z | import pytest
from mock import patch
from app.device.brakes import Brakes
from app.utils.servo import Servo
from app.utils.exceptions import InvalidArguments
@patch.object(Servo, 'write')
@patch.object(Servo, '__init__')
def test_init_creates_servo_on_pin_21(servo_init_mock, servo_write_mock):
servo_init_mock.return_value = None
servo_write_mock.return_value = None
brakes = Brakes()
servo_init_mock.assert_called_once_with(21)
servo_write_mock.assert_called_once_with(0)
@patch.object(Servo, 'write')
@patch.object(Servo, '__init__')
def test_write_full_close_is_20_precent(servo_init_mock, servo_write_mock):
servo_init_mock.return_value = None
servo_write_mock.return_value = None
brakes = Brakes()
brakes.deploy(0)
servo_write_mock.assert_called_with(0.2)
assert brakes.percentage == 0
@patch.object(Servo, 'write')
@patch.object(Servo, '__init__')
def test_write_full_open(servo_init_mock, servo_write_mock):
servo_init_mock.return_value = None
servo_write_mock.return_value = None
brakes = Brakes()
brakes.deploy(1.0)
servo_write_mock.assert_called_with(1.0)
assert brakes.percentage == 1.0
| 28.02381 | 75 | 0.773152 | 0 | 0 | 0 | 0 | 1,012 | 0.859813 | 0 | 0 | 51 | 0.043331 |
f5c957427e5b93fcfc4229d7e7efbe7a5cf8ce25 | 601 | py | Python | 4 kyu/Most_frequently_used_words_in_a_text.py | jonathansnolan/Codewars | 9d6a3fd10ffb2c61ae292961f384067cdede0470 | [
"MIT"
]
| null | null | null | 4 kyu/Most_frequently_used_words_in_a_text.py | jonathansnolan/Codewars | 9d6a3fd10ffb2c61ae292961f384067cdede0470 | [
"MIT"
]
| null | null | null | 4 kyu/Most_frequently_used_words_in_a_text.py | jonathansnolan/Codewars | 9d6a3fd10ffb2c61ae292961f384067cdede0470 | [
"MIT"
]
| null | null | null | from collections import Counter
def top_3_words(text):
text = text.lower()
count = ""
j = []
for u in text:
if ord(u) > 96 and ord(u) < 123 or ord(u) == 39:
count += u
else:
j.append(count)
count = ""
i = []
for k in j:
temp = ""
for u in k:
if ord(u) > 96 and ord(u) < 123 or ord(u) == 39 and len(k) > 3:
temp += u
if temp != "":
i.append(temp)
u = dict(Counter(i))
ans = sorted(u, key=u.get)
ans = ans[::-1]
ans = ans[:3]
return ans
| 22.259259 | 75 | 0.425957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.013311 |
f5cb0863a83b32aad95be43c48206bffad748391 | 33 | py | Python | test/__init__.py | rbn920/robosync | 1d430f64f6c7156920f92546770a1d2ddb558fea | [
"MIT"
]
| null | null | null | test/__init__.py | rbn920/robosync | 1d430f64f6c7156920f92546770a1d2ddb558fea | [
"MIT"
]
| null | null | null | test/__init__.py | rbn920/robosync | 1d430f64f6c7156920f92546770a1d2ddb558fea | [
"MIT"
]
| null | null | null | '''Test package for robosync'''
| 16.5 | 32 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.969697 |
f5cc6aee2d43d9f8f6fc9d61aea78cd19c169feb | 4,921 | py | Python | tadpole/template/app/lib/auth.py | echoyuanliang/pine | 22175e6aea0ca9b02d6542677b27a690c1501c9c | [
"MIT"
]
| 2 | 2017-12-02T07:02:31.000Z | 2020-10-13T02:20:18.000Z | tadpole/template/app/lib/auth.py | echoyuanliang/pine | 22175e6aea0ca9b02d6542677b27a690c1501c9c | [
"MIT"
]
| null | null | null | tadpole/template/app/lib/auth.py | echoyuanliang/pine | 22175e6aea0ca9b02d6542677b27a690c1501c9c | [
"MIT"
]
| 1 | 2018-04-23T04:59:38.000Z | 2018-04-23T04:59:38.000Z | #!/usr/bin/env python
# coding: utf-8
"""
create at 2017/11/22 by allen
"""
import re
from flask import request, session, current_app
from app.lib.constant import ResourceType
from app.models.auth import Resource, role_resource, Role, user_role, User
from app.lib.exceptions import AuthError, PermissionError
class HttpBasicAuth(object):
def __init__(self, user_loader, hash_password_handler=None,
verify_password_handler=None):
self.user_loader = user_loader
self.hash_password_handler = hash_password_handler
self.verify_password_handler = verify_password_handler
def hash_password(self, auth):
if self.hash_password_handler:
try:
return self.hash_password_handler(auth.password)
except Exception as e:
current_app.logger.exception(str(e))
try:
return self.hash_password_handler(auth.username, auth.password)
except Exception as e:
current_app.logger.exception(str(e))
return auth.password
def get_user(self, auth):
if not auth or not auth.username:
return None
user = self.user_loader(auth.username)
return user
def auth_user(self, auth):
if session.get('user_account'):
return self.user_loader(session['user_account'])
user = self.get_user(auth)
stored_password = user.password if user else None
if not stored_password:
return None
if self.verify_password_handler:
return self.verify_password_handler(auth.username, auth.password)
client_password = self.hash_password(auth)
if stored_password == client_password:
session['user_account'] = user.account
return user
return None
def __call__(self, auth):
return self.auth_user(auth=auth)
class AuthLoaderBase(object):
    @staticmethod
    def get_user_resources(user_id):
        raise NotImplementedError
    @staticmethod
    def load_user(account):
        raise NotImplementedError
    @staticmethod
    def load_resources(rtype, name, operation):
        raise NotImplementedError
class AuthDbLoader(AuthLoaderBase):
@staticmethod
def get_user_resources(user_id):
return Resource.query.join(role_resource, Role, user_role, User). \
filter(User.id == user_id)
@staticmethod
def load_user(account):
return User.get_by(account=account).first()
@staticmethod
def load_resources(rtype, name, operation):
http_resources = Resource.get_by(rtype=rtype)
return (resource for resource in http_resources
if re.match(resource.name, name) and
operation in resource.operation.split(','))
@staticmethod
def load_user_roles(user_id):
return Role.query.join(user_role).filter(user_id=user_id)
_auth_db_loader = AuthDbLoader()
_http_basic_auth = HttpBasicAuth(user_loader=_auth_db_loader.load_user)
class PermissionAuth(object):
def __init__(self, http_auth_handler=_http_basic_auth,
auth_info_loader=_auth_db_loader):
self.auth_info_loader = auth_info_loader
self.auth_handler = http_auth_handler
def validate_user_permission(self, user, resources):
user_resources = set(resource.id for
resource in self.auth_info_loader.
get_user_resources(user.id))
access_resources = set(resource.id for resource in resources)
if access_resources.issubset(user_resources):
return True
def is_root_user(self, user):
roles = self.auth_info_loader.load_user_roles(user.id)
return any(role.name == 'root' for role in roles)
def validate_request_permission(self):
path_resources = list(self.auth_info_loader.load_resources(
ResourceType.HTTP, request.path, request.method.upper()))
if not path_resources:
return True
user = self.auth_handler(request.authorization)
if not user:
            raise AuthError(u'authentication failed,'
u' please check your username or password')
# super admin, ignore permission
if self.is_root_user(user):
return True
# validate permission
if not self.validate_user_permission(user, path_resources):
            raise PermissionError(u'permission denied, you do not have'
                                  u' permission to do {0} on {1}'.format(
request.path, request.method.upper()))
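# Example wiring (illustrative sketch; in this template the hook registration normally lives
# in the app factory, which is not part of this module):
#
#   permission_auth = PermissionAuth()
#
#   @app.before_request
#   def check_request_permission():
#       permission_auth.validate_request_permission()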
| 30.190184 | 79 | 0.65251 | 4,486 | 0.911603 | 0 | 0 | 1,139 | 0.231457 | 0 | 0 | 313 | 0.063605 |
f5d03f80ba9950414b41050d76a8ec9d43425ee6 | 656 | py | Python | src/easy/plus_one_66.py | ahmet9cengiz/leetCode | 9e9a61f059072d7791dd19706b7a3e0d0a446669 | [
"MIT"
]
| null | null | null | src/easy/plus_one_66.py | ahmet9cengiz/leetCode | 9e9a61f059072d7791dd19706b7a3e0d0a446669 | [
"MIT"
]
| null | null | null | src/easy/plus_one_66.py | ahmet9cengiz/leetCode | 9e9a61f059072d7791dd19706b7a3e0d0a446669 | [
"MIT"
]
| null | null | null | class Solution(object):
# Time Complexity: O(n)
@staticmethod
def plus_one(digits):
keep_going = True
for i, e in reversed(list(enumerate(digits))):
if keep_going:
if e == 9:
digits[i] = 0
else:
digits[i] += 1
keep_going = False
else:
break
if keep_going:
new_digits = [1]
new_digits[1:] = [digits[i] for i in range(len(digits))]
return new_digits
return digits
if __name__ == '__main__':
s = Solution()
print(s.plus_one([9,9,9]))
| 24.296296 | 69 | 0.464939 | 577 | 0.879573 | 0 | 0 | 520 | 0.792683 | 0 | 0 | 33 | 0.050305 |
f5d07d12c4b5747b9b1b9f630c617df1ba338e16 | 1,607 | py | Python | timetracker/vms/test/models/test_client_admin_invite_model.py | comp523-jarvis/timetracker-web | af638f0b3aab8a69a974bdb9a18118198488657c | [
"Apache-2.0"
]
| 1 | 2019-04-09T16:46:53.000Z | 2019-04-09T16:46:53.000Z | timetracker/vms/test/models/test_client_admin_invite_model.py | comp523-jarvis/timetracker-web | af638f0b3aab8a69a974bdb9a18118198488657c | [
"Apache-2.0"
]
| 105 | 2018-10-12T17:57:20.000Z | 2020-06-05T19:35:21.000Z | timetracker/vms/test/models/test_client_admin_invite_model.py | comp523-jarvis/timetracker-web | af638f0b3aab8a69a974bdb9a18118198488657c | [
"Apache-2.0"
]
| 1 | 2019-04-11T14:43:42.000Z | 2019-04-11T14:43:42.000Z | from django.conf import settings
from django.template.loader import render_to_string
from vms import models
def test_accept(client_admin_invite_factory, user_factory):
"""
Accepting the invitation should create a new client admin for the
user who accepts.
"""
invite = client_admin_invite_factory()
user = user_factory()
admin = invite.accept(user)
assert admin.client == invite.client
assert models.ClientAdminInvite.objects.count() == 0
def test_send(client_admin_invite_factory, request_factory, mailoutbox):
"""
Sending the invitation should send an email to the email address
attached to the invite.
"""
request = request_factory.get('/')
invite = client_admin_invite_factory()
invite.send(request)
context = {
'accept_url': f'{request.get_host()}{invite.accept_url}',
'client': invite.client,
}
expected_msg = render_to_string(
'vms/emails/client-admin-invite.txt',
context=context,
)
assert len(mailoutbox) == 1
msg = mailoutbox[0]
assert msg.body == expected_msg
assert msg.from_email == settings.DEFAULT_FROM_EMAIL
assert msg.subject == 'Client Administrator Invitation'
assert msg.to == [invite.email]
def test_string_conversion(client_admin_invite_factory):
"""
Converting an invite to a string should return a string containing
the email it was sent to and the linked client.
"""
invite = client_admin_invite_factory()
expected = f'Admin invite for {invite.email} from {invite.client}'
assert str(invite) == expected
| 27.706897 | 72 | 0.701929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 534 | 0.332296 |
f5d0bd552a2206b2e1b134ade80b6b88f2ce3b53 | 3,489 | py | Python | _from_pydot/lambdas/dev/pyppeteer.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
]
| 3 | 2018-12-14T15:43:46.000Z | 2019-04-25T07:44:58.000Z | _from_pydot/lambdas/dev/pyppeteer.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
]
| 1 | 2019-05-11T14:19:37.000Z | 2019-05-11T14:51:04.000Z | _from_pydot/lambdas/dev/pyppeteer.py | owasp-sbot/pbx-gs-python-utils | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | [
"Apache-2.0"
]
| 4 | 2018-12-27T04:54:14.000Z | 2019-05-11T14:07:47.000Z | import base64
import os
import asyncio
from pbx_gs_python_utils.utils.Process import Process
from osbot_aws.Dependencies import load_dependency
def run(event, context):
load_dependency("pyppeteer") # (on first run downloads a zip file from S3 to /tmp/lambdas-dependencies/pyppeteer/ which contains
# the contents of `pip3 install pyppeteer - t pyppeteer` and the headless_shell file created by
# https://github.com/sambaiz/puppeteer-lambda-starter-kit
                                                # This command also adds /tmp/lambdas-dependencies/pyppeteer/ to sys.path
path_headless_shell = '/tmp/lambdas-dependencies/pyppeteer/headless_shell' # path to headless_shell AWS Linux executable
path_page_screenshot = '/tmp/screenshot.png' # path to store screenshot of url loaded
os.environ['PYPPETEER_HOME'] = '/tmp' # tell pyppeteer to use this read-write path in Lambda aws
target_url = event.get('url') # get url to load from lambda params
doc_type = event.get('doc_type')
async def get_screenshot(): # async method to run request
from pyppeteer import launch # import pyppeteer dependency
Process.run("chmod", ['+x', path_headless_shell]) # set the privs of path_headless_shell to execute
        browser = await launch(executablePath = path_headless_shell,           # launch chrome (i.e. headless_shell)
args = ['--no-sandbox','--single-process']) # two key settings or the requests will not work
page = await browser.newPage() # typical pyppeteer code, where we create a new Page object
await page.goto(target_url) # - open an url
await page.waitFor(2 * 1000); # To Remove
#await page.waitForNavigation(); not working
if doc_type and doc_type == 'pdf':
await page.pdf({'path': path_page_screenshot});
else:
await page.screenshot({'path': path_page_screenshot}) # - take a screenshot of the page loaded and save it
await browser.close() # - close the browser
asyncio.get_event_loop().run_until_complete(get_screenshot()) # event loop to start the run async method which will open the
# url provided in the lambda params and save it as an png
with open(path_page_screenshot, "rb") as image_file: # open path_page_screenshot file
encoded_png = base64.b64encode(image_file.read()).decode() # save it as a png string (base64 encoded to make it easier to return)
return { "base64_data" : encoded_png} # return value to Lambda caller
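# Example invocation payload (illustrative; the keys match the event.get() calls above,
# the URL itself is just a placeholder):
#   {"url": "https://www.example.com", "doc_type": "pdf"}    -> renders the page as a PDF
#   {"url": "https://www.example.com"}                       -> takes a PNG screenshot
# In both cases the file content comes back base64-encoded under "base64_data".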
| 67.096154 | 162 | 0.509888 | 0 | 0 | 0 | 0 | 0 | 0 | 1,402 | 0.401719 | 1,373 | 0.39341 |
f5d23a181d6fd76675487606efe26f43a22cb25e | 2,757 | py | Python | filter_plugins/net_textfsm_parse.py | iamroddo/ansible_helpers | 420b9d7a1bb637f52209aeeea4cd424d03cf4eef | [
"Apache-2.0"
]
| 44 | 2017-05-19T19:55:39.000Z | 2022-02-08T17:21:22.000Z | filter_plugins/net_textfsm_parse.py | iamroddo/ansible_helpers | 420b9d7a1bb637f52209aeeea4cd424d03cf4eef | [
"Apache-2.0"
]
| 2 | 2017-07-17T14:28:23.000Z | 2020-12-11T15:54:00.000Z | filter_plugins/net_textfsm_parse.py | iamroddo/ansible_helpers | 420b9d7a1bb637f52209aeeea4cd424d03cf4eef | [
"Apache-2.0"
]
| 18 | 2017-07-27T07:58:34.000Z | 2021-06-06T04:06:33.000Z | """
Filter to convert results from network device show commands obtained from ios_command,
eos_command, et cetera to structured data using TextFSM templates.
"""
from __future__ import unicode_literals
from __future__ import print_function
import os
from textfsm.clitable import CliTableError
import textfsm.clitable as clitable
def get_template_dir():
"""Find and return the ntc-templates/templates dir."""
try:
template_dir = os.environ['NET_TEXTFSM']
index = os.path.join(template_dir, 'index')
if not os.path.isfile(index):
# Assume only base ./ntc-templates specified
template_dir = os.path.join(template_dir, 'templates')
except KeyError:
# Construct path ~/ntc-templates/templates
home_dir = os.path.expanduser("~")
template_dir = os.path.join(home_dir, 'ntc-templates', 'templates')
index = os.path.join(template_dir, 'index')
if not os.path.isdir(template_dir) or not os.path.isfile(index):
msg = """
Valid ntc-templates not found, please install https://github.com/networktocode/ntc-templates
and then set the NET_TEXTFSM environment variable to point to the ./ntc-templates/templates
directory."""
raise ValueError(msg)
return template_dir
def get_structured_data(raw_output, platform, command):
"""Convert raw CLI output to structured data using TextFSM template."""
template_dir = get_template_dir()
index_file = os.path.join(template_dir, 'index')
textfsm_obj = clitable.CliTable(index_file, template_dir)
attrs = {'Command': command, 'Platform': platform}
try:
# Parse output through template
textfsm_obj.ParseCmd(raw_output, attrs)
return clitable_to_dict(textfsm_obj)
except CliTableError:
return raw_output
def clitable_to_dict(cli_table):
"""Converts TextFSM cli_table object to list of dictionaries."""
objs = []
for row in cli_table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[cli_table.header[index].lower()] = element
objs.append(temp_dict)
return objs
def net_textfsm_parse(output, platform, command):
"""Process config find interfaces using ip helper."""
try:
output = output['stdout'][0]
except (KeyError, IndexError, TypeError):
pass
return get_structured_data(output, platform, command)
class FilterModule(object):
"""Filter to convert results from network device show commands obtained from ios_command,
eos_command, et cetera to structured data using TextFSM templates."""
def filters(self):
return {
'net_textfsm_parse': net_textfsm_parse,
}
if __name__ == "__main__":
# Test code
pass
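# Example playbook usage (illustrative sketch; task and variable names are placeholders):
#
#   - ios_command:
#       commands: show version
#     register: sh_ver
#   - set_fact:
#       parsed: "{{ sh_ver | net_textfsm_parse('cisco_ios', 'show version') }}"
#
# The filter accepts either a raw string or the ios_command/eos_command result dict,
# since net_textfsm_parse() unwraps output['stdout'][0] itself.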
| 32.821429 | 93 | 0.696772 | 297 | 0.107726 | 0 | 0 | 0 | 0 | 0 | 0 | 1,026 | 0.372144 |
f5d2d84344ef95aeed5c0f078a4e133508f0ccd9 | 5,705 | py | Python | firebaseClient/firebaseClientGPIO.py | tabris2015/personCounter | 0cd7f8698afefdd9e913a97820b9ff9c01752274 | [
"MIT"
]
| null | null | null | firebaseClient/firebaseClientGPIO.py | tabris2015/personCounter | 0cd7f8698afefdd9e913a97820b9ff9c01752274 | [
"MIT"
]
| null | null | null | firebaseClient/firebaseClientGPIO.py | tabris2015/personCounter | 0cd7f8698afefdd9e913a97820b9ff9c01752274 | [
"MIT"
]
| null | null | null | #!/usr/bin/python
import threading
import Queue
import serial
import time
from datetime import datetime
from firebase import firebase
import sqlite3
from datetime import datetime, timedelta
from gpiozero import Button, LED
#///////////////////////////////////////////
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
#/////////////////////////////////////////////////
missed_events = []
DB_INTERVAL = 180
##### pin definitions
FAULT = LED(5)
FALLA = False
IN1 = 13
OUT1 = 6
IN2 = 26
OUT2 = 19
in1_button = Button(IN1, pull_up=False)
out1_button = Button(OUT1, pull_up=False)
in2_button = Button(IN2, pull_up=False)
out2_button = Button(OUT2, pull_up=False)
eventQueue = Queue.Queue()
####
connected = False
def queue_get_all(q):
items = []
maxItemsToRetreive = 10000
for numOfItemsRetrieved in range(0, maxItemsToRetreive):
try:
if numOfItemsRetrieved == maxItemsToRetreive:
break
items.append(q.get_nowait())
except:
break
return items
def in1Event():
print("in1!")
event_dic = {}
event_dic["tipo_marcado"] = 1
event_dic["fecha"] = datetime.utcnow()
event_dic["id_sensor"] = 1
eventQueue.put(event_dic)
def out1Event():
print("out1!")
event_dic = {}
event_dic["tipo_marcado"] = 0
event_dic["fecha"] = datetime.utcnow()
event_dic["id_sensor"] = 1
eventQueue.put(event_dic)
def in2Event():
print("in2!")
event_dic = {}
event_dic["tipo_marcado"] = 1
event_dic["fecha"] = datetime.utcnow()
event_dic["id_sensor"] = 2
eventQueue.put(event_dic)
def out2Event():
print("out2!")
event_dic = {}
event_dic["tipo_marcado"] = 0
event_dic["fecha"] = datetime.utcnow()
event_dic["id_sensor"] = 2
eventQueue.put(event_dic)
def periodicDBInsert(key):
insert_SQL = '''INSERT INTO personEvent(fecha, tipo_marcado, id_sensor) VALUES(?, ?, ?)'''
db = sqlite3.connect('/home/pi/projects/personCounter/firebaseClient/local.db')
c = db.cursor()
global DB_INTERVAL
global FALLA
#///////////////////
global missed_events
try:
print("conectando a la DB...")
cred = credentials.Certificate(key)
firebase_admin.initialize_app(cred)
dbFs = firestore.client()
FAULT.off()
FALLA = False
except:
FAULT.on()
FALLA = True
return
# for sqlite
while True:
if eventQueue.empty() and not missed_events:
print("no hay eventos!")
else:
print("insertando eventos...")
# for event in events:
# pushToLocalDB(db, event)
            # build the Firestore document
events = []
if not eventQueue.empty():
print("eventos nuevos en cola: ", eventQueue.qsize())
events = queue_get_all(eventQueue)
eventQueue.task_done()
try:
print("eventos perdidos en cola: ", len(missed_events))
total_events = events + missed_events
print("accediendo a coleccion...")
doc_data = {
'marcados':total_events,
'id_evento': 1,
}
######
events_sqlite = []
for event in total_events:
events_sqlite.append(
(
event['fecha'],
event['tipo_marcado'],
event['id_sensor']
)
)
c.executemany(insert_SQL, events_sqlite)
                print('inserting data into local db...')
db.commit()
######
                print('inserting data into remote db...')
doc_ref = dbFs.collection(u'marcados_eventos').document(unicode(datetime.now()))
doc_ref.set(doc_data)
##################
events = []
missed_events = []
FAULT.off()
FALLA = False
                print('db update finished!')
            except Exception as e:
                print(e)
                print('saving data to retry later...')
missed_events = events
FAULT.on()
FALLA = True
#c.executemany(insert_SQL, events2)
#db.commit()
#select_last_events(db)
events = []
time.sleep(DB_INTERVAL)
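# Assumed schema of the local sqlite table targeted by insert_SQL above (illustrative;
# the table itself is created outside this script):
#   CREATE TABLE IF NOT EXISTS personEvent (
#       fecha TEXT,
#       tipo_marcado INTEGER,
#       id_sensor INTEGER
#   );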
if __name__ == '__main__':
import argparse
    parser = argparse.ArgumentParser(description='person counter')
parser.add_argument('-key', required=True, action='store',help='path to key for remote connection')
args = parser.parse_args()
keyPath = ""
if args.key != None:
keyPath = args.key
#first_event = False
dbTh = threading.Thread(target=periodicDBInsert, args=(keyPath,))
#dbTh = threading.Timer(5, periodicDBInsert, args=(db,))
dbTh.daemon = True
# -----
dbTh.start()
###
#firebase = firebase.FirebaseApplication(URL, authentication=authentication)
in1_button.when_pressed = in1Event
out1_button.when_pressed = out1Event
in2_button.when_pressed = in2Event
out2_button.when_pressed = out2Event
while True:
if not FALLA:
FAULT.on()
time.sleep(0.1)
FAULT.off()
time.sleep(0.9)
else:
FAULT.on()
time.sleep(1)
FAULT.on()
FAULT.on()
| 26.784038 | 103 | 0.540053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,198 | 0.209991 |
f5d40b58d32d09631a74deab03cacd263794a4ed | 3,204 | py | Python | look-for.py | barnesrobert/find-aws-resource-in-all-accounts | 5f02aacca3ce3a28894d7d497c4158ed9b08c238 | [
"Apache-2.0"
]
| null | null | null | look-for.py | barnesrobert/find-aws-resource-in-all-accounts | 5f02aacca3ce3a28894d7d497c4158ed9b08c238 | [
"Apache-2.0"
]
| null | null | null | look-for.py | barnesrobert/find-aws-resource-in-all-accounts | 5f02aacca3ce3a28894d7d497c4158ed9b08c238 | [
"Apache-2.0"
]
| null | null | null | #--------------------------------------------------------------------------------------------------
# Function: look-for
# Purpose: Loops through all AWS accounts and regions within an Organization to find a specific resource
# Inputs:
#
# {
# "view_only": "true|false",
# "regions": ["us-east-1", ...]
# }
#
# Leave the regions section blank to apply to all regions
#
#--------------------------------------------------------------------------------------------------
import json
import boto3
import botocore
from botocore.exceptions import ClientError
from botocore.exceptions import EndpointConnectionError
sts_client = boto3.client('sts')
organizations_client = boto3.client('organizations')
#--------------------------------------------------------------------------------------------------
# Function handler
#--------------------------------------------------------------------------------------------------
def lambda_handler(event, context):
# Determine whether the user just wants to view the orphaned logs.
view_only = ('view_only' in event and event['view_only'].lower() == 'true')
regions = []
#--------------------------------------------------
# Determine which regions to include. Apply to all regions by default.
#--------------------------------------------------
if 'regions' in event and type(event['regions']) == list:
regions = event['regions']
# Get all regions if not otherwise specified.
if not regions:
region_response = boto3.client('ec2').describe_regions()
regions = [region['RegionName'] for region in region_response['Regions']]
# Loop through the accounts in the organization.
response = organizations_client.list_accounts()
for account in response['Accounts']:
if account['Status'] == 'ACTIVE':
member_account = sts_client.assume_role(
RoleArn='arn:aws:iam::{}:role/AWSControlTowerExecution'.format(account['Id']),
RoleSessionName='look_for'
)
loop_through_account(account['Id'], member_account, regions, view_only)
return
#--------------------------------------------------
# function: loop_through_account
#--------------------------------------------------
def loop_through_account(account_id, assumed_role, regions, view_only):
ACCESS_KEY = assumed_role['Credentials']['AccessKeyId']
SECRET_KEY = assumed_role['Credentials']['SecretAccessKey']
SESSION_TOKEN = assumed_role['Credentials']['SessionToken']
#--------------------------------------------------
# Iterate through the specified regions.
#--------------------------------------------------
for region in regions:
print({
"Account": account_id,
"Region": region
}
)
try:
# Create service client using the assumed role credentials, e.g. S3
client = boto3.client(
'SERVICE_NAME',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
region_name=region
)
for RESOURCE in client.METHOD()['RESOURCES']:
print('DO SOMETHING HERE')
        except botocore.exceptions.SERVICE_METHOD_ERROR as error:
print(ValueError(error))
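#--------------------------------------------------
# Example of filling in the placeholders above (illustrative only): listing the S3
# buckets visible from each account could look like
#     client = boto3.client('s3', aws_access_key_id=ACCESS_KEY, ...)
#     for bucket in client.list_buckets()['Buckets']:
#         print(bucket['Name'])
#--------------------------------------------------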
| 32.693878 | 105 | 0.542447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,696 | 0.529338 |
f5d6cff69b0e62527106143d8be0c05d4bcd4fe7 | 2,972 | py | Python | opennem/spiders/aemo/monitoring.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
]
| 22 | 2020-06-30T05:27:21.000Z | 2022-02-21T12:13:51.000Z | opennem/spiders/aemo/monitoring.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
]
| 71 | 2020-08-07T13:06:30.000Z | 2022-03-15T06:44:49.000Z | opennem/spiders/aemo/monitoring.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
]
| 13 | 2020-06-30T03:28:32.000Z | 2021-12-30T08:17:16.000Z | import logging
from typing import Any, Dict
from pydantic import ValidationError
from scrapy import Spider
from scrapy.http import Response
from opennem.pipelines.aemo.downloads import DownloadMonitorPipeline
from opennem.schema.aemo.downloads import AEMOFileDownloadSection
from opennem.utils.dates import parse_date
from opennem.utils.numbers import filesize_from_string
from opennem.utils.url import strip_query_string
class AEMOMonitorRelSpider(Spider):
name = "au.aemo.downloads"
start_urls = [
"https://aemo.com.au/en/energy-systems/electricity/national-electricity-market-nem/participate-in-the-market/registration",
"https://www.aemo.com.au/energy-systems/electricity/national-electricity-market-nem/nem-forecasting-and-planning/forecasting-and-planning-data/generation-information",
]
pipelines = set([DownloadMonitorPipeline])
def parse(self, response: Any) -> Dict[str, Any]:
file_downloads = []
source_title = response.css("title::text").get()
download_sections = response.xpath("//div[@class='file-list-wrapper']/..")
if not download_sections or len(download_sections) < 1:
raise Exception("{} spider could not find any download sections".format(self.name))
for download_section in download_sections:
date_text = download_section.css("div.field-publisheddate span::text").get()
if not date_text:
raise Exception(
"{} could not get download section published date".format(self.name)
)
published_date = parse_date(date_text)
publish_link_relative = download_section.css("a::attr(href)").get()
if not publish_link_relative:
raise Exception("{} could not get rel published link".format(self.name))
publish_link = response.urljoin(publish_link_relative)
publish_link = strip_query_string(publish_link)
download_title = download_section.css(".field-title::text").get()
download_size_raw = download_section.css(".field-size span::text").get()
download_size = None
if download_size_raw:
download_size, _ = filesize_from_string(download_size_raw)
# create a model from the extracted fields
section_model = None
try:
section_model = AEMOFileDownloadSection(
published_date=published_date,
filename=download_title,
download_url=publish_link,
file_size=download_size,
source_url=response.url,
source_title=source_title,
)
file_downloads.append(section_model)
except ValidationError as e:
self.log("Validation error: {}".format(e), logging.ERROR)
return {"_data": file_downloads, "items": file_downloads}
| 37.620253 | 175 | 0.657133 | 2,545 | 0.856326 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.224092 |
f5d87e21f9ec6f8ae018914ba1e9c0e382bc83dd | 319 | py | Python | python/13/servo.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
]
| 6 | 2022-03-05T02:36:57.000Z | 2022-03-12T12:31:27.000Z | python/13/servo.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
]
| null | null | null | python/13/servo.py | matsujirushi/raspi_parts_kouryaku | 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | [
"Apache-2.0"
]
| null | null | null | import wiringpi as pi
pi.wiringPiSetupGpio()
pi.pinMode(18, pi.PWM_OUTPUT)
pi.pwmSetMode(pi.PWM_MODE_MS)
pi.pwmSetClock(2)
pi.pwmSetRange(192000)
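# With the 19.2 MHz PWM base clock, pwmSetClock(2) and pwmSetRange(192000) give a
# 50 Hz (20 ms) servo frame. pwmWrite() below maps the angle i in [-90, 90] to a
# pulse width between 0.5 ms and 2.4 ms, expressed as a fraction of the 20 ms
# period times the range value.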
while True:
for i in list(range(-90, 90, 10)) + list(range(90, -90, -10)):
pi.pwmWrite(18, int(((i + 90) / 180 * (2.4 - 0.5) + 0.5) / 20 * 192000))
pi.delay(200)
| 26.583333 | 76 | 0.652038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5d9d9ea4f3e787d1de8f24aa36d4dcbede900ec | 2,549 | py | Python | src/vswarm/object_detection/blob_detector.py | Faust-Wang/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | [
"MIT"
]
| 21 | 2021-03-03T10:51:46.000Z | 2022-03-28T11:00:35.000Z | src/vswarm/object_detection/blob_detector.py | Faust-Wang/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | [
"MIT"
]
| 2 | 2021-07-21T07:57:16.000Z | 2022-03-17T12:41:51.000Z | src/vswarm/object_detection/blob_detector.py | hvourtsis/vswarm | d18ce643218c18ef1e762f40562104b2a0926ad7 | [
"MIT"
]
| 8 | 2021-02-27T14:29:55.000Z | 2022-01-05T19:40:38.000Z | import cv2 as cv
from geometry_msgs.msg import Pose2D
from vision_msgs.msg import (BoundingBox2D, Detection2D, Detection2DArray,
ObjectHypothesisWithPose)
THRESHOLD_MAX = 255
THRESHOLD = 240
class BlobDetector:
def __init__(self):
pass
def detect_multi(self, images):
detections_list = []
for image in images:
detections = self.detect(image)
detections_list.append(detections)
return detections_list
def detect(self, image):
# Convert to grayscale if needed
if image.ndim == 3:
image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
image_height, image_width = image.shape
image_area = image_height * image_width
# Apply (inverse) binary threshold to input image
mask = cv.threshold(image, THRESHOLD, THRESHOLD_MAX, cv.THRESH_BINARY_INV)[1]
# Dilate mask to find more reliable contours
# kernel = np.ones((5, 5), np.uint8)
# mask_dilated = cv.dilate(mask, kernel, iterations=1)
# Find external approximate contours in dilated mask
contours, hierarchy = cv.findContours(mask, cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
# Filter out contours that don't qualify as a detection
detections = []
for contour in contours:
            # Filter out if the contour touches the image border
x, y, w, h = cv.boundingRect(contour)
if x == 0 or y == 0 or x + w == image_width or y + h == image_height:
continue
# Filter out if the contour is too small
if cv.contourArea(contour) < 1e-4 * image_area:
continue
detections.append((x, y, w, h))
# Fill detections msg
detection_array_msg = Detection2DArray()
for detection in detections:
x, y, w, h = detection
center_x = x + w / 2.
center_y = y + h / 2.
bbox = BoundingBox2D()
bbox.center = Pose2D(x=center_x, y=center_y, theta=0)
bbox.size_x = w
bbox.size_y = h
object_hypothesis = ObjectHypothesisWithPose()
object_hypothesis.id = 0
object_hypothesis.score = 1.0
detection_msg = Detection2D()
detection_msg.bbox = bbox
detection_msg.results.append(object_hypothesis)
detection_array_msg.detections.append(detection_msg)
return detection_array_msg
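# Example usage (illustrative sketch; in vswarm the detector is normally fed from a ROS
# image pipeline, which is not shown here):
#   import numpy as np
#   detector = BlobDetector()
#   frame = np.full((480, 640), 255, dtype=np.uint8)   # bright background
#   frame[200:220, 300:330] = 0                        # one dark blob below THRESHOLD
#   detections_msg = detector.detect(frame)            # vision_msgs/Detection2DArray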
| 32.265823 | 85 | 0.59592 | 2,325 | 0.912122 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.170263 |
f5dc231bdf053f390dc67dc11fbefb6147ad20d2 | 188 | py | Python | setup.py | wicrep/triplet-reid | 251c24d828e223de75b45ae65aa3f38171f9676b | [
"MIT"
]
| null | null | null | setup.py | wicrep/triplet-reid | 251c24d828e223de75b45ae65aa3f38171f9676b | [
"MIT"
]
| null | null | null | setup.py | wicrep/triplet-reid | 251c24d828e223de75b45ae65aa3f38171f9676b | [
"MIT"
]
| null | null | null | from setuptools import find_packages, setup
setup(
name="triplet-reid",
version="0.1.0",
description="Triplet-based Person Re-Identification",
packages=find_packages(),
)
| 20.888889 | 57 | 0.712766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.324468 |
f5dc6d973bebdd28a311046ec3c5d189663906f8 | 530 | py | Python | sentences.py | vanatteveldt/perspectives | 6d537082b915ccde15031d94983bd2d575cdc380 | [
"MIT"
]
| null | null | null | sentences.py | vanatteveldt/perspectives | 6d537082b915ccde15031d94983bd2d575cdc380 | [
"MIT"
]
| null | null | null | sentences.py | vanatteveldt/perspectives | 6d537082b915ccde15031d94983bd2d575cdc380 | [
"MIT"
]
| null | null | null | import csv
import sys
from KafNafParserPy import KafNafParser
from naflib import *
woorden = [r['original'] for r in csv.DictReader(open("klimaatwoorden.csv"))]
o = csv.writer(sys.stdout)
o.writerow(["file", "sentence", "term", "text"])
for fn in sys.argv[1:]:
naf = KafNafParser(fn)
for klimaterm in find_terms(naf, woorden):
sent = get_sentence(naf, klimaterm)
text = " ".join([get_word(naf, t) for t in get_terms_in_sentence(naf, sent)])
o.writerow([fn, sent, klimaterm.get_lemma(), text])
| 27.894737 | 85 | 0.677358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.115094 |
f5dd11fe9a9263410d61440cc6794ca854255416 | 1,127 | py | Python | view/user/__init__.py | archever/flask-web | cd120f64deec31fd1a87285372abaa22fc379b9f | [
"MIT"
]
| null | null | null | view/user/__init__.py | archever/flask-web | cd120f64deec31fd1a87285372abaa22fc379b9f | [
"MIT"
]
| null | null | null | view/user/__init__.py | archever/flask-web | cd120f64deec31fd1a87285372abaa22fc379b9f | [
"MIT"
]
| null | null | null | # coding=utf-8
from flask import Blueprint, render_template, redirect, request
from controlers.user import UserCtr
from libs.login import login_user, logout_user, current_user
bp = Blueprint("user", __name__, url_prefix="/user")
@bp.route("/login", methods=["GET"])
def login_form():
return render_template("user/login.html")
@bp.route("/regist", methods=["GET"])
def regist_form():
return render_template("user/regist.html")
@bp.route("/logout", methods=["GET"])
def logout():
logout_user()
return redirect("/")
@bp.route("/login", methods=["POST"])
def login():
email = request.form.get("email")
password = request.form.get("password")
if not email or not password:
        raise AppError("invalid parameters")
user = UserCtr.login(email, password)
user.sid = login_user(user)
return redirect("/")
@bp.route("/regist", methods=["POST"])
def regist():
email = request.form.get("email")
password = request.form.get("password")
if not email or not password:
        raise AppError("invalid parameters")
user = UserCtr.regist(email, password)
    user.sid = login_user(user)
return redirect("/")
| 23.978723 | 60 | 0.668146 | 0 | 0 | 0 | 0 | 907 | 0.793526 | 0 | 0 | 203 | 0.177603 |
f5deb3f2744fe175063b1c389f169973e74ce044 | 9,607 | py | Python | recipes/Python/52275_sparse_dictionary_based_sparse_matrix/recipe-52275.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
]
| 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/52275_sparse_dictionary_based_sparse_matrix/recipe-52275.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
]
| 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/52275_sparse_dictionary_based_sparse_matrix/recipe-52275.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
]
| 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | #!/usr/bin/env python
import vector
import math, types, operator
"""
A sparse matrix class based on a dictionary, supporting matrix (dot)
product and a conjugate gradient solver.
In this version, the sparse class inherits from the dictionary; this
requires Python 2.2 or later.
"""
class sparse(dict):
"""
A complex sparse matrix
A. Pletzer 5 Jan 00/12 April 2002
Dictionary storage format { (i,j): value, ... }
where (i,j) are the matrix indices
"""
# no c'tor
def size(self):
" returns # of rows and columns "
nrow = 0
ncol = 0
for key in self.keys():
nrow = max([nrow, key[0]+1])
ncol = max([ncol, key[1]+1])
return (nrow, ncol)
def __add__(self, other):
res = sparse(self.copy())
for ij in other:
res[ij] = self.get(ij,0.) + other[ij]
return res
def __neg__(self):
return sparse(zip(self.keys(), map(operator.neg, self.values())))
def __sub__(self, other):
res = sparse(self.copy())
for ij in other:
res[ij] = self.get(ij,0.) - other[ij]
return res
def __mul__(self, other):
" element by element multiplication: other can be scalar or sparse "
try:
# other is sparse
nval = len(other)
res = sparse()
if nval < len(self):
for ij in other:
res[ij] = self.get(ij,0.)*other[ij]
else:
for ij in self:
res[ij] = self[ij]*other.get(ij,0j)
return res
except:
# other is scalar
return sparse(zip(self.keys(), map(lambda x: x*other, self.values())))
def __rmul__(self, other): return self.__mul__(other)
def __div__(self, other):
" element by element division self/other: other is scalar"
return sparse(zip(self.keys(), map(lambda x: x/other, self.values())))
def __rdiv__(self, other):
" element by element division other/self: other is scalar"
return sparse(zip(self.keys(), map(lambda x: other/x, self.values())))
def abs(self):
return sparse(zip(self.keys(), map(operator.abs, self.values())))
def out(self):
print '# (i, j) -- value'
for k in self.keys():
print k, self[k]
def plot(self, width_in=400, height_in=400):
import colormap
import Tkinter
cmax = max(self.values())
cmin = min(self.values())
offset = 0.05*min(width_in, height_in)
xmin, ymin, xmax, ymax = 0,0,self.size()[0], self.size()[1]
scale = min(0.9*width_in, 0.9*height_in)/max(xmax-xmin, ymax-ymin)
root = Tkinter.Tk()
frame = Tkinter.Frame(root)
frame.pack()
text = Tkinter.Label(width=20, height=10, text='matrix sparsity')
text.pack()
canvas = Tkinter.Canvas(bg="black", width=width_in, height=height_in)
canvas.pack()
button = Tkinter.Button(frame, text="OK?", fg="red", command=frame.quit)
button.pack()
for index in self.keys():
ix, iy = index[0], ymax-index[1]-1
ya, xa = offset+scale*(ix ), height_in -offset-scale*(iy )
yb, xb = offset+scale*(ix+1), height_in -offset-scale*(iy )
yc, xc = offset+scale*(ix+1), height_in -offset-scale*(iy+1)
yd, xd = offset+scale*(ix ), height_in -offset-scale*(iy+1)
color = colormap.strRgb(self[index], cmin, cmax)
canvas.create_polygon(xa, ya, xb, yb, xc, yc, xd, yd, fill=color)
root.mainloop()
def CGsolve(self, x0, b, tol=1.0e-10, nmax = 1000, verbose=1):
"""
Solve self*x = b and return x using the conjugate gradient method
"""
if not vector.isVector(b):
raise TypeError, self.__class__,' in solve '
else:
if self.size()[0] != len(b) or self.size()[1] != len(b):
print '**Incompatible sizes in solve'
print '**size()=', self.size()[0], self.size()[1]
print '**len=', len(b)
else:
                kvec = diag(self) # preconditioner
n = len(b)
x = x0 # initial guess
r = b - dot(self, x)
try:
w = r/kvec
except: print '***singular kvec'
p = vector.zeros(n);
beta = 0.0;
rho = vector.dot(r, w);
err = vector.norm(dot(self,x) - b);
k = 0
if verbose: print " conjugate gradient convergence (log error)"
while abs(err) > tol and k < nmax:
p = w + beta*p;
z = dot(self, p);
alpha = rho/vector.dot(p, z);
r = r - alpha*z;
w = r/kvec;
rhoold = rho;
rho = vector.dot(r, w);
x = x + alpha*p;
beta = rho/rhoold;
err = vector.norm(dot(self, x) - b);
if verbose: print k,' %5.1f ' % math.log10(err)
k = k+1
return x
def biCGsolve(self,x0, b, tol=1.0e-10, nmax = 1000):
"""
Solve self*x = b and return x using the bi-conjugate gradient method
"""
try:
if not vector.isVector(b):
raise TypeError, self.__class__,' in solve '
else:
if self.size()[0] != len(b) or self.size()[1] != len(b):
print '**Incompatible sizes in solve'
print '**size()=', self.size()[0], self.size()[1]
print '**len=', len(b)
else:
                    kvec = diag(self) # preconditioner
n = len(b)
x = x0 # initial guess
r = b - dot(self, x)
rbar = r
w = r/kvec;
wbar = rbar/kvec;
p = vector.zeros(n);
pbar = vector.zeros(n);
beta = 0.0;
rho = vector.dot(rbar, w);
err = vector.norm(dot(self,x) - b);
k = 0
print " bi-conjugate gradient convergence (log error)"
while abs(err) > tol and k < nmax:
p = w + beta*p;
pbar = wbar + beta*pbar;
z = dot(self, p);
alpha = rho/vector.dot(pbar, z);
r = r - alpha*z;
rbar = rbar - alpha* dot(pbar, self);
w = r/kvec;
wbar = rbar/kvec;
rhoold = rho;
rho = vector.dot(rbar, w);
x = x + alpha*p;
beta = rho/rhoold;
err = vector.norm(dot(self, x) - b);
print k,' %5.1f ' % math.log10(err)
k = k+1
return x
except: print 'ERROR ',self.__class__,'::biCGsolve'
def save(self, filename, OneBased=0):
"""
        Save matrix in file <filename> using format:
OneBased, nrow, ncol, nnonzeros
[ii, jj, data]
"""
m = n = 0
nnz = len(self)
for ij in self.keys():
m = max(ij[0], m)
n = max(ij[1], n)
f = open(filename,'w')
f.write('%d %d %d %d\n' % (OneBased, m+1,n+1,nnz))
for ij in self.keys():
i,j = ij
f.write('%d %d %20.17f \n'% \
(i+OneBased,j+OneBased,self[ij]))
f.close()
###############################################################################
def isSparse(x):
return hasattr(x,'__class__') and x.__class__ is sparse
def transp(a):
" transpose "
new = sparse({})
for ij in a:
new[(ij[1], ij[0])] = a[ij]
return new
def dotDot(y,a,x):
" double dot product y^+ *A*x "
if vector.isVector(y) and isSparse(a) and vector.isVector(x):
res = 0.
for ij in a.keys():
i,j = ij
res += y[i]*a[ij]*x[j]
return res
else:
print 'sparse::Error: dotDot takes vector, sparse , vector as args'
def dot(a, b):
" vector-matrix, matrix-vector or matrix-matrix product "
if isSparse(a) and vector.isVector(b):
new = vector.zeros(a.size()[0])
for ij in a.keys():
new[ij[0]] += a[ij]* b[ij[1]]
return new
elif vector.isVector(a) and isSparse(b):
new = vector.zeros(b.size()[1])
for ij in b.keys():
new[ij[1]] += a[ij[0]]* b[ij]
return new
elif isSparse(a) and isSparse(b):
if a.size()[1] != b.size()[0]:
print '**Warning shapes do not match in dot(sparse, sparse)'
new = sparse({})
n = min([a.size()[1], b.size()[0]])
for i in range(a.size()[0]):
for j in range(b.size()[1]):
sum = 0.
for k in range(n):
sum += a.get((i,k),0.)*b.get((k,j),0.)
if sum != 0.:
new[(i,j)] = sum
return new
else:
raise TypeError, 'in dot'
def diag(b):
# given a sparse matrix b return its diagonal
res = vector.zeros(b.size()[0])
for i in range(b.size()[0]):
res[i] = b.get((i,i), 0.)
return res
def identity(n):
if type(n) != types.IntType:
raise TypeError, ' in identity: # must be integer'
else:
new = sparse({})
for i in range(n):
new[(i,i)] = 1+0.
return new
###############################################################################
if __name__ == "__main__":
print 'a = sparse()'
a = sparse()
print 'a.__doc__=',a.__doc__
print 'a[(0,0)] = 1.0'
a[(0,0)] = 1.0
a.out()
print 'a[(2,3)] = 3.0'
a[(2,3)] = 3.0
a.out()
print 'len(a)=',len(a)
print 'a.size()=', a.size()
b = sparse({(0,0):2.0, (0,1):1.0, (1,0):1.0, (1,1):2.0, (1,2):1.0, (2,1):1.0, (2,2):2.0})
print 'a=', a
print 'b=', b
b.out()
print 'a+b'
c = a + b
c.out()
print '-a'
c = -a
c.out()
a.out()
print 'a-b'
c = a - b
c.out()
print 'a*1.2'
c = a*1.2
c.out()
print '1.2*a'
c = 1.2*a
c.out()
print 'a=', a
print 'dot(a, b)'
print 'a.size()[1]=',a.size()[1],' b.size()[0]=', b.size()[0]
c = dot(a, b)
c.out()
print 'dot(b, a)'
print 'b.size()[1]=',b.size()[1],' a.size()[0]=', a.size()[0]
c = dot(b, a)
c.out()
try:
print 'dot(b, vector.vector([1,2,3]))'
c = dot(b, vector.vector([1,2,3]))
c.out()
print 'dot(vector.vector([1,2,3]), b)'
c = dot(vector.vector([1,2,3]), b)
c.out()
print 'b.size()=', b.size()
except: pass
print 'a*b -> element by element product'
c = a*b
c.out()
print 'b*a -> element by element product'
c = b*a
c.out()
print 'a/1.2'
c = a/1.2
c.out()
print 'c = identity(4)'
c = identity(4)
c.out()
print 'c = transp(a)'
c = transp(a)
c.out()
b[(2,2)]=-10.0
b[(2,0)]=+10.0
try:
import vector
print 'Check conjugate gradient solver'
s = vector.vector([1, 0, 0])
print 's'
s.out()
x0 = s
print 'x = b.biCGsolve(x0, s, 1.0e-10, len(b)+1)'
x = b.biCGsolve(x0, s, 1.0e-10, len(b)+1)
x.out()
print 'check validity of CG'
c = dot(b, x) - s
c.out()
except: pass
print 'plot b matrix'
b.out()
b.plot()
print 'del b[(2,2)]'
del b[(2,2)]
print 'del a'
del a
#a.out()
| 22.819477 | 90 | 0.565525 | 5,822 | 0.606016 | 0 | 0 | 0 | 0 | 0 | 0 | 2,363 | 0.245966 |
f5dedc85895871ad1a7086cfc4fa5d80500516b2 | 7,557 | py | Python | bibref_parser/parser.py | glooney/python-bibref-parser | 9ca6b99a917659425fe7b4759f523c78f0180124 | [
"MIT"
]
| null | null | null | bibref_parser/parser.py | glooney/python-bibref-parser | 9ca6b99a917659425fe7b4759f523c78f0180124 | [
"MIT"
]
| null | null | null | bibref_parser/parser.py | glooney/python-bibref-parser | 9ca6b99a917659425fe7b4759f523c78f0180124 | [
"MIT"
]
| null | null | null | import re
class BibRefParser:
def __init__(self):
self.reset()
def reset(self, reference=''):
self._ref = reference
self.reference = reference
self.title = ''
self.authors = ''
# publication date
self.date = ''
self.publisher = ''
self._ref = self._normalise(self._ref)
@classmethod
def _normalise(cls, s):
return s.replace('“', '"').replace('”', '"').replace('–', '-')
def _extract(self, pattern, field, first=False):
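        # Look for `pattern` in the working reference; when the match is unique
        # (or `first` is set) replace it with a '{field}' placeholder and return
        # the captured group.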
ret = ''
matches = re.findall(pattern, self._ref)
if len(matches):
if (len(matches) == 1) or first:
match = matches[0]
self._ref = self._ref.replace(match[0], '{' + field + '}')
ret = match[1]
return ret
def parse(self, reference):
self.reset(reference)
# get quoted title
self.title = self._extract(r'("([^"]+)")', 'title')
datep = r'(\b(18|19|20)\d\d[abc]?\b)'
while not self.date:
# get bracketed year
self.date = self._extract(
r'(\([^)]*' + datep + r'[^)]*\))', 'date')
# get unique year
if not self.date:
self.date = self._extract(r'(' + datep + r')', 'date')
if not self.date:
self.date = self._extract(
r'(\. ' + datep + r'\.)', 'date'
)
if not self.date:
self.date = self._extract(
r'(, ' + datep + r'\.)', 'date'
)
if not self.date:
self.date = self._extract(
r'(, ' + datep + r',)', 'date'
)
# get unique year not preceded or followed by -
# if 0 and not self.date:
# self.date = self._extract(
# r'((?<![-0-9])' + datep + r'(?![-0-9]))', 'date')
# remove access date
if 1 and not self.date:
access_date = self._extract(
r'(\[[^\]]*' + datep + r'[^\]]*\])', 'access_date')
if not access_date:
break
else:
break
if self.date:
self._extract(r'({date}([.,;]))', 'date')
if 1 and self.title and not self.authors:
# anything in front of title (or date) that isn't a date
# catches 40% of authors on test set
self.authors = self._extract(
r'^((([^{](?!\d{4,4}))+))', 'authors',
)
# if 0:
# # author (without . or ,) -> title
# # Works sometimes BUT
# # NO: b/c title can be after
# if self.authors and not self.title:
# if not re.search(r'\.|,', self.authors):
# self.title = self.authors
# self.authors = ''
if 1 and not self.authors:
# the authors field most likely captured the title
# we need to split them
#
# #80, ACS
# Evans, D. A.; Fitch, D. M.; Smith, T. E.; Cee, V. J.
# #69, AMA
# Venkat Narayan, KM.
# #4, ?
# Bagdikian, B.H.
# 22, APA
# Greene, C. (Producer), del Toro, G.(Director)
#
            # a sentence with lowercase words (other than and/et) indicates a title
#
if not self.authors:
# #32, IEEE
# B. Klaus and P. Horn
# #34
# L. Bass, P. Clements, and R. Kazman
# #84
# W. Zeng, H. Yu, C. Lin
# self.authors = self._extract(
# r'^(((( ?[A-Z]{1,2}\.)+ [^.,]+[,.]( and)?)+))',
# 'authors1'
# )
self.authors = self._extract(
r'^((((^|,|,? and)( ?[A-Z]{1,2}\.)+ ([^,{.](?!and ))+)+))',
'authors1'
)
if not self.authors:
# #10 xxx
# Ellman, M., and F. Germano
# #19 APA
# Carter, S., & Dunbar-Odom, D.
# #20
# Gaudio, J. L., & Snowdon, C. T.
# included = [19, 80, 20, 69, 4, 22]
self.authors = self._extract(
# r'^((([^,.{]+,((| |-)[A-Z]{1,2}\.)+(\s*\([^)]+\))?,?)+))',
r'^((((^|,|,? (and|&) )[^,.{]+,((| |-)[A-Z]{1,2}\.)+(\s*\([^)]+\))?)+))',
'authors2'
)
if not self.authors:
# #49, MLA
# #50
# Smith, John, and Bob Anderson
# #51
# Campbell, Megan, et al.
self.authors = self._extract(
r'^(([A-Z][a-z]+, [A-Z][a-z]+[^.{]+\.))',
'authors3'
)
if 1 and not self.authors:
# #68, AMA
# Boyd B, Basic C, Bethem R, eds
# #70, AMA
# Guyton JL, Crockarell JR
# #76
# Florez H, Martinez R, Chakra W, Strickman-Stein M, Levis S
self.authors = self._extract(
r'^((((^| )[A-Z][a-z][-\w]* [A-Z]{1,2}[,.])+))',
'authors4'
)
if 1 and self.authors:
self.authors += self._extract(
r'(\{authors\d?\}((\.? ?(,? ?(et al|and others)\.?)?(,? ?[Ee]ds\.?))?))',
'authors9',
True
)
if 1 and not self.authors:
# authors = anything from start to . or {
# catches 80%
# BUT also a lot of FALSE POSITIVES
# (i.e. include title and other stuff in the authors)
# e.g. Goh, S. L. Polymer Chemistry
part = self._extract(
# r'^(([^{]+?))(?:\{|(?<![A-Z)])\.)',
r'^((((?<=[A-Z])\.|[^{.])+))',
'authors8'
)
if not self.title and (
re.match(r'(The|A|An) ', part)
# Fast facts
or (
re.search(r' [a-z]+\.?$', part)
and not re.search(r' et al\.?$', part)
)
):
self.title = part
else:
self.authors = part
if 0 and self.authors and not self.title:
# we might have captured the title in the authors
# Michael Pollan, The Omnivore's Dilemma
# if self.authors
pass
if self.authors and self.date and not self.title:
# title = anything between } and { with a dot in it
# assumes that the date is after the title
self.title = self._extract(
r'\}\s*\.*\s*(([^.{}]{2,}))', 'title',
True
)
# clean the title
if self.title:
# Crimson peak [Motion picture]
self.title = re.sub(r'\[[^\]]+\]$', '', self.title)
# The New Media Monopoly, Boston: Beacon Press
self.title = re.sub(r',[^,:]+:[^,:]+$', '', self.title)
self.title = self.title.strip(' ').strip(
'.').strip(',')
self.title = re.sub(r"^'(.+)'$", r"\1", self.title)
| 34.040541 | 93 | 0.382162 | 7,550 | 0.998281 | 0 | 0 | 117 | 0.01547 | 0 | 0 | 3,053 | 0.403676 |
f5e083f241a88c8c9d72629bf0fc59c5c51dd648 | 392 | py | Python | FlaskApp/sql_connection.py | pjneelam/pjneelam.eportfolio2022 | 3f55c1da6214e3eabab949ff83b34c0553c52866 | [
"CC-BY-3.0"
]
| null | null | null | FlaskApp/sql_connection.py | pjneelam/pjneelam.eportfolio2022 | 3f55c1da6214e3eabab949ff83b34c0553c52866 | [
"CC-BY-3.0"
]
| null | null | null | FlaskApp/sql_connection.py | pjneelam/pjneelam.eportfolio2022 | 3f55c1da6214e3eabab949ff83b34c0553c52866 | [
"CC-BY-3.0"
]
| null | null | null | #https://www.youtube.com/watch?v=f9PR1qcwOyg
#create global convention
import mysql.connector
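# module-level cache: every caller shares a single open connection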
__cnx=None
def get_sql_connection():
global __cnx
if __cnx is None:
__cnx = mysql.connector.connect(user='root', password='password',
host='127.0.0.1',
database='assignment2')
return __cnx
cnx.close() | 30.153846 | 74 | 0.584184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.285714 |
f5e2b3958e10bba2c1126d9063cd6d9ca99a6bc2 | 1,217 | py | Python | kernellib/utils/visualization.py | jejjohnson/kernellib | eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050 | [
"MIT"
]
| 1 | 2021-02-04T08:52:04.000Z | 2021-02-04T08:52:04.000Z | kernellib/utils/visualization.py | jejjohnson/kernellib | eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050 | [
"MIT"
]
| null | null | null | kernellib/utils/visualization.py | jejjohnson/kernellib | eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050 | [
"MIT"
]
| 1 | 2018-04-17T06:42:09.000Z | 2018-04-17T06:42:09.000Z | import matplotlib.pyplot as plt
def plot_gp(xtest, predictions, std=None, xtrain=None, ytrain=None, title=None, save_name=None):
xtest, predictions = xtest.squeeze(), predictions.squeeze()
fig, ax = plt.subplots()
# Plot the training data
if (xtrain is not None) and (ytrain is not None):
xtrain, ytrain = xtrain.squeeze(), ytrain.squeeze()
ax.scatter(xtrain, ytrain, s=100, color='r', label='Training Data')
# plot the testing data
ax.plot(xtest, predictions, linewidth=5,
color='k', label='Predictions')
# plot the confidence interval
if std is not None:
std = std.squeeze()
upper_bound = predictions + 1.960 * std
lower_bound = predictions - 1.960 * std
ax.fill_between(xtest, upper_bound, lower_bound,
                        color='red', alpha=0.2, label='95% Confidence Interval')
# ax.legend()
if title is not None:
ax.set_title(title)
ax.tick_params(
axis='both',
which='both',
bottom=False,
top=False,
left=False,
labelleft=False,
labelbottom=False)
if save_name:
fig.savefig(save_name)
else:
plt.show()
return fig
| 25.354167 | 97 | 0.612161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 166 | 0.136401 |
f5e3743f51af18cff1772397d3d93a0c7e89bca0 | 2,780 | py | Python | edit/editseries.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
]
| null | null | null | edit/editseries.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
]
| null | null | null | edit/editseries.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
]
| null | null | null | #!_PYTHONLOC
#
# (C) COPYRIGHT 2004-2021 Al von Ruff, Bill Longley and Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision$
# Date: $Date$
from isfdb import *
from isfdblib import *
from isfdblib_help import *
from isfdblib_print import *
from library import *
from SQLparsing import *
if __name__ == '__main__':
series_number = SESSION.Parameter(0, 'int')
series = SQLget1Series(series_number)
if not series:
SESSION.DisplayError('Record Does Not Exist')
PrintPreSearch('Series Editor')
PrintNavBar('edit/editseries.cgi', series_number)
help = HelpSeries()
printHelpBox('series', 'SeriesData')
print "Note:"
print "<ul>"
print "<li>Changing the Name field changes the name of the series for all books currently in this series."
print "<li>Changing the Parent field does NOT change the name of the parent series."
print "<li>If the Parent exists, changing the Parent field relinks the Named series to that parent."
print "<li>If the Parent does not exist, a new Parent series will be created and the Named series will be linked to that parent."
print "</ul>"
print "<hr>"
print "<p>"
print '<form id="data" METHOD="POST" ACTION="/cgi-bin/edit/submitseries.cgi">'
print '<table border="0">'
print '<tbody id="tagBody">'
# Display the series name
printfield("Name", "series_name", help, series[SERIES_NAME])
trans_series_names = SQLloadTransSeriesNames(series[SERIES_PUBID])
printmultiple(trans_series_names, "Transliterated Name", "trans_series_names", help)
# Display the name of this series' parent (if one exists)
parent_series_name = ''
if series[SERIES_PARENT]:
parent_series = SQLget1Series(series[SERIES_PARENT])
parent_series_name = parent_series[SERIES_NAME]
printfield("Parent", "series_parent", help, parent_series_name)
# Display this series' ordering position within its superseries
printfield("Series Parent Position", "series_parentposition", help, series[SERIES_PARENT_POSITION])
webpages = SQLloadSeriesWebpages(series[SERIES_PUBID])
printWebPages(webpages, 'series', help)
printtextarea('Note', 'series_note', help, SQLgetNotes(series[SERIES_NOTE]))
printtextarea('Note to Moderator', 'mod_note', help, '')
print '</tbody>'
print '</table>'
print '<p>'
print '<hr>'
print '<p>'
print '<input NAME="series_id" VALUE="%d" TYPE="HIDDEN">' % series_number
print '<input TYPE="SUBMIT" VALUE="Submit Data" tabindex="1">'
print '</form>'
print '<p>'
print '<hr>'
PrintPostSearch(0, 0, 0, 0, 0, 0)
| 32.325581 | 130 | 0.685612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,401 | 0.503957 |
f5e3d0985186fbf72ce1898f6d250fd384de7e07 | 2,154 | py | Python | sound.py | ITNano/soundserver | b84cbfd821987ad8af72a6c2677caa0b949abff6 | [
"MIT"
]
| null | null | null | sound.py | ITNano/soundserver | b84cbfd821987ad8af72a6c2677caa0b949abff6 | [
"MIT"
]
| null | null | null | sound.py | ITNano/soundserver | b84cbfd821987ad8af72a6c2677caa0b949abff6 | [
"MIT"
]
| null | null | null | import pyaudio
import numpy as np
import mixer
class Sound(object):
def __init__(self):
self.p = pyaudio.PyAudio()
self.mixers = []
self.streams = []
for i in range(self.p.get_device_count()-3):
self.streams.append(SoundcardStream(self.p, i))
def start_stream(self, index):
self.streams[index].start_stream()
def start_streams(self):
for stream in self.streams:
stream.start_stream()
def add_sound(self, index, sound):
self.streams[index].add_sound(sound)
def stop_stream(self, index):
self.streams[index].stop_stream()
def stop_streams(self):
for stream in self.streams:
stream.stop_stream()
def terminate(self):
for stream in self.streams:
stream.close()
self.p.terminate()
class SoundcardStream(object):
def __init__(self, p, soundcard, width=2, channels=2, rate=44100):
self.soundcard = soundcard
self.mixer = mixer.Mixer(width, channels, rate)
try:
print("Loading soundcard "+str(soundcard))
self.stream = p.open(format=p.get_format_from_width(width), channels=channels, rate=rate, output_device_index=soundcard, output=True, stream_callback=self.get_data)
except:
self.stream = None
print("Device unavailable (index "+str(soundcard)+")")
def get_data(self, in_data, frame_count, time_info, status):
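        # PyAudio stream callback: return the next frame_count frames of mixed
        # audio together with paContinue so the stream keeps running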
return (self.mixer.get_data(frame_count, time_info["input_buffer_adc_time"]), pyaudio.paContinue)
def add_sound(self, sound):
print("Adding sound to soundcard "+str(self.soundcard))
self.mixer.add_sound(sound)
def start_stream(self):
if self.stream is not None:
self.stream.start_stream()
def stop_stream(self):
if self.stream is not None:
self.stream.stop_stream()
def close(self):
if self.stream is not None:
self.stream.close()
self.mixer.close() | 32.149254 | 176 | 0.596565 | 2,087 | 0.968895 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.047354 |
f5e5cd56b7a8f566083c50626d4a1f1f2165bd63 | 2,284 | py | Python | noxutils.py | sphinx-contrib/zopeext | b749d0023f4fb8b8eea3a8f3216f63397c6272de | [
"BSD-2-Clause"
]
| 1 | 2020-03-16T07:20:58.000Z | 2020-03-16T07:20:58.000Z | noxutils.py | sphinx-contrib/zopeext | b749d0023f4fb8b8eea3a8f3216f63397c6272de | [
"BSD-2-Clause"
]
| 3 | 2021-12-19T09:39:45.000Z | 2022-01-06T05:05:03.000Z | noxutils.py | sphinx-contrib/zopeext | b749d0023f4fb8b8eea3a8f3216f63397c6272de | [
"BSD-2-Clause"
]
| null | null | null | """
From https://github.com/brechtm/rinohtype/blob/master/noxutil.py
https://github.com/cjolowicz/nox-poetry/discussions/289
"""
import json
from collections.abc import Iterable
from pathlib import Path
from typing import Optional
from urllib.request import urlopen, Request
from poetry.core.factory import Factory
from poetry.core.semver import parse_single_constraint as parse_version
VERSION_PARTS = ("major", "minor", "patch")
def get_versions(
dependency: str,
granularity: str = "minor",
# ascending: bool = False, limit: Optional[int] = None,
# allow_prerelease: bool = False,
) -> Iterable[str]:
"""Yield all versions of `dependency` considering version constraints
Args:
dependency: the name of the dependency
granularity: yield only the newest patch version of each major/minor
release
ascending: count backwards from latest version, by default (not much
use without the 'limit' arg)
limit: maximum number of entries to return
allow_prerelease: whether to include pre-release versions
    Returns:
        All versions of `dependency` that match the version constraints defined
        in this project's pyproject.toml, reduced according to `granularity`.
"""
package = Factory().create_poetry(Path(__file__).parent).package
for requirement in package.requires:
if requirement.name == dependency:
break
else:
raise ValueError(f"{package.name} has no dependency '{dependency}'")
filtered_versions = [
version
for version in all_versions(dependency)
if requirement.constraint.allows(version)
]
parts = VERSION_PARTS[: VERSION_PARTS.index(granularity) + 1]
result = {}
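    # keep only the newest release within each (major[, minor[, patch]]) bucket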
for version in filtered_versions:
key = tuple(getattr(version, part) for part in parts)
result[key] = max((result[key], version)) if key in result else version
return [str(version) for version in result.values()]
def all_versions(dependency):
request = Request(f"https://pypi.org/pypi/{dependency}/json")
response = urlopen(request)
json_string = response.read().decode("utf8")
json_data = json.loads(json_string)
yield from (parse_version(version) for version in json_data["releases"])
| 35.138462 | 79 | 0.700088 | 0 | 0 | 293 | 0.128284 | 0 | 0 | 0 | 0 | 985 | 0.431261 |
f5e6032fc8e0c3163e2cd3542bdd970f3cb1268b | 423 | py | Python | tbutton_maker/admin.py | codefisher/tbutton_web | 357bddc26b42c8511e7b5ce087bb0ac115f97e4c | [
"MIT"
]
| null | null | null | tbutton_maker/admin.py | codefisher/tbutton_web | 357bddc26b42c8511e7b5ce087bb0ac115f97e4c | [
"MIT"
]
| null | null | null | tbutton_maker/admin.py | codefisher/tbutton_web | 357bddc26b42c8511e7b5ce087bb0ac115f97e4c | [
"MIT"
]
| null | null | null | from django.contrib import admin
from tbutton_web.tbutton_maker.models import Application, Button, DownloadSession, UpdateSession
class DownloadSessionAdmin(admin.ModelAdmin):
list_display = ['time', 'query_string']
admin.site.register(DownloadSession, DownloadSessionAdmin)
class UpdateSessionAdmin(admin.ModelAdmin):
list_display = ['time', 'query_string']
admin.site.register(UpdateSession, UpdateSessionAdmin) | 42.3 | 96 | 0.820331 | 176 | 0.416076 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.094563 |
f5e6080e840c71c64f246a6744ac59598bb42ed0 | 1,359 | py | Python | abi_recursion.py | Abirami33/python-75-hackathon | c15505615d92cf304c27eabd3136406b08c59078 | [
"MIT"
]
| null | null | null | abi_recursion.py | Abirami33/python-75-hackathon | c15505615d92cf304c27eabd3136406b08c59078 | [
"MIT"
]
| null | null | null | abi_recursion.py | Abirami33/python-75-hackathon | c15505615d92cf304c27eabd3136406b08c59078 | [
"MIT"
]
| null | null | null | #PASCALS TRIANGLE USING RECURSION
def pascal(n):
if n == 0: #if 0 number of rows
return [] #return a null list
elif n == 1: #if 1 row
return [[1]] #return a list with 1
else:
initial= [1] #initial list contains 1 as first element
ret = pascal(n-1) #recursively pass n-1 to function
        final = ret[-1] # last row computed so far (index -1)
for i in range(len(final)-1):
            initial.append(final[i] + final[i+1]) # each entry is the sum of the two entries above it
initial=initial+[1]
ret.append(initial) #append it and set it as initial
return ret #return the whole list of lists
if __name__ == "__main__":
print("Enter the number of rows:")
n=int(input()) #getting user input
print(pascal(n)) #call the pascal triangle function
''' OUTPUT:Enter the number of rows:5
[[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]] '''
| 48.535714 | 106 | 0.40103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 561 | 0.412804 |
f5e6d7bb0bd30f9540f1c0b749f54516092b6ca3 | 3,806 | py | Python | nodes/centered_mocap_and_tag_rebroadcaster.py | rislab/apriltag_tracker | 41c4deb4b5bcd94e5f666f3d4b1f1d141c705582 | [
"BSD-3-Clause"
]
| null | null | null | nodes/centered_mocap_and_tag_rebroadcaster.py | rislab/apriltag_tracker | 41c4deb4b5bcd94e5f666f3d4b1f1d141c705582 | [
"BSD-3-Clause"
]
| null | null | null | nodes/centered_mocap_and_tag_rebroadcaster.py | rislab/apriltag_tracker | 41c4deb4b5bcd94e5f666f3d4b1f1d141c705582 | [
"BSD-3-Clause"
]
| 1 | 2019-02-18T00:40:20.000Z | 2019-02-18T00:40:20.000Z | #!/usr/bin/env python2.7
from __future__ import division
import roslib
import rospy
import tf
from nav_msgs.msg import Odometry
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
import numpy as np
import pdb
from message_filters import Subscriber, ApproximateTimeSynchronizer
class GT_cleaner:
def __init__(self):
self.init = [False, False]
self.broadcaster = tf.TransformBroadcaster()
self.mocap_pub = rospy.Publisher(
'/gt_clean_odom', Odometry, queue_size=10)
self.april_pub = rospy.Publisher(
'/april_clean_odom', Odometry, queue_size=10)
self.first_quat = None
self.first_pos = np.array([0, 0, 0])
self.prev_frame = [np.eye(4), np.eye(4)]
self.first_frame = [np.eye(4),np.eye(4)]
self.first_frame_inv = [np.eye(4),np.eye(4)]
self.last_time = [rospy.Time.now(),rospy.Time.now()]
self.sub = ApproximateTimeSynchronizer([Subscriber("/mocap/odom", Odometry),Subscriber("/apriltag_tracker/odom", Odometry)],100, 0.05)
self.sub.registerCallback(self.callback)
def callback(self, mocap_msg, odom_msg):
for i,msg in enumerate([mocap_msg, odom_msg]):
q = msg.pose.pose.orientation
p = msg.pose.pose.position
quat = np.array([q.x, q.y, q.z, q.w])
pos = np.array([p.x, p.y, p.z])
frame = tf.transformations.quaternion_matrix(quat)
frame[:3, 3] = pos
if i==1:
frame = np.linalg.inv(frame) # Because track tag in body is the other way around
if self.init[i] == False:
self.last_time[i] = msg.header.stamp
self.init[i] = True
self.first_frame[i] = frame
self.first_frame_inv[i] = np.linalg.inv(frame)
continue
dt = (msg.header.stamp - self.last_time[i]).to_sec()
self.last_time[i] = msg.header.stamp
frame_in_first = np.dot(self.first_frame_inv[i], frame)
# add to path
odom = Odometry()
odom.header.frame_id = msg.header.frame_id
odom.pose.pose.position.x = frame_in_first[0, 3]
odom.pose.pose.position.y = frame_in_first[1, 3]
odom.pose.pose.position.z = frame_in_first[2, 3]
q = tf.transformations.quaternion_from_matrix(frame_in_first)
odom.pose.pose.orientation.x = q[0]
odom.pose.pose.orientation.y = q[1]
odom.pose.pose.orientation.z = q[2]
odom.pose.pose.orientation.w = q[3]
odom.header.stamp = msg.header.stamp
#Now time for the velocities
# Get the delta transform to obtain the velocities
delta_frame = np.dot(np.linalg.inv(self.prev_frame[i]), frame_in_first)
self.prev_frame[i] = frame_in_first
# Linear part is easy
odom.twist.twist.linear.x = delta_frame[0,3]/dt
odom.twist.twist.linear.y = delta_frame[1,3]/dt
odom.twist.twist.linear.z = delta_frame[2,3]/dt
# For the angular velocity, we compute the angle axis
result = tf.transformations.rotation_from_matrix(delta_frame)
angle = result[0]
direction = result[1]
omega = direction * angle/dt
odom.twist.twist.angular.x = omega[0]
odom.twist.twist.angular.y = omega[1]
odom.twist.twist.angular.z = omega[2]
if i == 0:
self.mocap_pub.publish(odom)
else:
self.april_pub.publish(odom)
if __name__ == '__main__':
rospy.init_node('gt_cleaner', anonymous=True)
cleaner_obj = GT_cleaner()
rospy.spin()
| 37.313725 | 142 | 0.59196 | 3,379 | 0.887809 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.087756 |
f5e74389c152886253bc86c73ff3f6d23bab1e6e | 3,266 | py | Python | garage.py | DidymusRex/garage-pi | 4f4dcc0251f8cb5f5150ddaff7dac01a64eac948 | [
"CC0-1.0"
]
| null | null | null | garage.py | DidymusRex/garage-pi | 4f4dcc0251f8cb5f5150ddaff7dac01a64eac948 | [
"CC0-1.0"
]
| null | null | null | garage.py | DidymusRex/garage-pi | 4f4dcc0251f8cb5f5150ddaff7dac01a64eac948 | [
"CC0-1.0"
]
| null | null | null | from datetime import datetime
from gpiozero import DistanceSensor
from garage_door import garage_door
from garage_camera import garage_camera
from MQTT_Config import *   # provides mqtt_broker, mqtt_topic, mqtt_account, mqtt_passwd
import paho.mqtt.client as mqtt
from temp_sensor import temp_sensor
from time import sleep
"""
GPIO pin assignments:
relays
range finder sensor (echo passes thru voltage converter)
    DHT11 temperature/humidity sensor
"""
GPIO_Pins = {'temp_1':21,
'relay_1':6,
'relay_2':12,
'trig_1':17,
'echo_1':18,
'trig_2':22,
'echo_2':23}
"""
MQTT connect callback
Subscribing in on_connect() means that if we lose the connection and
reconnect then subscriptions will be renewed.
"""
def on_connect(client, userdata, flags, rc):
client.subscribe(mqtt_topic)
"""
MQTT receive message callback (garage/command)
Take action on a subject
"""
def on_message(client, userdata, msg):
print("message received ", str(msg.payload.decode("utf-8")))
print("message topic=", msg.topic)
print("message qos=", msg.qos)
print("message retain flag=", msg.retain)
cmd = str(msg.payload.decode("utf-8")).split(",")
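    # expected payload: "<subject>,<action>", e.g. "left,open" or "camera,still"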
bad_command = False
if len(cmd) == 2:
(subject, action) = cmd
if subject in garage_doors:
if action == "open":
garage_doors[subject].open()
elif action == "close":
garage_doors[subject].close()
elif action == "check":
garage_doors[subject].get_position()
else:
bad_command = True
elif subject == "dht11":
dht11.check_temp()
elif subject == "camera":
if action == "still":
garage_cam.take_still()
else:
bad_command = True
else:
bad_command = True
else:
bad_command = True
if bad_command:
print("Invalid payload {}".format(msg.payload.decode("utf-8")))
"""
MQTT publish callback
Mainly for debugging
"""
def on_publish(client, userdata, mid):
print("message id {} published".format(mid))
"""
Just in case
"""
def main():
pass
"""
Create client and connect it to the MQTT broker
"""
mqc = mqtt.Client("garage-pi", clean_session=True)
mqc.on_connect = on_connect
mqc.on_message = on_message
mqc.on_publish = on_publish
mqc.username_pw_set(mqtt_account, mqtt_passwd)
mqc.connect(mqtt_broker)
mqc.loop_start()
mqc.publish("garage/foo", "go!")
"""
Create temperature sensor object
"""
dht11 = temp_sensor(mqc, GPIO_Pins['temp_1'])
"""
Create garage camera object
"""
garage_cam = garage_camera(mqc)
"""
Create garage door objects
"""
garage_doors = dict()
garage_doors["left"] = garage_door(mqc,
"left",
GPIO_Pins['relay_1'],
GPIO_Pins['echo_1'],
GPIO_Pins['trig_1'])
garage_doors["right"] = garage_door(mqc,
"right",
GPIO_Pins['relay_2'],
GPIO_Pins['echo_2'],
GPIO_Pins['trig_2'])
if __name__ == "__main__":
main()
| 26.33871 | 72 | 0.580527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 973 | 0.297918 |
f5e7ef3d480cf9bb53271fcd48200dc95c179ef9 | 5,887 | py | Python | app.py | leemengtaiwan/gist-evernote | 90d8573870ded37dc82575ba25968d7a06efe219 | [
"MIT"
]
| 35 | 2018-01-29T00:50:36.000Z | 2021-04-04T13:59:26.000Z | app.py | leemengtaiwan/gist-evernote | 90d8573870ded37dc82575ba25968d7a06efe219 | [
"MIT"
]
| 5 | 2021-02-08T20:18:24.000Z | 2022-03-11T23:15:12.000Z | app.py | leemengtaiwan/gist-evernote | 90d8573870ded37dc82575ba25968d7a06efe219 | [
"MIT"
]
| 4 | 2018-02-06T12:13:09.000Z | 2019-12-20T09:12:41.000Z | # encoding: utf-8
import os
import time
from multiprocessing import Pool, cpu_count
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from enote.util import get_note, get_notebook, get_notebooks, \
create_resource, create_note, create_notebook, update_note
from github.util import get_user_name, get_all_gists
from web.util import fullpage_screenshot, get_gist_hash, create_chrome_driver
from settings import NOTEBOOK_TO_SYNC
from db import get_db
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
GIST_BASE_URL = 'https://gist.github.com'
notebook = None
github_user = get_user_name() # get current login github user for fetching gist content
db = get_db() # database to store synchronization info
def app():
start = time.time()
global notebook
# find notebook to put new notes
notebooks = get_notebooks()
for n in notebooks:
if n.name == NOTEBOOK_TO_SYNC:
notebook = get_notebook(n.guid)
# create notebook with the specified name if not found
if not notebook:
notebook = create_notebook(NOTEBOOK_TO_SYNC)
print('Using notebook: %s' % notebook.name)
# initialize, get all available gists
if db.is_empty() or db.is_cold_start():
gists = get_all_gists()
# sync only gists that were pushed after last synchronization
else:
last_sync_date = db.get_last_sync()
print("Find gists that are updated after last sync (UTC): {}".format(last_sync_date))
gists = get_all_gists(after_date=last_sync_date)
print("Total number of gists to be synchronized: %d" % len(gists))
# headless mode to reduce overhead and distraction
driver = create_chrome_driver() if gists else None
for gist in gists:
_ = sync_gist(gist, driver=driver)
if driver:
driver.quit()
# TODO multi-processes + mysql
# setup multiple selenium drivers to speed up if multiple cpu available
# num_processes = min(4, cpu_count() - 1) if cpu_count() > 1 else 1
# print("Number of %d processes being created" % num_processes)
# pool = Pool(num_processes)
#
# notes = pool.map(sync_gist, gists)
#
# pool.terminate()
# pool.close()
# pool.join()
# sync all gists successfully, set to warm-start mode
if db.is_cold_start():
db.toggle_cold_start()
print("Synchronization took {:.0f} seconds.".format(time.time() - start))
def sync_gist(gist, driver):
"""Sync the Github gist to the corresponding Evernote note.
Create a new Evernote note if there is no corresponding one with the gist.
Overwrite existing note's content if gist has been changed.
Parameters
----------
gist : dict
A Gist acquired by Github GraphQL API with format like:
{
'id': 'gist_id',
'name': 'gist_name',
'description': 'description',
'pushAt': '2018-01-15T00:48:23Z'
}
driver : selenium.webdriver
The web driver used to access gist url
Returns
-------
note : evernote.edam.type.ttpyes.Note
None if no new note created or updated
"""
note_exist = False
gist_url = '/'.join((GIST_BASE_URL, gist['name']))
# check existing gist hash before fetch if available
prev_hash = db.get_hash_by_id(gist['id'])
note_guid = db.get_note_guid_by_id(gist['id'])
if prev_hash and note_guid:
note_exist = True
cur_hash = get_gist_hash(github_user, gist['name'])
if prev_hash == cur_hash:
print('Gist {} remain the same, ignore.'.format(gist_url))
db.update_gist(gist, note_guid, cur_hash)
return None
driver.get(gist_url)
# wait at most x seconds for Github rendering gist context
delay_seconds = 10
try:
WebDriverWait(driver, delay_seconds).until(EC.presence_of_element_located((By.CLASS_NAME, 'is-render-ready')))
except TimeoutException:
print("Take longer than {} seconds to load page.".format(delay_seconds))
# get first file name as default note title
gist_title = driver.find_element(By.CLASS_NAME, 'gist-header-title>a').text
    # take a screenshot of the gist and save it temporarily
image_path = 'images/{}.png'.format(gist['name'])
fullpage_screenshot(driver, image_path)
# build skeleton for note (including screenshot)
resource, _ = create_resource(image_path)
note_title = gist['description'] if gist['description'] else gist_title
note_body = format_note_body(gist)
# get hash of raw gist content and save gist info to database
gist_hash = get_gist_hash(github_user, gist['name'])
# create new note / update existing note
if not note_exist:
note = create_note(note_title, note_body, [resource], parent_notebook=notebook)
db.save_gist(gist, note.guid, gist_hash)
else:
note = get_note(note_guid)
update_note(note, note_title, note_body, note_guid, [resource])
db.update_gist(gist, note_guid, gist_hash)
os.remove(image_path)
print("Finish creating note for gist {}".format(gist_url))
return note
def format_note_body(gist):
"""Create the note content that will be shown before attachments.
Parameters
----------
gist : dict
Dict that contains all information of the gist
Returns
-------
note_body : str
"""
blocks = []
desc = gist['description']
if desc:
blocks.append(desc)
gist_url = '/'.join((GIST_BASE_URL, gist['name']))
blocks.append('<a href="{}">Gist on Github</a>'.format(gist_url))
note_body = '<br/>'.join(blocks)
return note_body
if __name__ == '__main__':
app()
| 31.821622 | 118 | 0.674367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,532 | 0.4301 |
f5e7fdab1587e4d6e66ab3defb25c9ecd73fb773 | 20 | py | Python | hello-fortran-dependency/hello/__init__.py | Nicholaswogan/skbuild-f2py-examples | e47d0a9ce483e54b678e31789dbfcc90ff4a8e74 | [
"MIT"
]
| 4 | 2021-07-28T02:16:52.000Z | 2021-12-23T00:20:21.000Z | hello-fortran-dependency/hello/__init__.py | Nicholaswogan/skbuild-f2py-examples | e47d0a9ce483e54b678e31789dbfcc90ff4a8e74 | [
"MIT"
]
| 1 | 2021-09-14T21:17:49.000Z | 2021-09-14T23:17:47.000Z | hello-fortran-dependency/hello/__init__.py | Nicholaswogan/skbuild-f2py-examples | e47d0a9ce483e54b678e31789dbfcc90ff4a8e74 | [
"MIT"
]
| null | null | null | from .hola import *
| 10 | 19 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5e81680dbe98070292ce77eaa7479aa8b7e1630 | 326 | py | Python | python-leetcode/350.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
]
| 12 | 2020-01-16T08:55:27.000Z | 2021-12-02T14:52:39.000Z | python-leetcode/350.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
]
| null | null | null | python-leetcode/350.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
]
| 1 | 2019-12-11T12:00:38.000Z | 2019-12-11T12:00:38.000Z | import collections
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
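        # count occurrences in nums1, then consume matching counts while scanning nums2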
m = collections.Counter(nums1)
result = []
for num in nums2:
if num in m:
result.append(num)
if m[num] == 1:
del m[num]
else:
m[num] -= 1
return result
| 21.733333 | 71 | 0.546012 | 305 | 0.935583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5e9dfce4e604e5d08d5833b9e96482b6754ad47 | 217 | py | Python | finally.py | rkjin/algorithm | 5661dd621a43bcbb37b4113fd0918854e7a24310 | [
"Apache-2.0"
]
| null | null | null | finally.py | rkjin/algorithm | 5661dd621a43bcbb37b4113fd0918854e7a24310 | [
"Apache-2.0"
]
| null | null | null | finally.py | rkjin/algorithm | 5661dd621a43bcbb37b4113fd0918854e7a24310 | [
"Apache-2.0"
]
| null | null | null | import numpy as np
if __name__ == '__main__':
try:
f = open('test_file.txt', 'w')
f.write('this is exception finally')
except Exception as e:
pass
finally:
        f.close()
pass
| 15.5 | 44 | 0.557604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.253456 |
f5ea24e7021ff1af76d60fe6869f59dd63386b1e | 198 | py | Python | autokey/data/Emacs/c_g.py | Curiosidad-Racional/.config | af5a8901510e4b87dff1be024d3d29987c148f3f | [
"MIT"
]
| 2 | 2021-05-29T18:11:26.000Z | 2021-10-21T20:53:16.000Z | autokey/data/Emacs/c_g.py | Curiosidad-Racional/.config | af5a8901510e4b87dff1be024d3d29987c148f3f | [
"MIT"
]
| null | null | null | autokey/data/Emacs/c_g.py | Curiosidad-Racional/.config | af5a8901510e4b87dff1be024d3d29987c148f3f | [
"MIT"
]
| null | null | null | import os
store.set_global_value("ctrl-space", False)
with open(os.path.expanduser("~/.config/polybar/keys.fifo"), "wb") as f:
f.write(b"TITLE:\n")
store.set_global_value("emacs-chain-keys", []) | 39.6 | 72 | 0.712121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.373737 |
f5eaea013c4c8e9169d5648e9946cf1e2ab0fb60 | 520 | py | Python | lupin/fields/__init__.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
]
| 22 | 2017-10-18T08:27:20.000Z | 2022-03-25T18:53:43.000Z | lupin/fields/__init__.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
]
| 5 | 2019-09-16T15:31:55.000Z | 2022-02-10T08:29:14.000Z | lupin/fields/__init__.py | Clustaar/lupin | 9ef73642d84a99adb80abf5a922a9422ddae9254 | [
"MIT"
]
| null | null | null | from .field import Field # NOQA
from .datetime_field import DateTime # NOQA
from .date import Date # NOQA
from .string import String # NOQA
from .object import Object # NOQA
from .list import List # NOQA
from .polymorphic_object import PolymorphicObject # NOQA
from .polymorphic_list import PolymorphicList # NOQA
from .constant import Constant # NOQA
from .int import Int # NOQA
from .float import Float # NOQA
from .number import Number # NOQA
from .bool import Bool # NOQA
from .dict import Dict # NOQA
| 34.666667 | 57 | 0.757692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.161538 |
f5edd88e2d458d89d6714005f92ae5a2d900050e | 564 | py | Python | polls/urls.py | SkyFlame00/webpolls | d137da1aaaa8af78520af7762b8002428842d617 | [
"MIT"
]
| null | null | null | polls/urls.py | SkyFlame00/webpolls | d137da1aaaa8af78520af7762b8002428842d617 | [
"MIT"
]
| null | null | null | polls/urls.py | SkyFlame00/webpolls | d137da1aaaa8af78520af7762b8002428842d617 | [
"MIT"
]
| null | null | null | from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('logout/', views.logoutView, name='logout'),
path('signup/', views.signup, name='signup'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),
path('myprofile/', views.myprofile, name='myprofile'),
path('myprofile/edit/', views.myprofile_edit, name='myprofile_edit'),
path('testing', views.testing, name='testing')
]
| 37.6 | 132 | 0.654255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 216 | 0.382979 |
f5ee0fc5d74aae0b09b30c0e37603f02a2ea4deb | 14,918 | py | Python | forceDAQ/gui/plotter.py | gftabor/pyForceDAQ | 3eababb41d855b961d228d8366fdd154bb6314ea | [
"MIT"
]
| null | null | null | forceDAQ/gui/plotter.py | gftabor/pyForceDAQ | 3eababb41d855b961d228d8366fdd154bb6314ea | [
"MIT"
]
| null | null | null | forceDAQ/gui/plotter.py | gftabor/pyForceDAQ | 3eababb41d855b961d228d8366fdd154bb6314ea | [
"MIT"
]
| null | null | null | __version__ = "0.2"
import threading
import numpy as np
import pygame
from expyriment.stimuli import Canvas, Rectangle, TextLine
from expyriment.stimuli._visual import Visual
from expyriment.misc import constants
lock_expyriment = threading.Lock()
Numpy_array_type = type(np.array([]))
class Scaling(object):
"""littel helper object function to handle plotter scaling"""
step_size = 5 # for increasing/decreasing
def __init__(self, min, max,
pixel_min, pixel_max):
"""xy-value arrays"""
self._min = min
self._max = max
self.pixel_min = pixel_min
self.pixel_max = pixel_max
self._update()
@property
def max(self):
return self._max
@max.setter
def max(self, value):
self._max = value
self._update()
@property
def min(self):
return self._min
@min.setter
def min(self, value):
self._min = value
self._update()
def _update(self):
self._zero_shift = (self._min + self._max)/2.0
self._range = float(self._max - self._min)
def get_pixel_factor(self):
return (self.pixel_max - self.pixel_min) / self._range
def increase_data_range(self):
self.min += Scaling.step_size
self.max -= Scaling.step_size
if self.min >= self.max:
self.decrease_data_range()
def decrease_data_range(self):
self.min -= Scaling.step_size
self.max += Scaling.step_size
def data_range_up(self):
self.min += Scaling.step_size
self.max += Scaling.step_size
def data_range_down(self):
self.min -= Scaling.step_size
self.max -= Scaling.step_size
def data2pixel(self, values):
""" values: numeric or numpy array
pixel_min_max: 2D array"""
return (values - self._zero_shift) * \
(self.pixel_max - self.pixel_min) / self._range # pixel_factor
def trim(self, value):
"""trims value to the range, ie. set to min or max if <min or > max """
if value < self.min:
return self.min
elif value > self.max:
return self.max
return value
class PGSurface(Canvas):
"""PyGame Surface: Expyriment Stimulus for direct Pygame operations and
PixelArrays
In contrast to other Expyriment stimuli the class does not generate temporary
surfaces.
"""
def __init__(self, size, position=None, colour=None):
Canvas.__init__(self, size, position, colour)
self._px_array = None
@property
def surface(self):
"""todo"""
if not self.has_surface:
ok = self._set_surface(self._get_surface()) # create surface
if not ok:
raise RuntimeError(Visual._compression_exception_message.format(
"surface"))
return self._surface
@property
def pixel_array(self):
"""todo"""
if self._px_array is None:
self._px_array = pygame.PixelArray(self.surface)
return self._px_array
@pixel_array.setter
def pixel_array(self, value):
if self._px_array is None:
self._px_array = pygame.PixelArray(self.surface)
self._px_array = value
def unlock_pixel_array(self):
"""todo"""
self._px_array = None
def preload(self, inhibit_ogl_compress=False):
self.unlock_pixel_array()
return Canvas.preload(self, inhibit_ogl_compress)
def compress(self):
self.unlock_pixel_array()
return Canvas.compress(self)
def decompress(self):
self.unlock_pixel_array()
return Canvas.decompress(self)
def plot(self, stimulus):
self.unlock_pixel_array()
return Canvas.plot(self, stimulus)
def clear_surface(self):
self.unlock_pixel_array()
return Canvas.clear_surface(self)
def copy(self):
self.unlock_pixel_array()
return Canvas.copy(self)
def unload(self, keep_surface=False):
if not keep_surface:
self.unlock_pixel_array()
return Canvas.unload(self, keep_surface)
def rotate(self, degree):
self.unlock_pixel_array()
return Canvas.rotate(self, degree)
def scale(self, factors):
self.unlock_pixel_array()
return Canvas.scale(self, factors)
# expyriment 0.8.0
# def scale_to_fullscreen(self, keep_aspect_ratio=True):
# self.unlock_pixel_array()
# return Canvas.scale_to_fullscreen(self, keep_aspect_ratio)
def flip(self, booleans):
self.unlock_pixel_array()
return Canvas.flip(self, booleans)
def blur(self, level):
self.unlock_pixel_array()
return Canvas.blur(self, level)
def scramble(self, grain_size):
self.unlock_pixel_array()
return Canvas.scramble(self, grain_size)
def add_noise(self, grain_size, percentage, colour):
self.unlock_pixel_array()
return Canvas.add_noise(self, grain_size, percentage, colour)
class Plotter(PGSurface):
"""Pygame Plotter"""
def __init__(self, n_data_rows, data_row_colours,
width=600, y_range=(-100, 100),
background_colour=(180, 180, 180),
marker_colour=(200, 200, 200),
position=None,
axis_colour=None):
self.n_data_rows = n_data_rows
self.data_row_colours = data_row_colours
self.width = width
self.y_range = y_range
self._background_colour = background_colour
self.marker_colour = marker_colour
self._horizontal_lines = None
if axis_colour is None:
self.axis_colour = background_colour
else:
self.axis_colour = axis_colour
self._previous = [None] * n_data_rows
PGSurface.__init__(self, size=(self.width, self._height),
position=position)
self.clear_area()
@property
def y_range(self):
        return self._y_range
@y_range.setter
def y_range(self, values):
"""tuple with lower and upper values"""
self._y_range = values
self._height = self._y_range[1] - self._y_range[0]
@property
def data_row_colours(self):
return self._data_row_colours
@data_row_colours.setter
def data_row_colours(self, values):
"""data_row_colours: list of colour"""
try:
if not isinstance(values[0], list) and \
not isinstance(values[0], tuple): # one dimensional
values = [values]
except:
            values = [[]] # values is not a list
if len(values) != self.n_data_rows:
raise RuntimeError('Number of data row colour does not match the ' +
'defined number of data rows!')
self._data_row_colours = values
def clear_area(self):
self.pixel_array[:, :] = self._background_colour
def set_horizontal_line(self, y_values):
"""y_values: array"""
try:
self._horizontal_lines = np.array(y_values, dtype=int)
except:
self._horizontal_lines = None
def write_values(self, position, values, set_marker=False,
set_point_marker=False):
"""
additional points: np.array
"""
if set_marker:
self.pixel_array[position, :] = self.marker_colour
else:
self.pixel_array[position, :] = self._background_colour
if set_point_marker:
self.pixel_array[position, 0:2] = self.marker_colour
if self._horizontal_lines is not None:
for c in (self._y_range[1] - self._horizontal_lines):
self.pixel_array[:, c:c+1] = self.marker_colour
for c, plot_value in enumerate(self._y_range[1] - \
np.array(values, dtype=int)):
if plot_value >= 0 and self._previous[c] >= 0 \
and plot_value <= self._height and \
self._previous[c] <= self._height:
if self._previous[c] > plot_value:
self.pixel_array[position,
plot_value:self._previous[c] + 1] = \
self._data_row_colours[c]
else:
self.pixel_array[position,
self._previous[c]:plot_value + 1] = \
self._data_row_colours[c]
self._previous[c] = plot_value
def add_values(self, values, set_marker=False):
""" high level function of write values with type check and shifting to left
not used by plotter thread
"""
if type(values) is not Numpy_array_type and \
not isinstance(values, tuple) and \
not isinstance(values, list):
values = [values]
if len(values) != self.n_data_rows:
raise RuntimeError('Number of data values does not match the ' +
'defined number of data rows!')
# move plot one pixel to the left
self.pixel_array[:-1, :] = self.pixel_array[1:, :]
self.write_values(position=-1, values=values, set_marker=set_marker)
class PlotterThread(threading.Thread):
def __init__(self, n_data_rows, data_row_colours,
width=600, y_range=(-100, 100),
background_colour=(80, 80, 80),
marker_colour=(200, 200, 200),
position=None,
axis_colour=None):
super(PlotterThread, self).__init__()
self._plotter = Plotter(n_data_rows=n_data_rows,
data_row_colours=data_row_colours,
width=width, y_range=y_range,
background_colour=background_colour,
marker_colour=marker_colour,
position=position,
axis_colour=axis_colour)
self._new_values = []
self._lock_new_values = threading.Lock()
self._running = threading.Event()
self._stop_request = threading.Event()
self._clear_area_event = threading.Event()
self.unpause()
def get_plotter_rect(self, screen_size):
half_screen_size = (screen_size[0] / 2, screen_size[1] / 2)
pos = self._plotter.absolute_position
stim_size = self._plotter.surface_size
rect_pos = (pos[0] + half_screen_size[0] - stim_size[0] / 2,
- pos[1] + half_screen_size[1] - stim_size[1] / 2)
return pygame.Rect(rect_pos, stim_size)
def clear_area(self):
self._clear_area_event.set()
def pause(self):
self._running.clear()
def unpause(self):
self._running.set()
def stop(self):
self.join()
def join(self, timeout=None):
self._stop_request.set()
super(PlotterThread, self).join(timeout)
def run(self):
"""the plotter thread is constantly updating the the
pixel_area"""
while not self._stop_request.is_set():
if not self._running.is_set():
self._running.wait(timeout=1)
continue
if self._clear_area_event.is_set():
self._plotter.clear_area()
self._clear_area_event.clear()
# get data
if self._lock_new_values.acquire(False):
values = self._new_values
self._new_values = []
self._lock_new_values.release() # release to receive new values
else:
values = []
n = len(values)
if n > 0:
if n > self._plotter.width:
values = values[-1 * self._plotter.width:] # only the last
n = len(values)
self._plotter.pixel_array[:-1 * n, :] = \
self._plotter.pixel_array[n:, :]
for x in range(-1 * n, 0):
self._plotter.write_values(position=x,
values=values[x][0],
set_marker=values[x][1],
set_point_marker=values[x][2])
# Expyriment present
lock_expyriment.acquire()
self._plotter.present(update=False, clear=False)
lock_expyriment.release()
def set_horizontal_lines(self, y_values):
"""adds new values to the plotter
y_values has to be an array
"""
self._lock_new_values.acquire()
self._plotter.set_horizontal_line(y_values=y_values)
self._lock_new_values.release()
def add_values(self, values, set_marker=False, set_point_marker=False):
"""adds new values to the plotter"""
self._lock_new_values.acquire()
self._new_values.append((values, set_marker, set_point_marker))
self._lock_new_values.release()
def level_indicator(value, text, scaling, width=20,
text_size=14, text_gap=20, position=(0,0), thresholds = None,
colour=constants.C_EXPYRIMENT_ORANGE):
"""make an level indicator in for of an Expyriment stimulus
text_gap: gap between indicator and text
scaling: Scaling object
Returns
--------
expyriment.Canvas
"""
value = scaling.trim(value)
# indicator
height = scaling.pixel_max - scaling.pixel_min
indicator = Canvas(size=[width + 2, height + 2],
colour=(30, 30, 30))
zero = scaling.data2pixel(0)
px_bar_height = scaling.data2pixel(value) - zero
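    # the bar spans from the zero line to the scaled value; its sign sets the direction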
bar = Rectangle(size=(width, abs(px_bar_height)),
position=(0, zero + int((px_bar_height + 1) / 2)),
colour=colour)
bar.plot(indicator)
# levels & horizontal lines
try:
px_horizontal_lines = scaling.data2pixel(values=np.array(thresholds.thresholds))
except:
px_horizontal_lines = None
if px_horizontal_lines is not None:
for px in px_horizontal_lines:
level = Rectangle(size=(width+6, 2),
position=(0, px),
colour=constants.C_WHITE)
level.plot(indicator)
# text labels
txt = TextLine(text=text, text_size=text_size,
position=(0, -1 * (int(height / 2.0) + text_gap)),
text_colour=constants.C_YELLOW)
# make return canvas
w = max(txt.surface_size[0], indicator.size[0])
h = height + 2 * (txt.surface_size[1]) + text_gap
rtn = Canvas(size=(w, h), colour=(0, 0, 0), position=position)
indicator.plot(rtn)
txt.plot(rtn)
return rtn
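# A minimal usage sketch (not part of the original file). `scaling` is assumed to
# be this module's Scaling helper exposing trim(), data2pixel(), pixel_min and
# pixel_max, and `thresholds` an optional object with a .thresholds list:
# canvas = level_indicator(value=42.0, text="force", scaling=scaling,
#                          thresholds=None, colour=constants.C_EXPYRIMENT_ORANGE)
# canvas.present()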
if __name__ == "__main__":
pass
| 32.714912 | 88 | 0.58292 | 12,792 | 0.857488 | 0 | 0 | 1,872 | 0.125486 | 0 | 0 | 1,737 | 0.116437 |
f5eeb057bded5c49089e78a2d6eb892367d91cd2 | 3,528 | py | Python | gcp/extract/lib/weights_vcv.py | dylanhogan/prospectus-tools | 662b2629290cd27c74cd34769773e0d6e73c7048 | [
"MIT"
]
| null | null | null | gcp/extract/lib/weights_vcv.py | dylanhogan/prospectus-tools | 662b2629290cd27c74cd34769773e0d6e73c7048 | [
"MIT"
]
| null | null | null | gcp/extract/lib/weights_vcv.py | dylanhogan/prospectus-tools | 662b2629290cd27c74cd34769773e0d6e73c7048 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
################################################################################
# Copyright 2014, Distributed Meta-Analysis System
################################################################################
"""
This file provides methods for handling weighting across GCMs under
delta method calculations.
"""
__copyright__ = "Copyright 2014, Distributed Meta-Analysis System"
__author__ = "James Rising"
__credits__ = ["James Rising"]
__maintainer__ = "James Rising"
__email__ = "[email protected]"
__status__ = "Production"
__version__ = "$Revision$"
# $Source$
import numpy as np
from scipy.optimize import brentq
from scipy.stats import norm
class WeightedGMCDF(object):
"""
A weighted Gaussian mixture model.
"""
def __init__(self, means, variances, weights):
self.means = means
self.sds = np.sqrt(variances) # as std. dev.
self.weights = weights / np.sum(weights) # as fractions of 1
def inverse(self, pp):
# pp is a scalar or vector of probabilities
# make it an array, if not already
if len(np.array(pp).shape) == 0:
pp = np.array([pp])
# determine extreme left and right bounds for root-finding
pp = np.array(pp) # needs to be np array
left = np.min(norm.ppf(np.min(pp), self.means, self.sds))
right = np.max(norm.ppf(np.max(pp[pp < 1]), self.means, self.sds))
# find root for each probability
roots = []
for p in pp:
if p == 2:
roots.append(np.average(self.means, weights=self.weights))
continue
# Set up mixed distribution CDF with root and find it
func = lambda x: sum(self.weights * norm.cdf(x, self.means, self.sds)) - p
roots.append(brentq(func, left, right))
return roots
@staticmethod
def encode_evalqvals(evalqvals):
encoder = {'mean': 2}
return map(lambda p: p if isinstance(p, float) else encoder[p], evalqvals)
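# A minimal sketch of how encode_evalqvals() feeds inverse() (illustration only,
# not part of the original module; the numbers are made up): the string 'mean' is
# encoded as the sentinel value 2, which inverse() maps to the weighted mean of
# the component means instead of a quantile.
# dist = WeightedGMCDF([0.0, 1.0], [1.0, 4.0], [0.5, 0.5])
# qq = WeightedGMCDF.encode_evalqvals([0.25, 'mean', 0.75])
# print dist.inverse(qq)  # middle entry is the weighted mean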
if __name__ == '__main__':
## Example between R and python
## R:
# means <- rnorm(10)
# sds <- rexp(10)
# weights <- runif(10)
# weights <- weights / sum(weights)
# draws <- sapply(1:100000, function(ii) sample(rnorm(10, means, sds), 1, prob=weights))
# pp <- runif(10)
# quantile(draws, pp)
## For the values below:
# > quantile(draws, pp)
# 4.261865% 57.54305% 9.961645% 13.1325% 68.3729% 89.93871% 37.68216% 25.06827% 72.6134% 92.35501%
# -2.70958468 0.77240194 -2.15403320 -1.90146370 1.17428553 1.95475922 -0.06482985 -0.92293638 1.36865349 2.00405179
## Python:
means = [-1.10402809, 1.91300947, -2.21007153, 0.65175650, 0.56314868, -0.28337581, 0.98788803, 1.10211432, -0.06220629, -1.45807086]
variances = np.array([0.65422226, 0.13413332, 0.61493262, 0.29639041, 2.20748648, 1.69513869, 1.15008972, 0.41550756, 0.03384455, 1.07446232])**2
weights = [0.07420341, 0.16907337, 0.11439943, 0.08439015, 0.01868190, 0.14571485, 0.07630478, 0.17063990, 0.09951820, 0.04707401]
pp = [0.04261865, 0.57543051, 0.09961645, 0.13132502, 0.68372897, 0.89938713, 0.37682157, 0.25068274, 0.72613404, 0.92355014]
dist = WeightedGMCDF(means, variances, weights)
print dist.inverse(pp)
# [-2.708582712985005, 0.7720415676939508, -2.152969315647189, -1.8999500392063315, 1.1698917665106159, 1.955783738182657, -0.0641650435162273, -0.9150700927430755, 1.3660161904436894, 2.004650382993468]
| 40.551724 | 207 | 0.614229 | 1,338 | 0.379252 | 0 | 0 | 163 | 0.046202 | 0 | 0 | 1,583 | 0.448696 |
f5efba2cc27e11d0b24ffd544963fe1fe77b60d3 | 764 | py | Python | ecojunk/users/api/v1/resources.py | PIN-UPV/EcoJunkWebServer | 53a42687c303ffe345f59dc1f11fa41c3526f6d7 | [
"MIT"
]
| 1 | 2018-10-02T11:54:26.000Z | 2018-10-02T11:54:26.000Z | ecojunk/users/api/v1/resources.py | PIN-UPV/EcoJunkWebServer | 53a42687c303ffe345f59dc1f11fa41c3526f6d7 | [
"MIT"
]
| 8 | 2018-10-03T08:02:39.000Z | 2018-11-21T07:42:26.000Z | ecojunk/users/api/v1/resources.py | PIN-UPV/EcoJunkWebServer | 53a42687c303ffe345f59dc1f11fa41c3526f6d7 | [
"MIT"
]
| 1 | 2018-10-02T11:54:32.000Z | 2018-10-02T11:54:32.000Z | from rest_framework import status
from rest_framework.generics import RetrieveUpdateAPIView
from rest_framework.response import Response
from ecojunk.users.api.v1.serializers import UserSerializer
class UserResource(RetrieveUpdateAPIView):
serializer_class = UserSerializer
def retrieve(self, request, *args, **kwargs):
serializer = self.serializer_class(request.user)
return Response(serializer.data, status=status.HTTP_200_OK)
def update(self, request, *args, **kwargs):
serializer = self.serializer_class(
request.user, data=request.data, partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
| 31.833333 | 67 | 0.740838 | 563 | 0.736911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f5f03ea17d8bc72c5ae1602cba0dbeef3ed61e6b | 2,905 | py | Python | app/modules/payments/resources.py | almlys/sample_paymentsapi | d7ba4d2effeb7654ee06aab6dbb15e22f8d213cc | [
"MIT"
]
| null | null | null | app/modules/payments/resources.py | almlys/sample_paymentsapi | d7ba4d2effeb7654ee06aab6dbb15e22f8d213cc | [
"MIT"
]
| null | null | null | app/modules/payments/resources.py | almlys/sample_paymentsapi | d7ba4d2effeb7654ee06aab6dbb15e22f8d213cc | [
"MIT"
]
| null | null | null | # encoding: utf-8
# pylint: disable=bad-continuation
"""
RESTful API Payments resources
--------------------------
"""
import logging
from flask_login import current_user
from flask_restplus_patched import Resource
from flask_restplus._http import HTTPStatus
from app.extensions import db
from app.extensions.api import Namespace, abort
from app.extensions.api.parameters import PaginationParameters
from . import parameters, schemas
from .models import Payment
log = logging.getLogger(__name__) # pylint: disable=invalid-name
api = Namespace('payments', description="Payments") # pylint: disable=invalid-name
@api.route('/')
class Payments(Resource):
"""
Manipulations with Payments.
"""
@api.parameters(PaginationParameters())
@api.response(schemas.BasePaymentSchema(many=True))
def get(self, args):
"""
List of Payment.
Returns a list of Payment starting from ``offset`` limited by ``limit``
parameter.
"""
return Payment.query.offset(args['offset']).limit(args['limit'])
@api.parameters(parameters.CreatePaymentParameters())
@api.response(schemas.DetailedPaymentSchema())
@api.response(code=HTTPStatus.CONFLICT)
def post(self, args):
"""
Create a new instance of Payment.
"""
with api.commit_or_abort(
db.session,
default_error_message="Failed to create a new Payment"
):
payment = Payment(**args)
db.session.add(payment)
return payment
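# Illustrative request flow for this namespace (assuming it is mounted under
# /payments/; not part of the original module):
#   GET  /payments/?offset=0&limit=20  -> paginated list (BasePaymentSchema)
#   POST /payments/ {...}              -> creates a Payment (DetailedPaymentSchema)
# The PaymentByID resource below adds GET/PATCH/DELETE on /payments/<payment_id>.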
@api.route('/<payment_id>')
@api.response(
code=HTTPStatus.NOT_FOUND,
description="Payment not found.",
)
@api.resolve_object_by_model(Payment, 'payment')
class PaymentByID(Resource):
"""
Manipulations with a specific Payment.
"""
@api.response(schemas.DetailedPaymentSchema())
def get(self, payment):
"""
Get Payment details by ID.
"""
return payment
@api.parameters(parameters.PatchPaymentDetailsParameters())
@api.response(schemas.DetailedPaymentSchema())
@api.response(code=HTTPStatus.CONFLICT)
def patch(self, args, payment):
"""
Patch Payment details by ID.
"""
with api.commit_or_abort(
db.session,
default_error_message="Failed to update Payment details."
):
parameters.PatchPaymentDetailsParameters.perform_patch(args, obj=payment)
db.session.merge(payment)
return payment
@api.response(code=HTTPStatus.CONFLICT)
@api.response(code=HTTPStatus.NO_CONTENT)
def delete(self, payment):
"""
Delete a Payment by ID.
"""
with api.commit_or_abort(
db.session,
default_error_message="Failed to delete the Payment."
):
db.session.delete(payment)
return None
| 27.666667 | 85 | 0.640275 | 2,101 | 0.723236 | 0 | 0 | 2,280 | 0.784854 | 0 | 0 | 800 | 0.275387 |
f5f344323771b9cf37b06554ddc6a58b22178367 | 1,616 | py | Python | bin/list-teams.py | kws/python-msgraphy | a5dad8bd834c476974fae151f30865c229e0f798 | [
"MIT"
]
| 1 | 2022-01-06T08:06:47.000Z | 2022-01-06T08:06:47.000Z | bin/list-teams.py | kws/python-msgraphy | a5dad8bd834c476974fae151f30865c229e0f798 | [
"MIT"
]
| null | null | null | bin/list-teams.py | kws/python-msgraphy | a5dad8bd834c476974fae151f30865c229e0f798 | [
"MIT"
]
| null | null | null | import msgraphy_util
import argparse
from msgraphy import GraphApi
def main(name, starts_with, exact, channels, folder):
api = GraphApi(scopes=["Group.Read.All"])
response = api.team.list_teams(search=name, starts_with=starts_with, exact=exact)
for team in response.value:
print(f"{team.display_name} [{team.id}]")
print(team.description)
if channels or folder:
response = api.team.list_channels(team.id)
for ch in response.value:
print(f"* {ch.display_name} [{ch.id}]")
if folder:
response = api.team.get_channel_files_folder(team.id, ch.id)
if response.ok:
folder = response.value
print(f" {folder.web_url}")
else:
print(" [Folder not found]")
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='List or search for MS team'
)
parser.add_argument("name", type=str, nargs="?", help="show only teams which contains [name]")
parser.add_argument("--starts_with", "-s", type=str, nargs="?", metavar="value", help="only teams starting with [value]")
parser.add_argument("--exact", "-e", type=str, nargs="?", metavar="value", help="only teams exactly matching [value]")
parser.add_argument("--channels", "-c", action='store_true', help="include channels")
parser.add_argument("--folder", "-f", action='store_true', help="include channel folder (implies -c)")
args = parser.parse_args()
main(**vars(args))
| 41.435897 | 125 | 0.603342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 445 | 0.275371 |
f5f35c0e3a98205f6d6bd8dde9d15ab552f7d436 | 21,372 | py | Python | tileEditor.py | haywireSSC/Level-Editor | 34fedbe36b90afeb8c0d995fcecbed845ffd6253 | [
"CC0-1.0"
]
| null | null | null | tileEditor.py | haywireSSC/Level-Editor | 34fedbe36b90afeb8c0d995fcecbed845ffd6253 | [
"CC0-1.0"
]
| null | null | null | tileEditor.py | haywireSSC/Level-Editor | 34fedbe36b90afeb8c0d995fcecbed845ffd6253 | [
"CC0-1.0"
]
| null | null | null | import pygame as p
from math import floor
from copy import deepcopy
import Tkinter, tkFileDialog
root = Tkinter.Tk()
root.withdraw()
p.init()
running = True
tileWidth = 16
tileHeight = 16
mapWidth = 100
mapHeight = 100
camX = 0
camY = 0
scale = 2
uiScale = 2
hand = 1
layerStack = True
file_path = ''
file_path = tkFileDialog.askopenfilename()
if file_path[-3:] != 'png':
exit()
layers = []
currentLayer = 1
layers.append([-1] * (mapWidth * mapHeight))
layers.append([-1] * (mapWidth * mapHeight))
prevLayers = deepcopy(layers)
prevLayerLists = []
prevLayerListsRedo = []
brush = p.image.load('brush.png')
brushHover = p.image.load('brushHover.png')
square = p.image.load('square.png')
squareHover = p.image.load('squareHover.png')
brushRect = brush.get_rect()
squareRect = square.get_rect()
brushRect.width, brushRect.height = brushRect.width * uiScale, brushRect.height * uiScale
squareRect.width, squareRect.height = squareRect.width * uiScale, squareRect.height * uiScale
(width, height) = (480, 360)
p.display.set_caption('Tile Editor')
font = p.font.Font('Minecraftia-Regular.ttf', 8)
s = p.display.set_mode((width, height), p.RESIZABLE)
clock = p.time.Clock()
middleClick = False
leftClick = False
leftClickPrev = False
rightClick = False
rightClickDown = False
rightClickPrev = False
mouseOffset = (0, 0)
mousePos = (0, 0)
buttonClick = False
buttonHover = False
sDown = False
squareT = False
sDownStart = False
startPos = (0,0)
def drawBox(width, height, filled):
surf = p.Surface((width, height))
if(filled):
surf.fill((41,48,50))
else:
surf.fill((0,0,0,0))
p.draw.rect(surf, (113,58,41), (0, 0, width, height), 1)
surf.set_at((0, 0), (0,0,0,0))
surf.set_at((width-1, 0), (0,0,0,0))
surf.set_at((0, height-1), (0,0,0,0))
surf.set_at((width-1, height-1), (0,0,0,0))
p.draw.rect(surf, (10,21,27), (1, 1, width-2, height-2), 1)
surf.set_at((1, 1), (88,41,24))
surf.set_at((width-2, 1), (88,41,24))
surf.set_at((1, height-2), (88,41,24))
surf.set_at((width-2, height-2), (88,41,24))
p.draw.lines(surf, (34,30,21), False, ((2, height-3), (2, 2), (width-3, 2)))
p.draw.lines(surf, (86,92,86), False, ((3, height-3), (width-3, height-3), (width-3, 3)))
#p.draw.rect(surf, (225,0,225), (3, 3, width-6, height-6))
return(p.transform.scale(surf, (uiScale * width, uiScale * height)))
def drawButton(textt, x, y):
global buttonClick
buttonClick = False
global buttonHover
buttonHover = False
text = font.render(textt, False, (251,175,113))
width = text.get_width() + 5
height = text.get_height() + 3
if textt[-1] == str(currentLayer):
text = font.render(textt, False, (150,179,174))
if textt == 'Layer Stack' and layerStack:
text = font.render(textt, False, (150,179,174))
if p.Rect(x, y, width * uiScale, height * uiScale).collidepoint(mousePos[0], mousePos[1]):
text = font.render(textt, False, (150,179,174))
buttonHover = True
if leftClick:
y += uiScale
if not leftClickPrev:
buttonClick = True
surf = p.Surface((width, height), p.SRCALPHA)
surf.fill((41,48,50))
surf.blit(text, (3, 1))
p.draw.rect(surf, (113,58,41), (0, 0, width, height), 1)
surf.set_at((0, 0), (0,0,0,0))
surf.set_at((width-1, 0), (0,0,0,0))
surf.set_at((0, height-1), (0,0,0,0))
surf.set_at((width-1, height-1), (0,0,0,0))
p.draw.rect(surf, (10,21,27), (1, 1, width-2, height-2), 1)
surf.set_at((1, 1), (88,41,24))
surf.set_at((width-2, 1), (88,41,24))
surf.set_at((1, height-2), (88,41,24))
surf.set_at((width-2, height-2), (88,41,24))
p.draw.lines(surf, (34,30,21), False, ((2, height-3), (2, 2), (width-3, 2)))
p.draw.lines(surf, (86,92,86), False, ((3, height-3), (width-3, height-3), (width-3, 3)))
s.blit(p.transform.scale(surf, (uiScale * width, uiScale * height)), (x, y))
tiles = []
sheetHeight = 0
sheetWidth = 0
def load_sheet(path):
global tiles
global sheetHeight
global sheetWidth
sheet = p.image.load(path)
if sheet.get_width() >= tileWidth and sheet.get_height() >= tileHeight:
tiles = []
sheetWidth = sheet.get_width()
sheetHeight = sheet.get_height()
for y in range(sheetHeight // tileHeight):
for x in range(sheetWidth // tileWidth):
image = p.Surface((tileWidth, tileHeight), p.SRCALPHA)
image.blit(sheet, (0, 0), (x * tileWidth, y * tileHeight, tileWidth, tileHeight))
tiles.append((image, x * tileWidth, y * tileHeight))
load_sheet(file_path)
while running:
windowResize = False
for event in p.event.get():
if event.type == p.QUIT:
running = False
elif event.type == p.MOUSEMOTION:
mousePos = p.mouse.get_pos()
elif event.type == p.MOUSEBUTTONDOWN:
mousePos = p.mouse.get_pos()
if event.button == 2:
mouseOffset = (mousePos[0] - camX, mousePos[1] - camY);
middleClick = True
elif event.button == 1:
leftClick = True
elif event.button == 3:
rightClick = True
rightClickDown = True
elif event.type == p.MOUSEBUTTONUP:
if event.button == 2:
middleClick = False
elif event.button == 1:
leftClick = False
elif event.button == 3:
rightClick = False
elif event.type == p.MOUSEWHEEL and not middleClick:
scale += event.y
if(scale < 1):
scale = 1
elif event.type == p.VIDEORESIZE:
width = event.w
height = event.h
windowResize = True
elif event.type == p.KEYDOWN:
if event.key == p.K_z and p.key.get_mods() & p.KMOD_CTRL:
if len(prevLayerLists) != 0:
prevLayerListsRedo.append(layers)
layers = prevLayerLists[-1]
del prevLayerLists[-1]
elif event.key == p.K_y and p.key.get_mods() & p.KMOD_CTRL:
if len(prevLayerListsRedo) != 0:
prevLayerLists.append(layers)
layers = prevLayerListsRedo[-1]
del prevLayerListsRedo[-1]
elif event.key == p.K_s:
sDown = True
elif event.type == p.KEYUP:
if event.key == p.K_s:
sDown = False
prevLayers = deepcopy(layers)
if middleClick:
camX, camY = mousePos[0] - mouseOffset[0], mousePos[1] - mouseOffset[1]
x = int(round((mousePos[0] - camX) / (tileWidth * scale)))
y = int(round((mousePos[1] - camY) / (tileHeight * scale)))
layers[0][(y * mapWidth) + x] = hand
if leftClick and not sDownStart:
if(mousePos[0] > (9 * uiScale) and mousePos[0] < (sheetWidth + 9) * uiScale and mousePos[1] > (9 * uiScale) and mousePos[1] < (sheetHeight + 9) * uiScale):
x = int(round((mousePos[0] - (9 * uiScale)) / (tileWidth * uiScale)))
y = int(round((mousePos[1] - (9 * uiScale)) / (tileHeight * uiScale)))
hand = (y * (sheetWidth // (tileWidth))) + x
else:
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
elif rightClick and not sDown:
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
s.fill((41,48,50))
renderList = []
for i in range(0, len(layers)):
if not i == 0:
for x in range(mapWidth):
for y in range(mapHeight):
if (x * tileWidth * scale) + camX > tileWidth * -scale and (x * tileWidth * scale) + camX < width and (y * tileHeight * scale) + camY > tileHeight * -scale and (y * tileHeight * scale) + camY < height:
tile = layers[0][y * mapWidth + x]
if not layerStack:
if i == currentLayer and tile != -1 and not [x,y] in renderList:
renderList.append([x,y])
s.blit(p.transform.scale(tiles[tile][0], (tileWidth * scale, tileHeight * scale)), ((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY))
else:
tile = layers[i][y * mapWidth + x]
if not [x,y] in renderList:
if tile == -1 and i == currentLayer:
if uiScale >= scale:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), 1)
else:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), uiScale)
elif tile != -1:
renderList.append([x,y])
s.blit(p.transform.scale(tiles[tile][0], (tileWidth * scale, tileHeight * scale)), ((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY))
else:
if i == currentLayer and tile != -1:
renderList.append([x,y,tile])
else:
tile = layers[i][y * mapWidth + x]
if tile == -1 and i == currentLayer:
if uiScale >= scale:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), 1)
else:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), uiScale)
elif tile != -1:
renderList.append([x,y,tile])
if layerStack:
for i in range(len(renderList)-1, 0, -1):
s.blit(p.transform.scale(tiles[renderList[i][2]][0], (tileWidth * scale, tileHeight * scale)), ((renderList[i][0] * tileWidth * scale) + camX, (renderList[i][1] * tileHeight * scale) + camY))
i = sheetHeight + int(tileHeight * 1.5 + 12)
s.blit(drawBox(sheetWidth + 12, i, True), (3 * uiScale, 3 * uiScale))
drawButton('New Layer', 3 * uiScale, (i + 6) * uiScale)
if buttonClick:
layers.append([-1] * (mapWidth * mapHeight))
currentLayer = len(layers)-1
for layer in range(0, len(layers)-1):
drawButton('Layer ' + str(layer + 1), 3 * uiScale, (i + 26 * (layer + 1)) * uiScale)
if buttonClick:
currentLayer = layer + 1
if buttonHover and rightClickDown and len(layers) > 2:
prevLayerLists.append(deepcopy(layers))
del layers[layer + 1]
if currentLayer > len(layers) - 1:
currentLayer -= 1
prevLayers = layers
for image in tiles:
s.blit(p.transform.scale(image[0], (tileWidth * uiScale, tileHeight * uiScale)), ((image[1] + 9) * uiScale, (image[2] + 9) * uiScale))
s.blit(p.transform.scale(tiles[hand][0], (tileWidth * uiScale, tileHeight * uiScale)), (9 * uiScale, (sheetHeight + tileHeight) * uiScale))
drawButton('Open Tilesheet', (sheetWidth + 18) * uiScale, 3 * uiScale)
if buttonClick:
file_path = tkFileDialog.askopenfilename()
if file_path[-3:] == 'png':
load_sheet(file_path)
drawButton('Layer Stack', (sheetWidth + 18) * uiScale, 23 * uiScale)
if buttonClick:
layerStack = not layerStack
layers[0] = [-1] * (mapWidth * mapHeight)
if not leftClick and leftClickPrev and sDownStart:
sDownStart = False
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
elif leftClick and sDownStart:
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
if not rightClick and rightClickPrev and sDownStart:
sDownStart = False
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
elif rightClick and sDownStart:
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = -2
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = -2
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = -2
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = -2
if leftClick and not leftClickPrev or rightClick and not rightClickPrev:
if sDown:
sDownStart = True
startPos = (int(round((mousePos[0] - camX) / (tileWidth * scale))), int(round((mousePos[1] - camY) / (tileHeight * scale))))
if prevLayers != layers:
prevLayerLists.append(deepcopy(prevLayers))
leftClickPrev = leftClick
backDown = False
rightClickDown = False
brushRect.x,brushRect.y = (sheetWidth + 18) * uiScale, 43 * uiScale
if brushRect.collidepoint(mousePos[0], mousePos[1]) or not squareT:
if leftClick and brushRect.collidepoint(mousePos[0], mousePos[1]):
squareT = False
sDown = False
s.blit(p.transform.scale(brushHover, (brushRect.width, brushRect.height)), (brushRect.x, brushRect.y + uiScale))
else:
s.blit(p.transform.scale(brushHover, (brushRect.width, brushRect.height)), brushRect)
else:
s.blit(p.transform.scale(brush, (brushRect.width, brushRect.height)), brushRect)
squareRect.x,squareRect.y = (sheetWidth + 34) * uiScale, 43 * uiScale
if squareRect.collidepoint(mousePos[0], mousePos[1]) or squareT:
if leftClick and squareRect.collidepoint(mousePos[0], mousePos[1]):
squareT = True
s.blit(p.transform.scale(squareHover, (squareRect.width, squareRect.height)), (squareRect.x, squareRect.y + uiScale))
else:
s.blit(p.transform.scale(squareHover, (squareRect.width, squareRect.height)), squareRect)
else:
s.blit(p.transform.scale(square, (squareRect.width, squareRect.height)), squareRect)
if squareT:
sDown = True
rightClickPrev = rightClick
p.display.update()
clock.tick(60)
| 48.794521 | 221 | 0.561623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.010528 |
f5f4c4714755e8b9549c5e4949c349f3b753fe90 | 5,148 | py | Python | EditGroupWindow.py | TheYargonaut/lucre | 1abd472993df01b443ab4811379dfe52e18cf790 | [
"MIT"
]
| null | null | null | EditGroupWindow.py | TheYargonaut/lucre | 1abd472993df01b443ab4811379dfe52e18cf790 | [
"MIT"
]
| null | null | null | EditGroupWindow.py | TheYargonaut/lucre | 1abd472993df01b443ab4811379dfe52e18cf790 | [
"MIT"
]
| null | null | null | import tkinter as tk
from tkinter.colorchooser import askcolor
from tkinter import ttk
from Scrollable import Scrollable
from ViewLedgerWidget import ViewLedgerWidget
from List import ListView
from Group import Group
# window for editing a group
prevLens = [ 10, 25, 100 ]
class EditGroupWindow( tk.Toplevel ):
def __init__( self, master, group, ledger, psize, *args, **kwargs ):
tk.Toplevel.__init__( self, master, *args, **kwargs )
self.title( "edit group" )
self.groupBack = group
self.group = Group( **dict( group ) )
self.ledger = ledger
self.psize = psize
self.highlight = self.group.color # "white"
self.ignored = "#E00E00E00" # gray
self.view = None
self.build()
self.matchListCb( self.view )
def matchListCb( self, view ):
'set the highlights when group lists change'
mask = self.group.filter( self.ledger.df.head( len( view ) ) )
for r, m in enumerate( mask ):
view.highlightRow( r, self.highlight if m else self.ignored )
def finalize( self ):
self.groupBack.whitelist = [ r for r in self.group.whitelist if r ]
self.groupBack.blacklist = [ r for r in self.group.blacklist if r ]
self.groupBack.negate = self.group.negate
self.groupBack.title = self.group.title
self.groupBack.color = self.group.color
self.ledger.updateCb( self.ledger.df )
self.destroy()
def whiteListCb( self, idx, txt ):
self.group.whitelist[ idx ] = txt
self.matchListCb( self.view )
def blackListCb( self, idx, txt ):
self.group.blacklist[ idx ] = txt
self.matchListCb( self.view )
def nameCb( self, *args ):
self.group.title = self.nameVar.get()
def expenseCb( self, value ):
self.group.negate = value == 'expense'
def colorCb( self ):
self.group.color = askcolor( self.group.color, parent=self )[ 1 ]
self.highlight = self.group.color
self.color.config( fg=self.group.color )
self.matchListCb( self.view )
def build( self ):
self.grid_rowconfigure( 0, weight=1 )
self.grid_columnconfigure( 0, weight=1 )
mainFrame = ttk.Frame( self )
mainFrame.grid( row=0, column=0, sticky=tk.NSEW )
mainFrame.grid_rowconfigure( 1, weight=1 )
mainFrame.grid_columnconfigure( 0, weight=1 )
listFrame = ttk.Frame( self )
listFrame.grid( row=0, column=1, sticky=tk.NSEW )
listFrame.grid_rowconfigure( 0, weight=1 )
listFrame.grid_rowconfigure( 1, weight=1 )
listFrame.grid_columnconfigure( 0, weight=1 )
whiteFrame = ttk.Frame( listFrame )
whiteFrame.grid( row=0, column=0, sticky=tk.NSEW )
whiteLabel = tk.Label( whiteFrame, text='whitelist' )
whiteLabel.pack( side=tk.TOP, fill=tk.X )
whiteScroll = Scrollable( whiteFrame, vertical=True )
whiteScroll.pack( side=tk.TOP, fill=tk.BOTH )
whiteList = ListView( whiteScroll, self.group.whitelist, '+', self.whiteListCb )
whiteList.pack()
blackFrame = ttk.Frame( listFrame )
blackFrame.grid( row=1, column=0, sticky=tk.NSEW )
blackLabel = tk.Label( blackFrame, text='blacklist' )
blackLabel.pack( side=tk.TOP, fill=tk.X )
blackScroll = Scrollable( blackFrame, vertical=True )
blackScroll.pack( side=tk.TOP, fill=tk.BOTH )
blackList = ListView( blackScroll, self.group.blacklist, '+', self.blackListCb )
blackList.pack()
button = ttk.Frame( self )
button.grid( row=1, column=0, columnspan=2, sticky=tk.W + tk.E )
cancel = ttk.Button( button, text="Cancel", command=self.destroy )
cancel.pack( side=tk.RIGHT )
confirm = ttk.Button( button, text="Confirm", command=self.finalize )
confirm.pack( side=tk.RIGHT )
nameFrame = ttk.Frame( mainFrame )
nameFrame.grid( row=0, column=0, sticky=tk.NSEW )
self.color = tk.Button( nameFrame, text="\u2B1B", command=self.colorCb, width=3 )
self.color.config( fg=self.group.color )
self.color.pack( side=tk.LEFT, fill=tk.NONE, expand=False )
self.nameVar = tk.StringVar( nameFrame )
self.nameVar.set( self.group.title )
self.nameVar.trace( 'w', self.nameCb )
name = ttk.Entry( nameFrame, textvariable=self.nameVar, exportselection=0 )
name.pack( side=tk.LEFT, fill=tk.X, expand=True )
style = ttk.OptionMenu( nameFrame, tk.StringVar( nameFrame ), ( "expense" if self.group.negate else "income" ), "income", "expense", command=self.expenseCb )
style.pack( side=tk.RIGHT, fill=tk.NONE, expand=False )
self.view = ViewLedgerWidget( mainFrame, self.ledger.df, lenCb=self.matchListCb )
self.view.grid( row=1, column=0, sticky=tk.NE + tk.S )
def editGroupCb( master, group, ledger, psize ):
def cb( master=master, group=group, ledger=ledger, psize=psize ):
window = EditGroupWindow( master, group, ledger, psize )
master.wait_window( window )
return cb | 43.260504 | 165 | 0.633061 | 4,636 | 0.900544 | 0 | 0 | 0 | 0 | 0 | 0 | 210 | 0.040793 |
f5f611d50ecae53133cd83f244cc01c20777a693 | 261 | py | Python | day_07/task_1.py | Korred/advent_of_code_2021 | 89afcaae3343653106d36fb7ad08558c0fbb4732 | [
"Unlicense"
]
| null | null | null | day_07/task_1.py | Korred/advent_of_code_2021 | 89afcaae3343653106d36fb7ad08558c0fbb4732 | [
"Unlicense"
]
| null | null | null | day_07/task_1.py | Korred/advent_of_code_2021 | 89afcaae3343653106d36fb7ad08558c0fbb4732 | [
"Unlicense"
]
| null | null | null | crabs = sorted(map(int, open("input.txt", "r").readline().strip().split(",")))
# position with minimal fuel usage is at the median position
median_pos = crabs[len(crabs) // 2]
min_fuel = sum([abs(crab_pos - median_pos) for crab_pos in crabs])
print(min_fuel)
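# A small sanity check of the median argument (toy data, not the puzzle input):
# moving the target position p up by one unit changes the total fuel
# sum(|c - p|) by (#crabs at or below p) - (#crabs above p), so the cost is
# minimised at a median.
# toy = [0, 1, 2, 10]
# costs = {p: sum(abs(c - p) for c in toy) for p in range(0, 11)}
# print(min(costs, key=costs.get))  # 1 (p=1 and p=2, the medians, both cost 11)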
| 32.625 | 78 | 0.704981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.295019 |
f5f839cc33260b873ad589657cb5b87f8a948df8 | 5,172 | py | Python | dialmonkey/nlu/basketball.py | alexandergazo/NPFL123 | c52b6a880abf9fe694ce6a2d775c7db1bd765fba | [
"Apache-2.0"
]
| null | null | null | dialmonkey/nlu/basketball.py | alexandergazo/NPFL123 | c52b6a880abf9fe694ce6a2d775c7db1bd765fba | [
"Apache-2.0"
]
| null | null | null | dialmonkey/nlu/basketball.py | alexandergazo/NPFL123 | c52b6a880abf9fe694ce6a2d775c7db1bd765fba | [
"Apache-2.0"
]
| null | null | null | # Author: Matej Mik
from ..component import Component
from ..da import DAI
import re
def add_team_g(string, attributes):
if 'tym' in string:
if re.search('(muj|moj|meh)[^ ]{0,3} tym', string):
attributes.append('team=default')
else:
team = string.split('tym')[-1].split(' ', 1)[1]
if team.startswith('na '):
team = team[3:]
attributes.append(f'team={team}')
return attributes
def add_team_s(string, attributes):
if 'tym' in string:
if re.search('(vychozi[^ ]{0,2}|(muj|moj|meh)[^ ]{0,3}) tym', string):
attributes.append('default')
team = string.split('tym')[-1].split(' ', 1)[1]
if team.startswith('na '):
team = team[3:]
attributes.append(f'team={team}')
return attributes
def add_type(string, attributes):
if ' hrac' in string:
attributes.append('type=player')
elif ' tym' in string:
attributes.append('type=team')
return attributes
def add_nums(string, attributes):
nums = re.findall('[0-9]+[^ ]?', string)
if len(nums) == 1:
num = nums[0]
if num.endswith('.'):
attributes.append('rank=' + num.rstrip('.'))
else:
attributes.append('value=' + num)
elif any([stem in string for stem in [' nejv', ' nejlepsi']]):
attributes.append('rank=1')
return attributes
def add_time(string, attributes):
if ' dnes' in string:
attributes.append('time=today')
elif ' zitr' in string:
attributes.append('time=tommorow')
else:
time = re.findall('[0-9]{1,2}[. ]{1,2}[0-9]{1,2}[.]?', string)
if len(time) == 1:
attributes.append(f'time={time[0]}')
return attributes
def add_name(string, attributes):
if re.search('(vychozi[^ ]{0,2}|(muj|moj|meh)[^ ]{0,3}) tym', string):
attributes.append('name=default')
else:
names = re.findall(' hrac.*$', string) + re.findall(' tym.*$', string)
if len(names) == 1:
name = names[0].lstrip().split(' ', 1)
if len(name) == 2:
attributes.append(f'name={name[1]}')
return attributes
def add_stat(string, attributes):
if re.search('dv(.{2}bod|oje?k)', string):
attributes.append('stat=2_pt_made')
elif re.search('tr(.{1,2}bod|oje?k)', string):
attributes.append('stat=3_pt_made')
elif any([stem in string for stem in ['trestn', 'sestk', 'sestek']]):
if any([stem in string for stem in ['uspesn', 'procent']]):
attributes.append('stat=ft_percentage')
else:
attributes.append('stat=ft_made')
elif any([stem in string for stem in ['vyher', 'vyhr']]):
attributes.append('stat=wins')
elif any([stem in string for stem in ['strelec', 'strelc', ' bod']]):
attributes.append('stat=points')
return attributes
def to_DAIs(intent, attributes):
items = []
if intent:
if attributes:
for att in attributes:
items.append(DAI.parse(f'{intent}({att})'))
else:
items.append(DAI.parse(f'{intent}()'))
return items
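# Hypothetical trace of the rules implemented by BasketballNLU below (assumes a
# lower-cased, ASCII-folded utterance and is not verified against the full
# dialmonkey pipeline):
#   "kolik bodu ma hrac tomas satoransky"
#   -> intent request_stats with attributes value=?, stat=points, type=player,
#      name=tomas satoransky, i.e. DAIs request_stats(value=?),
#      request_stats(stat=points), request_stats(type=player),
#      request_stats(name=tomas satoransky)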
class BasketballNLU(Component):
def __call__(self, dial, logger):
intent= ''
attributes = []
if dial['user'].startswith('kde'):
intent = 'request_game'
attributes.append('place=?')
attributes = add_team_g(dial['user'], attributes)
elif dial['user'].startswith('kdy'):
intent = 'request_game'
attributes.append('time=?')
attributes = add_team_g(dial['user'], attributes)
elif any([stem in dial['user'] for stem in ['zapas', 'utkani']]):
intent = 'request_game'
attributes = add_time(dial['user'], attributes)
elif any([dial['user'].startswith(stem) for stem in ['kolik', 'jaky pocet', 'na jake']]):
intent = 'request_stats'
if any([stem in dial['user'] for stem in ['kolikat', 'mist', 'pozic']]):
attributes.append('rank=?')
else:
attributes.append('value=?')
attributes = add_stat(dial['user'], attributes)
attributes = add_type(dial['user'], attributes)
attributes = add_name(dial['user'], attributes)
elif any([dial['user'].startswith(stem) for stem in ['kter', 'kdo', 'jak']]):
intent = 'request_stats'
attributes.append('name=?')
attributes = add_type(dial['user'], attributes)
attributes = add_nums(dial['user'], attributes)
attributes = add_stat(dial['user'], attributes)
elif any([stem in dial['user'] for stem in ['zmen', 'nastav']]):
intent = 'set'
years = re.findall('[0-9]{4}', dial['user'])
if len(years) == 1:
attributes.append(f'season={years[0]}')
attributes = add_team_s(dial['user'], attributes)
for item in to_DAIs(intent, attributes):
dial['nlu'].append(item)
logger.info('NLU: %s', str(dial['nlu']))
return dial | 37.478261 | 97 | 0.552204 | 1,998 | 0.386311 | 0 | 0 | 0 | 0 | 0 | 0 | 1,091 | 0.210944 |
f5f954fff242094361f8f329de47188d709c63c7 | 1,447 | py | Python | test_SSstache.py | jonschull/Lyte | e9ba2bb1b07c9398b81a6f591898d2474d1a4609 | [
"MIT"
]
| 1 | 2018-06-07T17:54:27.000Z | 2018-06-07T17:54:27.000Z | test_SSstache.py | jonschull/Lyte | e9ba2bb1b07c9398b81a6f591898d2474d1a4609 | [
"MIT"
]
| 1 | 2018-06-28T05:08:57.000Z | 2018-06-28T05:08:57.000Z | test_SSstache.py | jonschull/Lyte | e9ba2bb1b07c9398b81a6f591898d2474d1a4609 | [
"MIT"
]
| null | null | null | from SSstache import *
from plumbum.path.utils import delete
from plumbum.cmd import ls, touch, mkdir
def test_makeSupportScriptStache():
delete('xyz')
assert makeSupportScriptStache(stacheDir='xyz').endswith('xyz')
assert ls('xyz').split()==['RSrun.2.7.min.js', 'glow.2.7.min.js', 'ide.css', 'jquery-ui.custom.css', 'jquery-ui.custom.min.js', 'jquery.min.js']
delete('xyz')
def test_prepareHTMLdir():
delete('xyz')
prepareHTMLdir('xyz')
assert('xyz' in ls().strip())
delete('xyz')
def test_makeHTMLdir():
HTMLdirName = '123'
delete( HTMLdirName )
fakeSSname = 'fakeSupportScripts'
delete(fakeSSname)
mkdir(fakeSSname)
scriptNames=['xyz.test', 'xyz2.test']
for scriptName in scriptNames:
touch(f'{fakeSSname}/{scriptName}')
makeHTMLdir( HTMLdirName ,
stacheDir = fakeSSname,
GLOWPATH='.',
scriptNames= scriptNames)
assert('supportScripts' in ls( HTMLdirName ).split() )
assert( ls('123/supportScripts').split() == scriptNames )
delete( HTMLdirName )
delete(fakeSSname)
def test_putInHTMLdir():
open('box2.py','w').write('box(color=color.green)')
putInHTMLdir('box2.py')
assert( 'box2.py' in ls('box2').split() )
delete('box2.py')
delete('box2')
#prepareHTMLdir(dirName='xyz')
#test_makeHTMLdir() | 27.301887 | 148 | 0.608846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 388 | 0.268141 |
f5fc2d7fa7991a4448eb7eb0d16d8da0aa0e1f7e | 173 | py | Python | graphic/introductions/graficoNormal.py | jonathanccardoso/data-science | d5977e5cd26b6a9ad05ef8940841158911a91586 | [
"MIT"
]
| null | null | null | graphic/introductions/graficoNormal.py | jonathanccardoso/data-science | d5977e5cd26b6a9ad05ef8940841158911a91586 | [
"MIT"
]
| null | null | null | graphic/introductions/graficoNormal.py | jonathanccardoso/data-science | d5977e5cd26b6a9ad05ef8940841158911a91586 | [
"MIT"
]
| null | null | null | import matplotlib.pyplot as plt
x = [1, 2, 5]
y = [2, 3, 7]
plt.title("1 grafico com python")
# Axes
plt.xlabel("Eixo X")
plt.ylabel("Eixo Y")
plt.plot(x,y)
plt.show()
| 12.357143 | 33 | 0.630058 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.260116 |
f5fc99298c4f8aba96ad5b5882efa8fbf637939b | 421 | py | Python | makevideo.py | bitrogen/sorting-algorithms | f7eada32db9e0ce385878f49d79b3d6b8c09280a | [
"CC0-1.0"
]
| null | null | null | makevideo.py | bitrogen/sorting-algorithms | f7eada32db9e0ce385878f49d79b3d6b8c09280a | [
"CC0-1.0"
]
| 1 | 2021-04-05T20:20:30.000Z | 2021-04-05T20:22:41.000Z | makevideo.py | bitrogen/sorting-algorithms | f7eada32db9e0ce385878f49d79b3d6b8c09280a | [
"CC0-1.0"
]
| null | null | null | import cv2
import numpy
import glob
import os
images = []
path = os.getcwd()+"\\frames\\"
myVideo = cv2.VideoWriter("quicksort-1.mkv", cv2.VideoWriter_fourcc(*"DIVX"), 60, (1920,1080))
for filename in range(len(os.listdir(path))):
filename = f"frame-{filename}.png"
img = cv2.imread(f"{path}{filename}")
height, width, layers = img.shape
myVideo.write(img)
myVideo.release()
| 20.047619 | 95 | 0.638955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.182898 |
f5fce2318bd81cf7ddc8f556365d8f472f7cc726 | 18,008 | py | Python | darknet.py | sugey/pytorch-yolov3 | cb6b46fd798debca5d8d066eabb2bd2e6c679953 | [
"MIT"
]
| 3 | 2019-10-21T16:05:15.000Z | 2019-10-25T00:43:17.000Z | darknet.py | sugey/pytorch-yolov3 | cb6b46fd798debca5d8d066eabb2bd2e6c679953 | [
"MIT"
]
| null | null | null | darknet.py | sugey/pytorch-yolov3 | cb6b46fd798debca5d8d066eabb2bd2e6c679953 | [
"MIT"
]
| null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.layers import *
from model.build import *
import cv2
from model.utils import *
def get_test_input():
img = cv2.imread("images/dog-cycle-car.png")
img = cv2.resize(img, (416, 416)) # Resize to the input dimension
# BGR -> RGB | H X W C -> C X H X W
img_ = img[:, :, ::-1].transpose((2, 0, 1))
# Add a channel at 0 (for batch) | Normalise
img_ = img_[np.newaxis, :, :, :]/255.0
img_ = torch.from_numpy(img_).float() # Convert to float
img_ = Variable(img_) # Convert to Variable
return img_
class Darknet(nn.Module):
"""
Main Darknet class. It is a subclass of nn.Module
"""
def __init__(self, cfgfile):
super(Darknet, self).__init__()
# Translate our YOLOv3 CFG file to blocks
self.blocks = parse_cfg(cfgfile)
# Convert those blocks to a module list for Pytorch
self.net_info, self.module_list = create_modules(self.blocks)
# These are for loading the weights below
self.header = torch.IntTensor([0, 0, 0, 0])
self.seen = 0
def get_blocks(self):
"""
Getter function for blocks
Returns:
blocks
"""
return self.blocks
def get_module_list(self):
"""
Getter function for module_list
Returns:
module_list
"""
return self.module_list
# Main forward pass
def forward(self, x, CUDA):
"""
Does the forward pass
Params:
x: The input
CUDA: Use GPU to accelerate task
"""
detections = []
# We don't want the first block, that contains the network info
modules = self.blocks[1:]
# We cache the output feature maps of every layer in a dict outputs.
# The keys are the the indices of the layers, and the values are
# the feature maps. We can then search through the keys to look up
# a layers feature maps for route or shortcuts.
outputs = {}
write = 0
# Go through every module (layer)
for i in range(len(modules)):
# Get the module type value from the current index
module_type = (modules[i]["type"])
if module_type == "convolutional" or module_type == "upsample" or module_type == "maxpool":
                # self.module_list[i] is the nn.Sequential built for this block, so
                # calling it with the input x runs the layer's forward pass (the
                # convolution, upsample or maxpool) and returns the new feature map.
x = self.module_list[i](x)
# Set the key to the index, and set the value to the computed
# calculation of the block and the input
outputs[i] = x
elif module_type == "route":
layers = modules[i]["layers"]
# The two layers designated in the layer get turned into a list with indexes
# of 0 and 1
layers = [int(a) for a in layers]
# Route layers[0] is never greater than 0, so candidate for optimization deletion
if (layers[0]) > 0:
layers[0] = layers[0] - i
                # This happens only on the 2 smaller detection layers, i.e. on a 416x416 image,
# the 13x13 and 26x26 detection region levels
if len(layers) == 1:
# Grab the out put from the index plus the first value, usually
# a -4 in this situation. This is what allows a kind of independent route
# for the detection region layers. This will then go back and take the layer
# where the split happen, pull those weights forward past the detection
# layer, and prepare them as a piece of input for the next convolution.
x = outputs[i + (layers[0])]
else:
# These are the two large skip connections, from layers 37 -> 99 and 62 -> 87
if (layers[1]) > 0:
# Reset layer 1 to the difference between the desired layer index
# and the current layer. So, from 37 - 99 = (-62). We then add
# it to the current layer below in map2
layers[1] = layers[1] - i
# map1 is the output of the previous layer (layers[0] is always a
# negative number), here an upsample layer in the YOLO Cfg
map1 = outputs[i + layers[0]]
# map2 is the previous convolution to pull the data from
map2 = outputs[i + layers[1]]
# We're adding together the values of the outputs from the routed layers
# along the depth of the tensor since the param of 1 corresponds to
# the depth dimension. `Cat` method stands for concatenate.
x = torch.cat((map1, map2), 1)
# Set the key to the current module index, and set the dict value to the computed
# calculation of the block x variable
outputs[i] = x
elif module_type == "shortcut":
from_ = int(modules[i]["from"])
# Grab the output from the previous layer, as well as the `from` layer (which
# is always -3) before. This is either a downsampling, upsampling or shortcut
                # connection. This simply adds the weights together without the tensor
# concatenation you find in the routings. The is what creates the residual
# blocks throughout the YOLO network
# x = outputs[i-1] + outputs[i+from_]
x = outputs[i-1] + outputs[i+from_]
# Set the key to the current module index, and value to x variable calculation
outputs[i] = x
elif module_type == 'yolo':
# Get the anchor list
anchors = self.module_list[i][0].anchors
# Get the input dimensions
inp_dim = int(self.net_info["height"])
# Get the number of classes
num_classes = int(modules[i]["classes"])
# Output the result
x = x.data
# Run a prediction on a particular region size
x = predict_transform(x, inp_dim, anchors, num_classes, CUDA)
if type(x) == int:
continue
# If write = 0, that means this is the first detection
if not write:
detections = x
write = 1
                # Otherwise, concatenate the different predictions together along the
# depth of the tensor
else:
detections = torch.cat((detections, x), 1)
# Since this is a detection layer, we still need to pull the weights from the previous layer
                # output, so that we can use it as input to the next layer
outputs[i] = outputs[i-1]
try:
# After all the modules have been gone through, return the detections tensor, which is a
# combined tensor for all three region size
return detections
except:
return 0
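    # Illustrative shape note for the route/shortcut handling above (not part of
    # the original file): torch.cat along dim=1 stacks feature maps on the
    # channel/depth axis, while shortcut layers add tensors of identical shape
    # element-wise.
    # a = torch.zeros(1, 128, 52, 52); b = torch.zeros(1, 256, 52, 52)
    # torch.cat((a, b), 1).shape  # torch.Size([1, 384, 52, 52])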
def load_weights(self, weightfile):
"""
        Loads the weightfile. It is all 32-bit floats preceded by five 32-bit integers as a header. There
are only weights for convolution and batch_normalization layers.
Params:
weightfile: link to weightfile
Return:
loads weights
"""
# Open the weights file
fp = open(weightfile, "rb")
        # The first 5 int32 values are header information
        # 1. Major version number
        # 2. Minor version number
        # 3. Subversion number
        # 4-5. Images seen during training (stored as a 64-bit count,
        #      so it spans the last two int32 slots)
header = np.fromfile(fp, dtype=np.int32, count=5)
# Turn the numpy header file into a tensor
self.header = torch.from_numpy(header)
# The total number of images seen
self.seen = self.header[3]
# The rest of the values are the weights, let's load them up
# into a numpy
weights = np.fromfile(fp, dtype=np.float32)
# This variable keeps track of where we are in the weight list
# which is different than the module list
ptr = 0
# Let's go through every item in the module list of this
# instantiated class
for i in range(len(self.module_list)):
# We have to add one to this list because the first block
# is the netinfo block. This is different then the module
# list which took the netinfo block out
module_type = self.blocks[i + 1]["type"]
if module_type == "convolutional":
# Grab the current module
model = self.module_list[i]
try:
# If there is batch normalize on this convolutional layer
# let's grab that
batch_normalize = int(self.blocks[i+1]["batch_normalize"])
except:
batch_normalize = 0
# The first value in the model is the Conv2D module, so, for example
# Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
conv = model[0]
if (batch_normalize):
# The second value in the model is a BatchNorm2d module, so, for example
# BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
bn = model[1]
# Get the number of weights of Batch Norm Layer
# This is the first value in the module, so 32 in previous example
# PyTorch numel method stands for number of elements, which it returns
num_bn_biases = bn.bias.numel()
                    # Load the weights. Batch norm layers have a sequence of values stored
# for them in weights file. It goes:
# 1. bn_biases
# 2. bn_weights
# 3. bn_running mean
# 4. bn_running_var
# After those 4 items, then the convolutional weights are added, which
# we see once you exit this conditional loop
# Weight values are a numpy file, so we turn them into a tensor here via torch.
# We grab from the current ptr index, which is the (full file - header),
# and then add the number of biases for first section. We then increment the ptr
# variable so we can continue moving through the chunks of file data.
# First time through on 416, we get weights[0:32], so the first 32 bias values
bn_biases = torch.from_numpy(
weights[ptr:ptr + num_bn_biases])
ptr += num_bn_biases
# Grab the weights next. Following previous example, we get weights[32:64], which
# is the next chunk of 32 float values assigned to the weights for this
# batch norm layer
bn_weights = torch.from_numpy(
weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
                    # Grab the running_mean next. Following previous example, we get weights[64:96], which
# is the next chunk of 32 float values assigned to the running_mean for this
# batch norm layer
bn_running_mean = torch.from_numpy(
weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
# Grab the running variance next. Following previous example, we get weights[96:128],
# which is the next chunk of 32 float values assigned to the running_mean for this
# batch norm layer
bn_running_var = torch.from_numpy(
weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
                    # Cast the loaded weights into dims of model weights. This doesn't
# seem like it's necessary since all of these are currently in
# the proper tensor format. Under consideration for deletion
# under optimization
bn_biases = bn_biases.view_as(bn.bias.data)
bn_weights = bn_weights.view_as(bn.weight.data)
bn_running_mean = bn_running_mean.view_as(bn.running_mean)
bn_running_var = bn_running_var.view_as(bn.running_var)
# Copy all the tensor data pulled from the files to the
# model BatchNorm2d data (bn) which we can process
bn.bias.data.copy_(bn_biases)
bn.weight.data.copy_(bn_weights)
bn.running_mean.copy_(bn_running_mean)
bn.running_var.copy_(bn_running_var)
else:
# Remember the format for the model is:
# Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# The only places there are biases in convolution layers are in the
# pre-detection layers where there are 255. Three of them in the CFG.
num_biases = conv.bias.numel()
                    # Load the biases. Convolution layers have a sequence of values stored
# for them in weights file. It goes:
# 1. conv_biases
# 2. conv_weights
# Since we add the conv_weights outside this loop, we only have to focus
# on preparing the biases here. In 416 example, the first ptr and bias
# values are 56367712, 255, which is what we expect since the first
# detection layer isn't until layer 83 out of 106, far into the CFG
conv_biases = torch.from_numpy(
weights[ptr: ptr + num_biases])
ptr = ptr + num_biases
# reshape the loaded weights according to the dims of the model weights
# Again, tensors in proper shape so candidate for
# optimization deletion
conv_biases = conv_biases.view_as(conv.bias.data)
# Copy all the tensor data pulled from the files to the
# model Conv2d data (conv) which we can process
conv.bias.data.copy_(conv_biases)
# Total the weight slots for the Convolutional layers
# Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
num_weights = conv.weight.numel()
# Load the weights from the weights file into a tensor
# at the current ptr values plus the rest of chunk necessary
# from the file
conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])
# reset ptr to where we are in file
ptr = ptr + num_weights
# Reformat the weights tensor into a format that matches
# the model conv placeholder tensor
conv_weights = conv_weights.view_as(conv.weight.data)
# Copy the weights into the conv model
conv.weight.data.copy_(conv_weights)
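    # Worked example of the accounting above, using the first convolutional block
    # of the standard yolov3.cfg for illustration: 32 filters, 3 input channels,
    # a 3x3 kernel and batch normalisation consume 4 * 32 = 128 floats of BN
    # parameters (biases, weights, running mean, running variance) followed by
    # 32 * 3 * 3 * 3 = 864 convolutional weights, so ptr advances by 992 floats.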
def save_weights(self, savedfile, cutoff=0):
if cutoff <= 0:
cutoff = len(self.blocks) - 1
fp = open(savedfile, 'wb')
# Attach the header at the top of the file
self.header[3] = self.seen
header = self.header
header = header.numpy()
header.tofile(fp)
# Now, let us save the weights
for i in range(len(self.module_list)):
# We have to add one to this list because the first block
# is the netinfo block. This is different then the module
# list which took the netinfo block out
module_type = self.blocks[i+1]["type"]
if (module_type) == "convolutional":
# Grab the full module
model = self.module_list[i]
try:
# If this is a batch normalize layer
batch_normalize = int(self.blocks[i+1]["batch_normalize"])
except:
batch_normalize = 0
conv = model[0]
if (batch_normalize):
bn = model[1]
                    # If the parameters are on the GPU, copy them back to CPU first.
                    # We don't move the parameter itself off the GPU;
                    # instead, we copy the parameter and convert the copy to CPU.
                    # This is done because weights need to be saved during training.
cpu(bn.bias.data).numpy().tofile(fp)
cpu(bn.weight.data).numpy().tofile(fp)
cpu(bn.running_mean).numpy().tofile(fp)
cpu(bn.running_var).numpy().tofile(fp)
else:
cpu(conv.bias.data).numpy().tofile(fp)
# Let us save the weights for the Convolutional layers
cpu(conv.weight.data).numpy().tofile(fp)
model = Darknet("cfg/yolov3.cfg")
model.load_weights("yolov3.weights")
inp = get_test_input()
pred = model(inp, torch.cuda.is_available())
| 44.907731 | 108 | 0.549034 | 17,182 | 0.954131 | 0 | 0 | 0 | 0 | 0 | 0 | 8,953 | 0.497168 |
f5fd8ae5a3e3e11874751c948747bc877e5305d4 | 1,131 | py | Python | src/icemac/addressbook/browser/search/result/handler/test_manager.py | icemac/icemac.addressbook | 6197e6e01da922feb100dd0943576523050cd703 | ["BSD-2-Clause"] | 1 | 2020-03-26T20:16:44.000Z | 2020-03-26T20:16:44.000Z | src/icemac/addressbook/browser/search/result/handler/test_manager.py | icemac/icemac.addressbook | 6197e6e01da922feb100dd0943576523050cd703 | ["BSD-2-Clause"] | 2 | 2020-02-21T13:04:23.000Z | 2020-02-21T13:06:10.000Z | src/icemac/addressbook/browser/search/result/handler/test_manager.py | icemac/icemac.addressbook | 6197e6e01da922feb100dd0943576523050cd703 | ["BSD-2-Clause"] | null | null | null
SearchResultHandler)
def makeSRHandler(viewName):
"""Create a `SearchResultHandler` with the specified `viewName`."""
handler = SearchResultHandler(None, None, None, None)
handler.viewName = viewName
return handler
def test_manager__SearchResultHandler____eq____1():
"""It is equal when `viewName` is equal."""
assert makeSRHandler('@@asdf.html') == makeSRHandler('@@asdf.html')
def test_manager__SearchResultHandler____eq____2():
"""It is not equal with unequal `viewName`."""
# There is no __ne__ implemented!
assert not(makeSRHandler('@@foo.html') == makeSRHandler('@@bar.html'))
def test_manager__SearchResultHandler____eq____3():
"""It is not equal to anything else."""
# There is no __ne__ implemented!
assert not(makeSRHandler(None) == object())
def test_manager__SearchResultHandler____hash____1():
"""It is hashable.
It is only needed for Python 3, where defining an __eq__ method sets
__hash__ to None unless a __hash__ method is defined explicitly.
"""
assert hash(makeSRHandler(None)) is not None
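# --- Editor's illustrative sketch (not part of the original file) ---
# Background for the __hash__ test above: in Python 3, defining __eq__ on a
# class sets __hash__ to None unless __hash__ is also defined, which makes
# instances unhashable. The hypothetical class below only demonstrates that rule.
class _OnlyEqSketch:
    def __eq__(self, other):
        return isinstance(other, _OnlyEqSketch)

# hash(_OnlyEqSketch())  # would raise TypeError: unhashable type: '_OnlyEqSketch'
# --- end of editor's sketch ---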
| 31.416667 | 75 | 0.72237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 448 | 0.39611 |
eb03b18815a588a66491abb92833213166f65e34 | 2,271 | py | Python | superset/shuju_into_mysql.py | LCM1999/superset_secondary_dev | 293e3df9d46ef6096d35ee7d523ce5c7898902bc | ["Apache-2.0"] | 1 | 2021-06-29T05:36:30.000Z | 2021-06-29T05:36:30.000Z | superset/shuju_into_mysql.py | LCM1999/superset_secondary_dev | 293e3df9d46ef6096d35ee7d523ce5c7898902bc | ["Apache-2.0"] | null | null | null | superset/shuju_into_mysql.py | LCM1999/superset_secondary_dev | 293e3df9d46ef6096d35ee7d523ce5c7898902bc | ["Apache-2.0"] | null | null | null
import pymysql
import random
import string
import time
# def get_data():
# with open('E:\\QQ文档\\1420944066\\FileRecv\\Code (2)\\data\\nice looking data\\与gooddata里重复\\20_30(1).json', 'r') as f:
# camera_text = json.load(f) # parse each line of data
# print(camera_text)
# return camera_text
# def data_insert(text):
# db = pymysql.connect(host = "localhost",user = "root",password = "lxyroot",database = "superset-test")
# cur = db.cursor()
# try:
# cur.execute("drop table liutu_data")
# cur.execute("create table liutu_data(id int,name char(20),fillcolor char(20),time char(20),size_data TINYTEXT)")
# except:
# cur.execute("create table liutu_data(id int,name char(20),fillcolor char(20),time char(20),size_data TINYTEXT)")
# for i in text:
# for j in range(0,len(text[0]['size'])):
# sql="INSERT INTO liutu_data (id,name,fillcolor,time,size_data) VALUES ('"+str(i['id'])+"','"+i['name']+"','"+i['fillcolor']+"','"+str(j)+"','"+str(i['size'][j])+"');"
# cur.execute(sql)
# db.commit()
# cur.close()
def new_table():
db = pymysql.connect(host = "10.0.2.15",user = "mysqluser",password = "mysqlpw",database = "inventory")
cur = db.cursor()
#cur.execute("drop table refresh_data")
cur.execute("create table refresh_data(id int,name char(20),email char(20),view_data char(30))")
for i in range(0,30):
name = ''.join(random.sample(string.ascii_letters + string.digits, 8))
email = random.choice('abcdefghijklmnopqrstuvwxyz!@#$%^&*()')
view_data = random.random()*100
sql="INSERT INTO refresh_data (id,name,email,view_data) VALUES ("+str(i)+",'"+name+"','"+email+"','"+str(view_data)+"');"
print(sql)
cur.execute(sql)
db.commit()
return cur,db
def data_update(cur,update_num,db):
for i in range(0,update_num):
view_data = random.random()*100
sql = 'update refresh_data set view_data="'+str(view_data)+'" where id='+str(random.randint(1,30))+';'
cur.execute(sql)
db.commit()
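# --- Editor's illustrative sketch (not part of the original file) ---
# The functions above build their SQL by string concatenation. pymysql also
# accepts parameterized queries (cursor.execute(sql, args)), which avoids
# quoting and escaping problems. A hedged, minimal variant of data_update
# using the same table and columns:
def data_update_parameterized(cur, update_num, db):
    for _ in range(update_num):
        cur.execute(
            "UPDATE refresh_data SET view_data = %s WHERE id = %s",
            (random.random() * 100, random.randint(1, 30)),
        )
    db.commit()
# --- end of editor's sketch ---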
if __name__ == "__main__":
cur,db = new_table()
i = 0
while True:
time.sleep(5)
print('one update')
data_update(cur,20,db)
i = i+1
| 37.85 | 180 | 0.607221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,402 | 0.610361 |
eb03b84ad235ef7df8266830a1654259db309611 | 3,290 | py | Python | Experiments/create_mean_optimization_sets.py | ariel415el/PerceptualLossGLO-Pytorch | 7caa743b719cd95066103a69f3e78a70507de8b5 | ["MIT"] | null | null | null | Experiments/create_mean_optimization_sets.py | ariel415el/PerceptualLossGLO-Pytorch | 7caa743b719cd95066103a69f3e78a70507de8b5 | ["MIT"] | null | null | null | Experiments/create_mean_optimization_sets.py | ariel415el/PerceptualLossGLO-Pytorch | 7caa743b719cd95066103a69f3e78a70507de8b5 | ["MIT"] | null | null | null
import random
import cv2
import numpy as np
import torch
from Experiments.all import load_models, embedd_data, save_batch
from GenerativeModels.utils.data_utils import get_dataset
device = torch.device("cuda")
def sample_latent_neighbors(outputs_dir, models_dir):
"""Find nearest latent neighbors of data samples and create sets of original/reconstructed similar images """
# Load models
n = 32
train_dataset = get_dataset('ffhq', split='train', resize=128, val_percent=0.15)
encoder, generator = load_models(device, models_dir)
embeddings = embedd_data(train_dataset, encoder, 32, device)
for i in [11, 15, 16, 25, 48, 53, 60, 67, 68, 78, 122]:
os.makedirs(os.path.join(outputs_dir, os.path.basename(models_dir), f"data_neighbors{i}"), exist_ok=True)
dists = torch.norm(embeddings - embeddings[i], dim=1)
neighbor_indices = torch.argsort(dists)[:n]
neighbors = torch.from_numpy(np.array([train_dataset[x][1] for x in neighbor_indices]))
save_batch(neighbors, os.path.join(outputs_dir, os.path.basename(models_dir), f"data_neighbors{i}"))
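# --- Editor's illustrative sketch (not part of the original file) ---
# The neighbor selection above is an L2 nearest-neighbor lookup in embedding
# space. Self-contained demo with random stand-in embeddings (sizes made up):
_demo_embeddings = torch.randn(100, 16)
_demo_dists = torch.norm(_demo_embeddings - _demo_embeddings[0], dim=1)
_demo_neighbors = torch.argsort(_demo_dists)[:5]  # 5 closest rows; index 0 is itself
# --- end of editor's sketch ---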
def center_crop_image_to_square(img, edge_perc=None):
h = img.shape[0]
w = img.shape[1]
if h > w:
e = int(np.ceil((h - w) / 2))
img = img[e:-e]
elif h < w:
e = int(np.ceil((w - h) / 2))
img = img[:, e:-e]
if edge_perc:
z = int(img.shape[0] * edge_perc)
img = img[z:-z, z:-z]
return img
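# --- Editor's illustrative usage sketch (not part of the original file) ---
# center_crop_image_to_square on a 100x60 array keeps the central 60x60 square;
# with edge_perc=0.1 a further 10% is trimmed from every side, giving 48x48.
_demo_crop = center_crop_image_to_square(np.zeros((100, 60, 3)), edge_perc=0.1)
assert _demo_crop.shape == (48, 48, 3)
# --- end of editor's sketch ---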
def make_shift_sets(root, edge_size=7, zoom=0.2):
for path in os.listdir(root):
img = cv2.imread(os.path.join(root, path))
img = center_crop_image_to_square(img, zoom)
img = cv2.resize(img, (128+edge_size, 128 + edge_size))
dir_name = os.path.join(root, 'jitters', f"{os.path.splitext(path)[0]}_e-{edge_size}_z-{zoom}")
os.makedirs(dir_name, exist_ok=True)
for i, (x1, y1) in enumerate([(0, 0), (0, edge_size), (edge_size, 0), (edge_size, edge_size)]):
# x1 = np.random.randint(0, edge_size)
# y1 = np.random.randint(0, edge_size)
img2 = img[y1:img.shape[0] - edge_size + y1]
img2 = img2[:, x1:img.shape[1] - edge_size + x1]
img2 = cv2.resize(img2, (128, 128))
x = cv2.imwrite(os.path.join(dir_name, f"{i}.png"), img2)
print(x)
def create_shifted_colorfull_box_images():
im_dim = 128
n_images = 32
box_dim = 32
colors = [[128, 128, 255], [255, 128, 128], [128, 255, 128], [0, 128, 255], [255, 0, 128], [128, 255, 0]]
os.makedirs('color_box_dataset', exist_ok=True)
for i in range(n_images):
x = random.choice(range(0, im_dim - box_dim + 3, 3))
y = random.choice(range(0, im_dim - box_dim + 3, 3))
im = np.ones((im_dim, im_dim, 3)) * 127
im[y:y + box_dim, x:x + box_dim] = colors[i % len(colors)]
cv2.imwrite(f"color_box_dataset/{i}.png", im)
if __name__ == '__main__':
# sample_latent_neighbors("latent_neighbors_sets", 'trained_models/VGG-None_PT')
# sample_latent_neighbors("latent_neighbors_sets", 'trained_models/VGG-random')
make_shift_sets('/home/ariel/university/PerceptualLoss/PerceptualLossExperiments/style_transfer/imgs/textures')
# create_shifted_colorfull_box_images()
| 39.166667 | 115 | 0.643161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 678 | 0.206079 |