Dataset schema (⌀ marks a nullable column):

hexsha: stringlengths 40–40
size: int64 5–2.06M
ext: stringclasses 10 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3–248
max_stars_repo_name: stringlengths 5–125
max_stars_repo_head_hexsha: stringlengths 40–78
max_stars_repo_licenses: listlengths 1–10
max_stars_count: int64 1–191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24–24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24–24 ⌀
max_issues_repo_path: stringlengths 3–248
max_issues_repo_name: stringlengths 5–125
max_issues_repo_head_hexsha: stringlengths 40–78
max_issues_repo_licenses: listlengths 1–10
max_issues_count: int64 1–67k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24–24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24–24 ⌀
max_forks_repo_path: stringlengths 3–248
max_forks_repo_name: stringlengths 5–125
max_forks_repo_head_hexsha: stringlengths 40–78
max_forks_repo_licenses: listlengths 1–10
max_forks_count: int64 1–105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24–24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24–24 ⌀
content: stringlengths 5–2.06M
avg_line_length: float64 1–1.02M
max_line_length: int64 3–1.03M
alphanum_fraction: float64 0–1
count_classes: int64 0–1.6M
score_classes: float64 0–1
count_generators: int64 0–651k
score_generators: float64 0–1
count_decorators: int64 0–990k
score_decorators: float64 0–1
count_async_functions: int64 0–235k
score_async_functions: float64 0–1
count_documentation: int64 0–1.04M
score_documentation: float64 0–1

hexsha: 34a22dd4ad9ab46d6938c8ba8be9e6f6b3432bf1 | size: 497 | ext: py | lang: Python
max_stars_repo: quickkart_api/auth.py @ envaleed/quick-kart-api-deploy (head 2b962dce3bc5ba19d4e90cb86822c016d51f65c2) | licenses: ["MIT"] | count: null | events: null
max_issues_repo: quickkart_api/auth.py @ envaleed/quick-kart-api-deploy (head 2b962dce3bc5ba19d4e90cb86822c016d51f65c2) | licenses: ["MIT"] | count: null | events: null
max_forks_repo: quickkart_api/auth.py @ envaleed/quick-kart-api-deploy (head 2b962dce3bc5ba19d4e90cb86822c016d51f65c2) | licenses: ["MIT"] | count: null | events: null
content:
from quickkart_api import app
from quickkart_api.models import Users
from flask_jwt import JWT, jwt_required, current_identity
from flask import abort


def authenticate(username, password):
    """Return the user when the credentials are valid, otherwise abort."""
    user = Users.query.filter_by(username=username).first()
    if user and user.check_password(password):
        return user
    # A failed login is a client-side error, so 401 is the appropriate
    # status code rather than a generic 500 server error.
    return abort(401, "Authentication failed")


def identity(payload):
    """Resolve the user referenced by the token payload's identity claim."""
    return Users.query.filter(Users.id == payload['identity']).scalar()


jwt = JWT(app, authenticate, identity)
stats: avg_line_length 33.133333 | max_line_length 71 | alphanum_fraction 0.7666 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 33/0.066398
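
For context: Flask-JWT wires authenticate/identity into a default POST /auth endpoint. A minimal client-side sketch, assuming a local dev server and made-up credentials (not part of the repository):

import requests

# Flask-JWT 0.3.x exposes POST /auth by default; it exchanges JSON
# credentials for a token (URL and credentials below are placeholders).
resp = requests.post(
    "http://localhost:5000/auth",
    json={"username": "alice", "password": "secret"},
)
token = resp.json()["access_token"]

# Views decorated with @jwt_required() expect the JWT header scheme.
headers = {"Authorization": "JWT " + token}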

hexsha: 34a2c4ef21ebf75e62e5e53df18db5c3d07d0336 | size: 1,665 | ext: py | lang: Python
max_stars_repo: testdoc/conf.py @ coding-to-music/sphinx-seo-meta-twitter (head 23ec10e32ae272d5024d2468a87813ecabd30bfa) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_issues_repo: testdoc/conf.py @ coding-to-music/sphinx-seo-meta-twitter (head 23ec10e32ae272d5024d2468a87813ecabd30bfa) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks_repo: testdoc/conf.py @ coding-to-music/sphinx-seo-meta-twitter (head 23ec10e32ae272d5024d2468a87813ecabd30bfa) | licenses: ["BSD-3-Clause"] | count: null | events: null
content:
# -*- coding: utf-8 -*-
import sys, os
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.seometatwitter']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinx-seometatwitter'
copyright = u'2021'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6.0'
# The full version, including alpha/beta/rc tags.
release = '0.6.0'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'tweettestdoc'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tweettest', u'tweettest Documentation',
[u'test'], 1)
]
stats: avg_line_length 27.295082 | max_line_length 80 | alphanum_fraction 0.713514 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 1,359/0.816216

hexsha: 34a3b4a7e1966e32d27cb46659df093776257437 | size: 69 | ext: py | lang: Python
max_stars_repo: omnia_timeseries/__init__.py @ equinor/omnia-timeseries-python (head 02cb4fe5eef3703725cb16f1a3d2c7094b3d623d) | licenses: ["MIT"] | count: 5 | events: 2021-06-18T10:09:09.000Z – 2022-03-04T13:14:57.000Z
max_issues_repo: omnia_timeseries/__init__.py @ equinor/omnia-timeseries-python (head 02cb4fe5eef3703725cb16f1a3d2c7094b3d623d) | licenses: ["MIT"] | count: 3 | events: 2021-05-27T08:49:10.000Z – 2021-11-12T11:17:21.000Z
max_forks_repo: omnia_timeseries/__init__.py @ equinor/omnia-timeseries-python (head 02cb4fe5eef3703725cb16f1a3d2c7094b3d623d) | licenses: ["MIT"] | count: 1 | events: 2021-10-06T09:39:08.000Z – 2021-10-06T09:39:08.000Z
content:
from omnia_timeseries.api import TimeseriesAPI, TimeseriesEnvironment
stats: avg_line_length 69 | max_line_length 69 | alphanum_fraction 0.913043 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 0/0

hexsha: 34a3c5979c5216c22bb261f3a724f1a3a6ea121a | size: 799 | ext: py | lang: Python
max_stars_repo: corehq/apps/smsforms/util.py @ rochakchauhan/commcare-hq (head aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_issues_repo: corehq/apps/smsforms/util.py @ rochakchauhan/commcare-hq (head aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236) | licenses: ["BSD-3-Clause"] | count: 1 | events: 2021-06-02T04:45:16.000Z – 2021-06-02T04:45:16.000Z
max_forks_repo: corehq/apps/smsforms/util.py @ rochakchauhan/commcare-hq (head aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236) | licenses: ["BSD-3-Clause"] | count: null | events: null
content:
from dimagi.utils.couch import CriticalSection
from corehq.apps.receiverwrapper.util import submit_form_locally


def form_requires_input(form):
    """
    Returns True if the form has at least one question that requires input
    """
    for question in form.get_questions([]):
        if question["tag"] not in ("trigger", "label", "hidden"):
            return True
    return False


def process_sms_form_complete(session, form):
    result = submit_form_locally(form, session.domain, app_id=session.app_id, partial_submission=False)
    session.submission_id = result.xform.form_id
    session.mark_completed(True)
    session.save()


def critical_section_for_smsforms_sessions(contact_id):
    return CriticalSection(['smsforms-sessions-lock-for-contact-%s' % contact_id], timeout=5 * 60)
stats: avg_line_length 30.730769 | max_line_length 103 | alphanum_fraction 0.738423 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 154/0.192741
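
A minimal usage sketch for the lock helper above (the contact id is hypothetical). CriticalSection is used as a context manager, so concurrent updates to the same contact's SMS form session are serialized:

with critical_section_for_smsforms_sessions("contact-123"):
    # read/modify/close the contact's session safely here
    pass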

hexsha: 34a40b68218e951dfdb50b23863e768a1098e44d | size: 1,531 | ext: py | lang: Python
max_stars_repo: wssnet/utility/tawss_utils.py @ EdwardFerdian/WSSNet (head b5d2916348e834a5dc5d0c06b001059b2a020080) | licenses: ["MIT"] | count: 2 | events: 2022-02-15T12:41:02.000Z – 2022-03-15T04:46:10.000Z
max_issues_repo: wssnet/utility/tawss_utils.py @ EdwardFerdian/WSSNet (head b5d2916348e834a5dc5d0c06b001059b2a020080) | licenses: ["MIT"] | count: null | events: null
max_forks_repo: wssnet/utility/tawss_utils.py @ EdwardFerdian/WSSNet (head b5d2916348e834a5dc5d0c06b001059b2a020080) | licenses: ["MIT"] | count: null | events: null
content:
import numpy as np
from scipy.integrate import simps  # Simpson's rule (renamed to `simpson` in newer SciPy)


def get_wss_magnitude(wss_vector):
    if wss_vector.shape[-1] != 3:
        return wss_vector
    return np.sum(wss_vector ** 2, axis=-1) ** 0.5


def get_tawss(wss, dt):
    x = np.arange(0, len(wss))
    x = np.asarray(x) * dt
    wss_int = simps(wss, x, axis=0)
    return wss_int / (x[-1] - x[0])


def get_osi(wss_vector, dt):
    eps = 1e-12
    if wss_vector.shape[-1] != 3:
        return np.zeros((wss_vector.shape[1], wss_vector.shape[2]))
    x = np.arange(0, len(wss_vector))
    x = np.asarray(x) * dt
    # numerator: magnitude of the time-integrated WSS vector
    wss_x = simps(wss_vector[..., 0], x, axis=0)
    wss_y = simps(wss_vector[..., 1], x, axis=0)
    wss_z = simps(wss_vector[..., 2], x, axis=0)
    wss_mag = (wss_x ** 2 + wss_y ** 2 + wss_z ** 2) ** 0.5
    # denominator: time integral of the WSS magnitude
    denom = get_wss_magnitude(wss_vector)
    denom = simps(denom, x, axis=0)
    frac = (wss_mag + eps) / (denom + eps)
    osi = 0.5 * (1 - frac)
    return osi


def get_osi_discrete(wss_vector):
    if wss_vector.shape[-1] != 3:
        return np.zeros((wss_vector.shape[1], wss_vector.shape[2]))
    # numerator: magnitude of the summed WSS vector
    wss_x = np.sum(wss_vector[..., 0], axis=0)
    wss_y = np.sum(wss_vector[..., 1], axis=0)
    wss_z = np.sum(wss_vector[..., 2], axis=0)
    wss_mag = (wss_x ** 2 + wss_y ** 2 + wss_z ** 2) ** 0.5
    # denominator: sum of the WSS magnitudes over time
    denom = get_wss_magnitude(wss_vector)
    denom = np.sum(denom, axis=0)
    frac = wss_mag / denom
    osi = 0.5 * (1 - frac)
    return osi
stats: avg_line_length 24.301587 | max_line_length 67 | alphanum_fraction 0.573481 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 48/0.031352
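
A minimal usage sketch with synthetic data (the array shapes and time step are assumptions): WSS vectors sampled on a 4x4 surface grid over 20 time steps.

import numpy as np

wss_vec = np.random.rand(20, 4, 4, 3)  # (time, rows, cols, xyz components)
dt = 0.05                              # assumed sampling interval in seconds

wss_mag = get_wss_magnitude(wss_vec)   # (20, 4, 4) magnitudes per time step
tawss = get_tawss(wss_mag, dt)         # time-averaged WSS per grid point
osi = get_osi(wss_vec, dt)             # oscillatory shear index, in [0, 0.5]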

hexsha: 34a414f71bdbac2f19072c327b891b149dfefa34 | size: 6,511 | ext: py | lang: Python
max_stars_repo: dash/lib/events/servermanagement.py @ wjwwood/open-robotics-platform (head c417f1e4e381cdbbe88ba9ad4dea3bdf9840d3d5) | licenses: ["MIT"] | count: null | events: null
max_issues_repo: dash/lib/events/servermanagement.py @ wjwwood/open-robotics-platform (head c417f1e4e381cdbbe88ba9ad4dea3bdf9840d3d5) | licenses: ["MIT"] | count: null | events: null
max_forks_repo: dash/lib/events/servermanagement.py @ wjwwood/open-robotics-platform (head c417f1e4e381cdbbe88ba9ad4dea3bdf9840d3d5) | licenses: ["MIT"] | count: null | events: null
content:
#!/usr/bin/env python -OO
# encoding: utf-8
###########
# ORP - Open Robotics Platform
#
# Copyright (c) 2010 John Harrison, William Woodall
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##########
"""
servermanagement.py - Contains components related to remote server management
Created by William Woodall on 2010-11-03.
"""
__author__ = "William Woodall"
__copyright__ = "Copyright (c) 2010 John Harrison, William Woodall"
### Imports ###
# Standard Python Libraries
import sys
import os
import logging
import thread
import xmlrpclib
import copy
import traceback
# Other libraries
import lib.elements as elements
try: # try to catch any missing dependancies
# wx for window elements
PKGNAME = 'wxpython'
import wx
import wx.aui as aui
del PKGNAME
except ImportError as PKG_ERROR: # We are missing something, let them know...
sys.stderr.write(str(PKG_ERROR)+"\nYou might not have the "+PKGNAME+" \
module, try 'easy_install "+PKGNAME+"', else consult google.")
### Functions ###
def sync_files(files):
cc_files, hwm_files = files
sync_files_helper(cc_files, "files", elements.MAIN.files[0])
sync_files_helper(hwm_files, "modules", elements.MAIN.files[1])
# buildListing()
elements.MAIN.project_drawer.updateList()
def sync_files_helper(files, root_folder, base_array):
local_array = copy.deepcopy(base_array)
for file in files:
try:
path_array = file.split(os.sep)
parent_array = file_array = local_array
file_index_reference = None
try:
if len(path_array):
for x in path_array:
parent_array = file_array
file_array = file_array[x]
file_index_reference = x
except KeyError as filename_error:
pass # this is supposed to happen, we handle it below
if file_array[1] == files[file]:
# Same file,
pass
elif file_array[1] > files[file]:
# Local copy is newer
contents = open(file_array[0], 'r').read()
elements.REMOTE_SERVER.write(os.path.join(root_folder, file), contents, file_array[1])
elif file_array[1] < files[file]:
# Server Copy is newer
# this is handling the key error above
file_path = os.path.join(os.getcwd(), root_folder, file)
with open(file_path, 'w+') as fp:
fp.write(elements.REMOTE_SERVER.getFileContents(os.path.join(root_folder, file)))
os.utime(file_path, (files[file], files[file]))
del parent_array[file_index_reference]
except KeyError:
try:
if file.find(os.sep) != -1:
os.makedirs(os.path.join(os.getcwd(), root_folder, os.path.split(file)[0]))
file_path = os.path.join(os.getcwd(), root_folder, file)
with open(file_path, 'w+') as fp:
fp.write(elements.REMOTE_SERVER.getFileContents(os.path.join(root_folder, file)))
os.utime(file_path, (files[file], files[file]))
except Exception as e:
traceback.print_exc(file=sys.stdout)
walk_and_send_files(root_folder, local_array)
def walk_and_send_files(root, list):
"""Walks a hash and sends the files to the remote server."""
if isinstance(list, dict):
for file in list:
if file[0] == '.':
continue
new_root = os.path.join(root, file)
walk_and_send_files(new_root, list[file])
else:
file_handler = open(list[0], 'r')
elements.REMOTE_SERVER.write(root, file_handler.read(), list[1])
def connect(event):
"""Connects to the remote server, using the info in remote server text box"""
# Connect to the remote server
location = elements.TOOLBAR.server_addr.GetValue()
try:
elements.REMOTE_SERVER = xmlrpclib.Server('http://' + str(location) + ':7003/')
elements.REMOTE_SERVER.connect()
elements.TOOLBAR.connect_button.SetLabel('Disconnect')
elements.TOOLBAR.connect_button.Bind(wx.EVT_BUTTON, disconnect)
elements.MAIN.SetStatusText('Connected')
except Exception as error:
elements.MAIN.log.error(str(error))
return
# Activate Buttons
elements.TOOLBAR.send_button.Enable()
elements.TOOLBAR.config_button.Enable()
elements.TOOLBAR.run_button.Enable()
elements.TOOLBAR.shutdown_button.Enable()
elements.TOOLBAR.restart_button.Enable()
elements.TOOLBAR.RC_button.Enable()
# Synchronize Files
sync_files(elements.REMOTE_SERVER.fileSync())
def disconnect(event):
"""Attempts to disconnect from the remote server"""
elements.TOOLBAR.connect_button.SetLabel('Connect')
elements.TOOLBAR.send_button.Disable()
elements.TOOLBAR.run_button.Disable()
elements.TOOLBAR.shutdown_button.Disable()
elements.TOOLBAR.restart_button.Disable()
elements.TOOLBAR.connect_button.Bind(wx.EVT_BUTTON, connect)
elements.REMOTE_SERVER.disconnect()
elements.REMOTE_SERVER.remote_server = None
elements.MAIN.SetStatusText('Disconnected')
def restart(event):
"""Calls the remote server to restart"""
elements.REMOTE_SERVER.restart()
def shutdown(event):
"""Shuts down the remote server"""
elements.REMOTE_SERVER.shutdown()
disconnect(None)
stats: avg_line_length 38.526627 | max_line_length 102 | alphanum_fraction 0.665643 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 2,247/0.345108

hexsha: 34a4b62edf263b2fe76869067f5c2acf0eed223a | size: 1,661 | ext: py | lang: Python
max_stars_repo: test/uma/rp/check.py @ rohe/oictest (head f6f0800220befd5983b8cb34a5c984f98855d089) | licenses: ["Apache-2.0"] | count: 32 | events: 2015-01-02T20:15:17.000Z – 2020-02-15T20:46:25.000Z
max_issues_repo: test/uma/rp/check.py @ rohe/oictest (head f6f0800220befd5983b8cb34a5c984f98855d089) | licenses: ["Apache-2.0"] | count: 8 | events: 2015-02-23T19:48:53.000Z – 2016-01-20T08:24:05.000Z
max_forks_repo: test/uma/rp/check.py @ rohe/oictest (head f6f0800220befd5983b8cb34a5c984f98855d089) | licenses: ["Apache-2.0"] | count: 17 | events: 2015-01-02T20:15:22.000Z – 2022-03-22T22:58:28.000Z
content:
import inspect
import sys
from uma import message
from rrtest.check import Error, get_protocol_response
from rrtest import Unknown
from oictest import check
__author__ = 'roland'
CLASS_CACHE = {}
class MatchResourceSet(Error):
"""
Verify that the returned resource set is as expected
"""
cid = "match-resource-set"
msg = ""
def _func(self, conv):
res = get_protocol_response(conv, message.ResourceSetDescription)
inst, txt = res[-1]
rset = self._kwargs["rset"]
# All but _id and _rev should be equal
for key in message.ResourceSetDescription.c_param.keys():
if key in ["_id", "_rev"]:
continue
try:
assert rset[key] == inst[key]
except AssertionError:
self._message = "Not the resource set I expected"
self._status = self.status
break
except KeyError:
try:
assert key not in rset and key not in inst
except AssertionError:
self._message = "Not the resource set I expected"
self._status = self.status
break
return {}
def factory(cid, classes=CLASS_CACHE):
if len(classes) == 0:
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj):
try:
classes[obj.cid] = obj
except AttributeError:
pass
if cid in classes:
return classes[cid]
else:
classes = {}
return check.factory(cid, classes)
stats: avg_line_length 27.683333 | max_line_length 73 | alphanum_fraction 0.556291 | count/score: classes 1,034/0.622517, generators 0/0, decorators 0/0, async_functions 0/0, documentation 219/0.131848
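
A minimal usage sketch for the factory above: check classes are cached and looked up by their string cid, and unknown ids fall through to oictest's generic check.factory.

cls = factory("match-resource-set")
assert cls is MatchResourceSet  # the class registered under that cid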

hexsha: 34a59aa4fd323b18e2045c4173ab3b0589d86fd9 | size: 10,959 | ext: py | lang: Python
max_stars_repo: lib/utils/general.py @ yingbiaoluo/ocr_pytorch (head 7d9163f7d6d557d83e2f50a39a3219f330f0cf84) | licenses: ["MIT"] | count: null | events: null
max_issues_repo: lib/utils/general.py @ yingbiaoluo/ocr_pytorch (head 7d9163f7d6d557d83e2f50a39a3219f330f0cf84) | licenses: ["MIT"] | count: null | events: null
max_forks_repo: lib/utils/general.py @ yingbiaoluo/ocr_pytorch (head 7d9163f7d6d557d83e2f50a39a3219f330f0cf84) | licenses: ["MIT"] | count: null | events: null
content:
import os
import cv2
import glob
import logging
import numpy as np
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.distributed as dist
class strLabelConverter(object):
def __init__(self, alphabet_):
"""
        String-label converter: maps each character to an integer class index.
"""
self.alphabet = alphabet_ + 'Ω'
self.dict = {}
for i, char in enumerate(self.alphabet):
self.dict[char] = i + 1
def encode(self, text):
length = []
result = []
for item in text:
item = item.replace(' ', '').replace('\t', '')
length.append(len(item))
for char in item:
                if char not in self.alphabet:
                    print('char {} not in alphabets!'.format(char))
                    # fall back to the sentinel appended in __init__
                    # ('-' may be absent from the alphabet and would raise KeyError)
                    char = 'Ω'
                index = self.dict[char]
result.append(index)
text = result
return torch.IntTensor(text), torch.IntTensor(length)
def decode(self, t, length, raw=False):
"""Decode encoded texts back into strs.
Args:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
Raises:
AssertionError: when the texts and its length does not match.
Returns:
text (str or list of str): texts to convert.
"""
        if length.numel() == 1:  # only a single element, i.e. single-text mode
length = length[0]
assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
if raw:
return ''.join([self.alphabet[i - 1] for i in t])
else:
char_list = []
for i in range(length):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
char_list.append(self.alphabet[t[i] - 1])
return ''.join(char_list)
else:
# batch mode
assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
texts = []
index = 0
for i in range(length.numel()):
l = length[i]
texts.append(
self.decode(
t[index:index + l], torch.IntTensor([l]), raw=raw))
index += l
return texts
class averager(object):
"""Compute average for `torch.Variable` and `torch.Tensor`. """
def __init__(self):
self.reset()
def add(self, v):
if isinstance(v, Variable):
count = v.data.numel()
v = v.data.sum()
elif isinstance(v, torch.Tensor):
count = v.numel()
v = v.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
def generate_alphabets(alphabet_path):
"""
    Read the text labels and build the alphabet (character table).
    :param alphabet_path: path to the text label file.
    :return: the alphabet string.
"""
with open(alphabet_path, 'r', encoding='utf-8') as file:
alphabet = sorted(list(set(repr(''.join(file.readlines())))))
if ' ' in alphabet:
alphabet.remove(' ')
alphabet = ''.join(alphabet)
return alphabet
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def lev_ratio(str_a, str_b):
"""
    Levenshtein (edit-distance) ratio, used to measure the similarity between words.
    :param str_a: first string.
    :param str_b: second string.
    :return: similarity ratio in [0, 1].
    """
    str_a = str_a.lower()
    str_b = str_b.lower()
    # use the builtin int dtype (np.int was removed from recent NumPy releases)
    matrix_ed = np.zeros((len(str_a) + 1, len(str_b) + 1), dtype=int)
    matrix_ed[0] = np.arange(len(str_b) + 1)
    matrix_ed[:, 0] = np.arange(len(str_a) + 1)
    for i in range(1, len(str_a) + 1):
        for j in range(1, len(str_b) + 1):
            # cost of deleting a_i
            dist_1 = matrix_ed[i - 1, j] + 1
            # cost of inserting b_j
            dist_2 = matrix_ed[i, j - 1] + 1
            # cost of substituting a_i with b_j
            dist_3 = matrix_ed[i - 1, j - 1] + (2 if str_a[i - 1] != str_b[j - 1] else 0)
            # keep the minimum of the three edit costs
            matrix_ed[i, j] = np.min([dist_1, dist_2, dist_3])
levenshtein_distance = matrix_ed[-1, -1]
sum = len(str_a) + len(str_b)
levenshtein_ratio = (sum - levenshtein_distance) / sum
return levenshtein_ratio
def set_logging():
logging.basicConfig(
        format="%(asctime)s %(message)s",  # output format; %(message)s is the log message text
        level=logging.INFO)  # log level (the default would be logging.WARNING)
def get_latest_run(search_dir='./runs'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def check_file(file):
# Search for file if not found
if os.path.isfile(file) or file == '':
return file
else:
        files = glob.glob('./**/' + file, recursive=True)  # '**' matches all files, dirs, and subdirs recursively
assert len(files), 'File Not Found: %s' % file # assert file was found
return files[0] # return first file if multiple found
def increment_dir(dir, comment=''):
# Increments a directory runs/exp1 --> runs/exp2_comment
n = 0 # number
dir = str(Path(dir)) # os-agnostic
d = sorted(glob.glob(dir + '*')) # directories
if len(d):
n = max([int(x[len(dir):x.find('_') if '_' in x else None]) for x in d]) + 1 # increment
return dir + str(n) + ('_' + comment if comment else '')
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
def denoise(image):
"""
    Denoise a grayscale image. (Note: cv2.fastNlMeansDenoising is slow, so this function should be avoided when speed matters.)
"""
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
dst = cv2.fastNlMeansDenoising(image, None, h=10, templateWindowSize=7, searchWindowSize=21)
ret, image = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return image
def resize_padding(image, height, width):
# resize
h, w, c = image.shape
image = cv2.resize(image, (0, 0), fx=height / h, fy=height / h, interpolation=cv2.INTER_LINEAR)
# padding
h, w, c = image.shape
img = 255. * np.ones((height, width, c))
if w < width:
img[:, :w, :] = image
else:
r = height / h
img = cv2.resize(image, (0, 0), fx=r, fy=r, interpolation=cv2.INTER_LINEAR)
return img
def padding_image_batch(image_batch, height=32, width=480):
aspect_ratios = []
for image in image_batch:
h, w, c = image.shape
aspect_ratios.append(w/h)
max_len = int(np.ceil(32 * max(aspect_ratios)))
pad_len = max_len if max_len > width else width
imgs = []
for image in image_batch:
img = resize_padding(image, height, pad_len)
img = np.transpose(img, (2, 0, 1))
imgs.append(img)
img_batch = torch.from_numpy(np.array(imgs)) / 255.
return img_batch.float()
logger_initialized = {}
def get_logger(name, log_file=None, log_level=logging.INFO):
"""Initialize and get a logger by name.
If the logger has not been initialized, this method will initialize the
logger by adding one or two handlers, otherwise the initialized logger will
be directly returned. During initialization, a StreamHandler will always be
added. If `log_file` is specified and the process rank is 0, a FileHandler
will also be added.
Args:
name (str): Logger name.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the logger.
log_level (int): The logger level. Note that only the process of
rank 0 is affected, and other processes will set the level to
"Error" thus be silent most of the time.
Returns:
logging.Logger: The expected logger.
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
# handle hierarchical names
# e.g., logger "a" is initialized, then logger "a.b" will skip the
# initialization since it is a child of "a".
for logger_name in logger_initialized:
if name.startswith(logger_name):
return logger
stream_handler = logging.StreamHandler()
handlers = [stream_handler]
if dist.is_available() and dist.is_initialized(): # True False
rank = dist.get_rank()
else:
rank = 0
# only rank 0 will add a FileHandler
if rank == 0 and log_file is not None:
file_handler = logging.FileHandler(log_file, 'w')
handlers.append(file_handler)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(log_level)
logger.addHandler(handler)
if rank == 0:
logger.setLevel(log_level)
else:
logger.setLevel(logging.ERROR)
logger_initialized[name] = True
return logger
stats: avg_line_length 31.582133 | max_line_length 136 | alphanum_fraction 0.57195 | count/score: classes 4,103/0.364646, generators 0/0, decorators 0/0, async_functions 0/0, documentation 3,188/0.283327
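
A minimal usage sketch for strLabelConverter (the alphabet and texts are made up): encode text labels for CTC training, then decode a raw prediction. Index 0 acts as the CTC blank, and repeated indices collapse during decoding.

import torch

converter = strLabelConverter('abcdefghijklmnopqrstuvwxyz')
targets, lengths = converter.encode(['hello', 'ocr'])  # flat labels + per-text lengths

# A fake network output: blanks (0) are stripped and repeats collapse.
t = torch.IntTensor([8, 8, 0, 5, 12, 12, 0, 12, 15])
print(converter.decode(t, torch.IntTensor([9])))       # prints: hello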

hexsha: 34a5f721af5cc589bff5b78d011f713fac9b79a1 | size: 211 | ext: py | lang: Python
max_stars_repo: smoothie/plugins/__init__.py @ PiterPentester/smoothie (head 810709273c3d7bb975aca1f44062d39d0b33b678) | licenses: ["0BSD"] | count: 1 | events: 2021-02-12T00:24:45.000Z – 2021-02-12T00:24:45.000Z
max_issues_repo: smoothie/plugins/__init__.py @ PiterPentester/smoothie (head 810709273c3d7bb975aca1f44062d39d0b33b678) | licenses: ["0BSD"] | count: 1 | events: 2021-03-26T00:37:50.000Z – 2021-03-26T00:37:50.000Z
max_forks_repo: smoothie/plugins/__init__.py @ PiterPentester/smoothie (head 810709273c3d7bb975aca1f44062d39d0b33b678) | licenses: ["0BSD"] | count: null | events: null
content:
#!/usr/bin/env python
from smoothie.plugins.interfaces import run as interfaces
from smoothie.plugins.list_networks import run as list_networks
from smoothie.plugins.target_network import run as target_network
stats: avg_line_length 35.166667 | max_line_length 65 | alphanum_fraction 0.848341 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 21/0.099526

hexsha: 34a704c37474e7d90bc81f141c9416313e5a36b0 | size: 3,912 | ext: py | lang: Python
max_stars_repo: tests/unit_tests/webhook_server_test.py @ Leanny/mmpy_bot (head fd16db4f1b07130fbf95568fb242387f0c7973e2) | licenses: ["MIT"] | count: 196 | events: 2018-05-31T23:45:34.000Z – 2022-03-20T09:06:55.000Z
max_issues_repo: tests/unit_tests/webhook_server_test.py @ Leanny/mmpy_bot (head fd16db4f1b07130fbf95568fb242387f0c7973e2) | licenses: ["MIT"] | count: 216 | events: 2018-05-31T19:18:46.000Z – 2022-03-21T17:09:38.000Z
max_forks_repo: tests/unit_tests/webhook_server_test.py @ tgly307/mmpy_bot (head 0ae52d9db86ac018f3d48dd52c11e4996f549073) | licenses: ["MIT"] | count: 107 | events: 2018-06-01T05:12:27.000Z – 2022-02-25T12:40:10.000Z
content:
import asyncio
import threading
import time
import pytest
from aiohttp import ClientSession
from mmpy_bot import Settings
from mmpy_bot.threadpool import ThreadPool
from mmpy_bot.webhook_server import NoResponse, WebHookServer
@pytest.fixture(scope="function")
def threadpool():
pool = ThreadPool(num_workers=1)
yield pool
pool.stop() # if the pool was started, stop it.
class TestWebHookServer:
def test_start(self, threadpool):
# Test server startup with a different port so it won't clash with the
# integration tests
server = WebHookServer(port=3281, url=Settings().WEBHOOK_HOST_URL)
threadpool.start_webhook_server_thread(server)
threadpool.start()
time.sleep(0.5)
assert server.running
asyncio.set_event_loop(asyncio.new_event_loop())
# Run the other tests sequentially
self.test_obtain_response(server)
self.test_process_webhook(server)
# Test shutdown procedure
threadpool.stop()
assert not server.running
@pytest.mark.skip("Called from test_start since we can't parallellize this.")
def test_obtain_response(self, server):
assert server.response_handlers == {}
# Wait for a response for request id 'test
await_response = asyncio.get_event_loop().create_future()
server.response_handlers["test"] = await_response
assert not server.response_handlers["test"].done()
# We have no futures waiting for request id 'nonexistent', so nothing should
# happen.
server.response_queue.put(("nonexistent", None))
time.sleep(0.1)
assert not server.response_handlers["test"].done()
# If a response comes in for request id 'test', it should be removed from the
# response handlers dict.
server.response_queue.put(("test", None))
time.sleep(0.1)
assert "test" not in server.response_handlers
@pytest.mark.skip("Called from test_start since we can't parallellize this.")
def test_process_webhook(self, server):
"""Checks whether an incoming webhook post request is correctly handled."""
assert server.event_queue.empty()
assert server.response_queue.empty()
assert server.response_handlers == {}
async def send_request(data):
async with ClientSession() as session:
try:
response = await session.post(
f"{server.url}:{server.port}/hooks/test_hook",
json=data,
timeout=1,
)
return await response.json()
except asyncio.exceptions.TimeoutError:
return None
asyncio.run(send_request({"text": "Hello!"}))
# Verify that a WebHookEvent corresponding to our request was added to the
# event queue.
assert server.event_queue.qsize() == 1
event = server.event_queue.get_nowait()
assert event.webhook_id == "test_hook"
assert event.text == "Hello!"
# Since there is no MessageHandler, we have to signal the server ourselves
server.response_queue.put((event.request_id, NoResponse))
time.sleep(0.1)
# Upon receiving the NoResponse, the server should have emptied the response
# queue and handlers.
assert server.response_queue.empty()
assert server.response_handlers == {}
# Test whether the web response is correctly passed through, if there is one
response = {"text": "test response"}
def provide_response():
event = server.event_queue.get()
server.response_queue.put((event.request_id, response))
thread = threading.Thread(target=provide_response)
thread.start()
assert asyncio.run(send_request({"text": "Hello!"})) == response
stats: avg_line_length 37.257143 | max_line_length 85 | alphanum_fraction 0.648773 | count/score: classes 3,521/0.900051, generators 122/0.031186, decorators 3,004/0.767894, async_functions 452/0.115542, documentation 1,104/0.282209

hexsha: 34a7b81a028bf0267f44066f902a91829b99db68 | size: 1,023 | ext: py | lang: Python
max_stars_repo: sample/Python/kabusapi_ranking.py @ HolyMartianEmpire/kabusapi (head c88ee958c272fb6e1dfde9a256e138c5760ea545) | licenses: ["MIT"] | count: 212 | events: 2020-08-20T09:10:35.000Z – 2022-03-31T08:05:21.000Z
max_issues_repo: sample/Python/kabusapi_ranking.py @ U2u14/kabusapi (head e41d0c3fcbcf6a1164ace9eac1a4d93685012dcb) | licenses: ["MIT"] | count: 496 | events: 2020-08-20T14:23:59.000Z – 2022-03-31T23:59:09.000Z
max_forks_repo: sample/Python/kabusapi_ranking.py @ U2u14/kabusapi (head e41d0c3fcbcf6a1164ace9eac1a4d93685012dcb) | licenses: ["MIT"] | count: 57 | events: 2020-08-20T10:40:07.000Z – 2022-03-07T06:28:01.000Z
content:
import urllib.request
import json
import pprint
url = 'http://localhost:18080/kabusapi/ranking'  # ?type=1&ExchangeDivision=ALL
# type - 1: top gainers (default), 2: top losers, 3: trading volume, 4: trading value,
#        5: tick count, 6: volume surge, 7: trading-value surge, 8: short margin interest up,
#        9: short margin interest down, 10: long margin interest up, 11: long margin interest down,
#        12: high margin ratio, 13: low margin ratio, 14: sector gainers, 15: sector losers
params = { 'type': 15 }
# ExchangeDivision - ALL: all markets (default), T: TSE overall, T1: TSE 1st Section,
#                    T2: TSE 2nd Section, TM: Mothers, JQ: JASDAQ, M: Nagoya, FK: Fukuoka, S: Sapporo
params['ExchangeDivision'] = 'S'
req = urllib.request.Request('{}?{}'.format(url, urllib.parse.urlencode(params)), method='GET')
req.add_header('Content-Type', 'application/json')
req.add_header('X-API-KEY', 'f2a3579e776f4b6b8015a96c8bdafdce')
try:
with urllib.request.urlopen(req) as res:
print(res.status, res.reason)
for header in res.getheaders():
print(header)
print()
content = json.loads(res.read())
pprint.pprint(content)
except urllib.error.HTTPError as e:
print(e)
content = json.loads(e.read())
pprint.pprint(content)
except Exception as e:
print(e)
stats: avg_line_length 39.346154 | max_line_length 170 | alphanum_fraction 0.69697 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 656/0.521877

hexsha: 34a95de29250fa6c98650b9aff0293a9f1a7b915 | size: 3,905 | ext: py | lang: Python
max_stars_repo: sfa_api/zones.py @ lboeman/solarforecastarbiter-api (head 9df598b5c638c3e36d0649e08e955b3ddc1b542d) | licenses: ["MIT"] | count: 7 | events: 2018-12-07T22:05:36.000Z – 2020-05-03T03:20:50.000Z
max_issues_repo: sfa_api/zones.py @ lboeman/solarforecastarbiter-api (head 9df598b5c638c3e36d0649e08e955b3ddc1b542d) | licenses: ["MIT"] | count: 220 | events: 2018-11-01T23:33:19.000Z – 2021-12-02T21:06:38.000Z
max_forks_repo: sfa_api/zones.py @ lboeman/solarforecastarbiter-api (head 9df598b5c638c3e36d0649e08e955b3ddc1b542d) | licenses: ["MIT"] | count: 3 | events: 2018-10-31T20:55:07.000Z – 2021-11-10T22:51:43.000Z
content:
from flask import Blueprint, jsonify, make_response
from flask.views import MethodView
from sfa_api import spec, json
from sfa_api.schema import ZoneListSchema
from sfa_api.utils.storage import get_storage
from sfa_api.utils.request_handling import validate_latitude_longitude
class AllZonesView(MethodView):
def get(self, *args):
"""
---
summary: List climate zones
description: List all climate zones that the user has access to.
tags:
- Climate Zones
responses:
200:
description: A list of climate zones.
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ZoneMetadata'
401:
$ref: '#/components/responses/401-Unauthorized'
"""
storage = get_storage()
zones = storage.list_zones()
return jsonify(ZoneListSchema(many=True).dump(zones))
class ZoneView(MethodView):
def get(self, zone, *args):
"""
---
summary: Get zone GeoJSON
description: Get the GeoJSON for a requested climate zone.
tags:
- Climate Zones
parameters:
- zone
responses:
200:
description: The GeoJSON definition for the climate zone
content:
application/geo+json:
schema:
type: object
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
storage = get_storage()
geojson = storage.read_climate_zone(zone.replace('+', ' '))
response = make_response(json.dumps(geojson), 200)
response.mimetype = 'application/geo+json'
return response
class SearchZones(MethodView):
def get(self, *args):
"""
---
summary: Find zones
description: Find all zones that the given point falls within
tags:
- Climate Zones
parameters:
- latitude
- longitude
responses:
200:
description: Sucessfully retrieved zones.
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ZoneMetadata'
400:
$ref: '#/components/responses/400-BadRequest'
401:
$ref: '#/components/responses/401-Unauthorized'
"""
lat, lon = validate_latitude_longitude()
storage = get_storage()
zones = storage.find_climate_zones(lat, lon)
return jsonify(ZoneListSchema(many=True).dump(zones))
spec.components.parameter(
'zone', 'path',
{
'schema': {
'type': 'string',
},
'description': "Climate zone name. Spaces may be replaced with +.",
'required': 'true',
'name': 'zone'
})
spec.components.parameter(
'latitude', 'query',
{
'name': 'latitude',
'required': True,
'description': 'The latitude (in degrees North) of the location.',
'schema': {
'type': 'float',
}
})
spec.components.parameter(
'longitude', 'query',
{
'name': 'longitude',
'required': True,
'description': 'The longitude (in degrees East of the Prime Meridian)'
' of the location.',
'schema': {
'type': 'float',
}
})
zone_blp = Blueprint(
'climatezones', 'climatezones', url_prefix='/climatezones',
)
zone_blp.add_url_rule('/', view_func=AllZonesView.as_view('all'))
zone_blp.add_url_rule('/<zone_str:zone>', view_func=ZoneView.as_view('single'))
zone_blp.add_url_rule('/search', view_func=SearchZones.as_view('search'))
stats: avg_line_length 29.141791 | max_line_length 79 | alphanum_fraction 0.557746 | count/score: classes 2,502/0.640717, generators 0/0, decorators 0/0, async_functions 0/0, documentation 2,270/0.581306
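
A hypothetical client call against a deployed instance of this API (the host and token are placeholders, not values from this module):

import requests

resp = requests.get(
    "https://<sfa-api-host>/climatezones/search",
    params={"latitude": 32.2, "longitude": -110.9},
    headers={"Authorization": "Bearer <access-token>"},
)
zones = resp.json()  # a list of ZoneMetadata objects on success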

hexsha: 34aad533350d28ac32ca85f8601235b5751af580 | size: 283 | ext: py | lang: Python
max_stars_repo: Mundo 3/Aula 16 - Tuplas/Ex074 - Maior e menor valor com tupla.py @ Ruben-974/Exercicios-Python (head 11fc5c7c64c1b5e5f54f59821847987d4878764c) | licenses: ["MIT"] | count: null | events: null
max_issues_repo: Mundo 3/Aula 16 - Tuplas/Ex074 - Maior e menor valor com tupla.py @ Ruben-974/Exercicios-Python (head 11fc5c7c64c1b5e5f54f59821847987d4878764c) | licenses: ["MIT"] | count: null | events: null
max_forks_repo: Mundo 3/Aula 16 - Tuplas/Ex074 - Maior e menor valor com tupla.py @ Ruben-974/Exercicios-Python (head 11fc5c7c64c1b5e5f54f59821847987d4878764c) | licenses: ["MIT"] | count: null | events: null
content:
from random import randint

n = (randint(1, 9), randint(1, 9), randint(1, 9), randint(1, 9), randint(1, 9))
print('The numbers drawn were: ', end='')
for c in n:
    print(c, end=' ')
print(f'\nThe highest value drawn was: {max(n)}')
print(f'The lowest value drawn was: {min(n)}')
stats: avg_line_length 35.375 | max_line_length 79 | alphanum_fraction 0.639576 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 116/0.408451

hexsha: 34abfc5ba2b2f363e90afe9dd53efcac19d00daf | size: 1,228 | ext: py | lang: Python
max_stars_repo: tests/unit/http_/test_adapters.py @ matt-mercer/localstack (head b69ba25e495c6ef889d33a050b216d0cd1035041) | licenses: ["Apache-2.0"] | count: 1 | events: 2022-03-17T07:22:23.000Z – 2022-03-17T07:22:23.000Z
max_issues_repo: tests/unit/http_/test_adapters.py @ matt-mercer/localstack (head b69ba25e495c6ef889d33a050b216d0cd1035041) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks_repo: tests/unit/http_/test_adapters.py @ matt-mercer/localstack (head b69ba25e495c6ef889d33a050b216d0cd1035041) | licenses: ["Apache-2.0"] | count: null | events: null
content:
import requests
from localstack.http import Response, Router
from localstack.http.adapters import RouterListener
from localstack.utils.testutil import proxy_server
class TestRouterListener:
def test_dispatching(self):
def endpoint(request, args):
resp = Response()
resp.set_json({"args": args})
return resp
router = Router()
router.add("/foo/<bar>", endpoint, methods=["GET"])
with proxy_server(RouterListener(router, fall_through=False)) as url:
response = requests.get(f"{url}/foo/ed")
assert response.ok
assert response.json() == {"args": {"bar": "ed"}}
# test with query
response = requests.get(f"{url}/foo/bar?hello=there")
assert response.ok
assert response.json() == {"args": {"bar": "bar"}}
# test invalid endpoint
response = requests.get(f"{url}/foo")
assert not response.ok
assert response.status_code == 404
# test non-allowed method
response = requests.post(f"{url}/foo/bar")
assert not response.ok
assert response.status_code == 405 # method not allowed
stats: avg_line_length 33.189189 | max_line_length 77 | alphanum_fraction 0.592834 | count/score: classes 1,060/0.863192, generators 0/0, decorators 0/0, async_functions 0/0, documentation 210/0.17101

hexsha: 34ac4b2375450822de672fe9deedac50930b777e | size: 647 | ext: py | lang: Python
max_stars_repo: setup.py @ ZaneColeRiley/ArcherProject (head 7d9ecf4953e3ea8ce3577321449549743eada34e) | licenses: ["MIT"] | count: null | events: null
max_issues_repo: setup.py @ ZaneColeRiley/ArcherProject (head 7d9ecf4953e3ea8ce3577321449549743eada34e) | licenses: ["MIT"] | count: null | events: null
max_forks_repo: setup.py @ ZaneColeRiley/ArcherProject (head 7d9ecf4953e3ea8ce3577321449549743eada34e) | licenses: ["MIT"] | count: null | events: null
content:
from cx_Freeze import setup, Executable
import sys

base = None
if sys.platform == "win32":
    base = "Win32GUI"

executables = [Executable("Archer.py", base=base, icon="favicon.ico")]

build_exe_options = {
    "packages": ["tkinter", "mysql", "PIL", "time", "requests", "os",
                 "smtplib", "datetime", "pyAesCrypt"],
    "include_files": ["Screen_image.jpg", "favicon.ico", "Admin_screen.jpg",
                      "Screen_image_small.jpg", "Journal.jpg", "db.sqlite3"],
}

setup(name="Archer",
      version="1.0.0",
      options={"build_exe": build_exe_options},
      description="",
      executables=executables,
      requires=['requests', 'PIL', 'mysql', "smtplib", "tkinter", "time", "pyAesCrypt"])
stats: avg_line_length 40.4375 | max_line_length 264 | alphanum_fraction 0.638331 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 322/0.497682

hexsha: 34ac94f8711db1745f63a3c064eaa86f3dde0de5 | size: 2,772 | ext: py | lang: Python
max_stars_repo: WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py @ jonasrdt/Wirtschaftsinformatik2 (head 30d5d896808b98664c55cb6fbb3b30a7f1904d9f) | licenses: ["MIT"] | count: 1 | events: 2022-03-23T09:40:39.000Z – 2022-03-23T09:40:39.000Z
max_issues_repo: WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py @ jonasrdt/Wirtschaftsinformatik2 (head 30d5d896808b98664c55cb6fbb3b30a7f1904d9f) | licenses: ["MIT"] | count: null | events: null
max_forks_repo: WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py @ jonasrdt/Wirtschaftsinformatik2 (head 30d5d896808b98664c55cb6fbb3b30a7f1904d9f) | licenses: ["MIT"] | count: null | events: null
content:
# Exercise 11 - Task 1
# Employee card file
# Provided by M. Drews in the 2021/22 winter semester

# Functions
def trenner(anzahl_striche):
    for i in range(anzahl_striche):
        print("-", end="")
    print()

def fehler():
    print("\nError: Please enter only numbers that are offered as options.")

def formular():
    global vorname, nachname, geburtsort
    vorname = input("> First name: ")
    nachname = input("> Last name: ")
    geburtsort = input("> Place of birth: ")

def suche():
    global index
    suche = input("Search term (enter last name): ")
    index = next((i for i, item in enumerate(ma_kartei) if item["Nachname"] == suche), None)

def eintrag_neu():
    print("\nPlease add a new entry to the employee card file: ")
    formular()
    gueltige_eingabe = False
    while not gueltige_eingabe:
        try:
            auswahl = int(input("\n(1) Save (2) Cancel\n"))
            if auswahl == 1:
                gueltige_eingabe = True
                eintrag = {"Vorname": vorname, "Nachname": nachname, "Geburtsort": geburtsort}
                ma_kartei.append(eintrag)
                print("Your entry has been saved and added to the card file.")
                trenner(80)
            elif auswahl == 2:
                gueltige_eingabe = True
        except:
            fehler()

def eintrag_bearbeiten():
    print("Which entry would you like to edit?")
    suche()
    print("\nPlease overwrite the old entry:")
    formular()
    ma_kartei[index] = {"Vorname": vorname, "Nachname": nachname, "Geburtsort": geburtsort}
    print("Your entry has been saved and added to the card file.")
    trenner(80)

def eintrag_loeschen():
    print("Which entry would you like to delete?")
    suche()
    print("\nThe following entry has been deleted:")
    print(ma_kartei[index])
    ma_kartei.pop(index)

# Main program flow
print("\n")
trenner(120)
print("Employee card file")
trenner(120)
trenner(120)
ma_kartei = []
programm = True
while programm:
    print("What would you like to do?")
    gueltige_eingabe = False
    while not gueltige_eingabe:
        try:
            auswahl = int(input("\n(1) Add entry\n(2) Edit entry\n(3) Delete entry\n(4) Show card file\n"))
            if auswahl == 1:
                gueltige_eingabe = True
                eintrag_neu()
            elif auswahl == 2:
                gueltige_eingabe = True
                eintrag_bearbeiten()
            elif auswahl == 3:
                gueltige_eingabe = True
                eintrag_loeschen()
            elif auswahl == 4:
                gueltige_eingabe = True
                print(ma_kartei)
                trenner(80)
        except:
            fehler()
stats: avg_line_length 28.875 | max_line_length 128 | alphanum_fraction 0.599206 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 886/0.318247

hexsha: 34af79c82e2f03fa8bacfe2aa4a2b6da7ce9ee22 | size: 20,230 | ext: py | lang: Python
max_stars_repo: asciinode.py @ bhsingleton/mason (head 9a14cdc758b7ef76e53d4ef9d30045834d6c17ef) | licenses: ["MIT"] | count: 1 | events: 2021-09-08T00:51:52.000Z – 2021-09-08T00:51:52.000Z
max_issues_repo: asciinode.py @ bhsingleton/mason (head 9a14cdc758b7ef76e53d4ef9d30045834d6c17ef) | licenses: ["MIT"] | count: null | events: null
max_forks_repo: asciinode.py @ bhsingleton/mason (head 9a14cdc758b7ef76e53d4ef9d30045834d6c17ef) | licenses: ["MIT"] | count: null | events: null
content:
import maya.api.OpenMaya as om
from . import asciitreemixin, asciiattribute, asciiplug
from .collections import hashtable, weakreflist, notifylist
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class AsciiNode(asciitreemixin.AsciiTreeMixin):
"""
Overload of AsciiTreeMixin used to interface with scene nodes.
"""
__slots__ = (
'_scene',
'_name',
'_namespace',
'_uuid',
'_type',
'_parent',
'_children',
'_locked',
'_default',
'_attributes',
'_plugs',
'_database',
'_connections'
)
__attributes__ = {} # Used for static attributes
def __init__(self, typeName, **kwargs):
"""
Private method called after a new instance is created.
:type typeName: str
:keyword scene: asciiscene.AsciiScene
:rtype: None
"""
# Call parent method
#
super(AsciiNode, self).__init__()
# Declare private variables
#
self._scene = kwargs.get('scene', self.nullWeakReference)
self._name = ''
self._namespace = ''
self._uuid = ''
self._type = typeName
self._parent = self.nullWeakReference
self._children = notifylist.NotifyList(cls=weakreflist.WeakRefList)
self._locked = False
self._attributes = hashtable.HashTable() # Used for dynamic attributes
self._plugs = hashtable.HashTable()
self._connections = []
self._default = kwargs.get('default', False)
# Setup child notifies
#
self._children.addCallback('itemAdded', self.childAdded)
self._children.addCallback('itemRemoved', self.childRemoved)
# Declare public variables
#
self.parent = kwargs.get('parent', None)
self.name = kwargs.get('name', '')
self.namespace = kwargs.get('namespace', '')
self.uuid = kwargs.get('uuid', om.MUuid().generate().asString())
# Initialize node attributes
#
self.initialize()
def __str__(self):
"""
Private method that returns a string representation of this instance.
:rtype: str
"""
return f'<{self.__class__.__module__}.{self.__class__.__name__} object: {self.absoluteName()}>'
def __getitem__(self, key):
"""
Private method that returns the plug associated with the supplied key.
:type key: str
:rtype: asciiplug.AsciiPlug
"""
return self.findPlug(key)
def __dumps__(self):
"""
Returns a list of command line strings that can be serialized.
:rtype: list[str]
"""
# Evaluate which commands to concatenate
#
commands = []
if self.isDefaultNode:
commands.append(self.getSelectCmd())
else:
commands.append(self.getCreateNodeCmd())
commands.append(self.getRenameCmd())
# Concatenate lockNode command
# But only if the node has actually been locked!
#
if self.isLocked:
commands.append(self.getLockNodeCmd())
# Concatenate attribute related commands
#
commands.extend(self.getAddAttrCmds())
commands.extend(self.getSetAttrCmds())
return commands
@property
def scene(self):
"""
Returns the scene this object is derived from.
:rtype: mason.asciiscene.AsciiScene
"""
return self._scene()
@property
def name(self):
"""
Getter method that returns the name of this node.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Setter method updates the name of this node.
:type name: str
:rtype: None
"""
# Check for redundancy
#
newName = self.stripAll(name)
oldName = self._name
if newName != oldName:
self._name = newName
self.nameChanged(oldName, newName)
def nameChanged(self, oldName, newName):
"""
Callback method for any name changes made to this node.
:type oldName: str
:type newName: str
:rtype: None
"""
# Remove previous name from registry
#
absoluteName = f'{self.namespace}:{oldName}'
hashCode = self.scene.registry.names.get(absoluteName, None)
if hashCode == self.hashCode():
del self.scene.registry.names[absoluteName]
# Append new name to registry
#
absoluteName = f'{self.namespace}:{newName}'
self.scene.registry.names[absoluteName] = self.hashCode()
@property
def namespace(self):
"""
Getter method that returns the namespace this node belongs to.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Setter method updates the namespace this node belongs to.
:type namespace: str
:rtype: None
"""
# Check for redundancy
#
oldNamespace = self._namespace
newNamespace = '' if namespace == ':' else namespace
if newNamespace != oldNamespace:
self._namespace = newNamespace
self.namespaceChanged(oldNamespace, newNamespace)
def namespaceChanged(self, oldNamespace, newNamespace):
"""
Callback method for any namespace changes made to this node.
:type oldNamespace: str
:type newNamespace: str
:rtype: None
"""
# Remove previous name from registry
#
absoluteName = f'{oldNamespace}:{self.name}'
hashCode = self.scene.registry.names.get(absoluteName, None)
if hashCode == self.hashCode():
del self.scene.registry.names[absoluteName]
# Append new name to registry
#
absoluteName = f'{newNamespace}:{self.name}'
self.scene.registry.names[absoluteName] = self.hashCode()
def absoluteName(self):
"""
Returns the bare minimum required to be a unique name.
:rtype: str
"""
if len(self.namespace) > 0:
return f'{self.namespace}:{self.name}'
else:
return self.name
@property
def parent(self):
"""
Getter method that returns the parent for this object.
:rtype: AsciiNode
"""
return self._parent()
@parent.setter
def parent(self, parent):
"""
Setter method that updates the parent for this object.
:type parent: AsciiNode
:rtype: None
"""
# Check for redundancy
#
if parent is self.parent:
log.debug(f'{self} is already parented to: {parent}')
return
# Check for none type
#
oldParent = self.parent
if isinstance(parent, AsciiNode):
self._parent = parent.weakReference()
elif isinstance(parent, str):
self.parent = self.scene.registry.getNodeByName(parent)
elif parent is None:
self._parent = self.nullWeakReference
else:
raise TypeError(f'parent.setter() expects an AsciiNode ({type(parent).__name__} given)!')
# Cleanup any old references
#
self.parentChanged(oldParent, parent)
def parentChanged(self, oldParent, newParent):
"""
Callback method that cleans up any parent/child references.
:type oldParent: AsciiNode
:type newParent: AsciiNode
:rtype: None
"""
# Remove self from former parent
#
if oldParent is not None:
oldParent.children.remove(self)
# Append self to new parent
#
if newParent is not None:
newParent.children.appendIfUnique(self)
@property
def children(self):
"""
Getter method that returns the children belonging to this object.
:rtype: weakreflist.WeakRefList
"""
return self._children
def childAdded(self, index, child):
"""
Adds a reference to this object to the supplied child.
:type index: int
:type child: AsciiNode
:rtype: None
"""
if child.parent is not self:
child.parent = self
def childRemoved(self, child):
"""
Removes the reference of this object from the supplied child.
:type child: AsciiNode
:rtype: None
"""
child.parent = None
@property
def type(self):
"""
Getter method that returns the name of this node type.
:rtype: str
"""
return self._type
@property
def uuid(self):
"""
Getter method that returns the UUID for this node.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""
Setter method that updates the UUID for this node.
:type uuid: str
:rtype: None
"""
# Check for redundancy
#
newUUID = self.scene.registry.generateUUID(uuid)
oldUUID = self._uuid
if newUUID != oldUUID:
self._uuid = newUUID
self.uuidChanged(oldUUID, newUUID)
def uuidChanged(self, oldUUID, newUUID):
"""
Callback method for any namespace changes made to this node.
:type oldUUID: str
:type newUUID: str
:rtype: None
"""
# Remove previous uuid from registry
#
hashCode = self.scene.registry.uuids.get(oldUUID, None)
if hashCode == self.hashCode():
del self.scene.registry.uuids[oldUUID]
# Append new uuid to registry
#
self.scene.registry.uuids[newUUID] = self.hashCode()
@property
def isLocked(self):
"""
Getter method that returns the lock state of this node.
:rtype: int
"""
return self._locked
@isLocked.setter
def isLocked(self, locked):
"""
Setter method that updates the lock state of this node.
:type locked: bool
:rtype: None
"""
self._locked = bool(locked)
@property
def isDefaultNode(self):
"""
Getter method that evaluates whether this is a default node.
:rtype: bool
"""
return self._default
def initialize(self):
"""
Initializes the attributes and plugs for this node.
:rtype: None
"""
# Check if static attributes exist
# If not then go ahead and initialize them
#
attributes = self.__attributes__.get(self.type)
if attributes is None:
attributes = asciiattribute.listPlugin(self.type)
self.__attributes__[self.type] = attributes
@property
def database(self):
"""
Getter method that returns the database for this node.
:rtype: asciidatabase.AsciiDatabase
"""
return self._database
@property
def plugs(self):
"""
Getter method that returns the plugs that are currently in use.
:rtype: hashtable.HashTable
"""
return self._plugs
def iterTopLevelPlugs(self):
"""
Iterates through all of the top-level plugs.
Please note that plugs are created on demand so don't expect a complete list from this generator!
:rtype: iter
"""
# Iterate through attributes
#
for attribute in self.listAttr(fromPlugin=True, userDefined=True).values():
# Check if this is a top level parent
#
if attribute.parent is not None:
continue
# Yield associated plug
#
plug = self._plugs.get(attribute.shortName, None)
if plug is not None:
yield plug
else:
continue
def dagPath(self):
"""
Returns a dag path for this node.
:rtype: str
"""
if self.parent is not None:
return '|'.join([x.absoluteName() for x in self.trace()])
else:
return self.name
def attribute(self, name):
"""
Returns an ascii attribute with the given name.
:type name: str
:rtype: asciiattribute.AsciiAttribute
"""
return self.listAttr(fromPlugin=True, userDefined=True).get(name, None)
def listAttr(self, fromPlugin=False, userDefined=False):
"""
Returns a list of attributes derived from this node.
:type fromPlugin: bool
:type userDefined: bool
:rtype: hashtable.HashTable
"""
# Check if plugin defined attributes should be returned
#
attributes = hashtable.HashTable()
if fromPlugin:
attributes.update(self.__class__.__attributes__[self.type])
# Check if user defined attributes should be returned
#
if userDefined:
attributes.update(self._attributes)
return attributes
def addAttr(self, *args, **kwargs):
"""
Adds a dynamic attribute to this node.
This function accepts two different sets of arguments.
You can either supply a fully formed AsciiAttribute.
Or you can pass all of the keywords required to create one.
:rtype: None
"""
# Check number of arguments
#
numArgs = len(args)
numKwargs = len(kwargs)
if numArgs == 1:
# Store reference to attribute
#
attribute = args[0]
self._attributes[attribute.shortName] = attribute
self._attributes[attribute.longName] = attribute
elif numKwargs > 0:
# Create new attribute from kwargs
#
attribute = asciiattribute.AsciiAttribute(**kwargs)
self.addAttr(attribute)
else:
raise TypeError(f'addAttr() expects 1 argument ({numArgs} given)!')
def setAttr(self, plug, value):
"""
Assigns the supplied value to the given plug.
:type plug: Union[str, asciiplug.AsciiPlug]
:type value: Any
:rtype: None
"""
# Check plug type
#
if isinstance(plug, str):
plug = self.findPlug(plug)
# Assign value to plug
#
plug.setValue(value)
def connectAttr(self, source, destination):
"""
Connects the two supplied plugs together.
:type source: Union[str, asciiplug.AsciiPlug]
:type destination: Union[str, asciiplug.AsciiPlug]
:rtype: None
"""
# Check source type
#
if isinstance(source, str):
source = self.findPlug(source)
# Check destination type
#
if isinstance(destination, str):
destination = self.findPlug(destination)
# Connect plugs
#
source.connect(destination)
def findPlugs(self, path):
"""
Returns a list of plugs from the supplied string path.
:type path: str
:rtype: list[asciiplug.AsciiPlug]
"""
return asciiplug.AsciiPlugPath(f'{self.absoluteName()}.{path}', scene=self.scene).evaluate()
def findPlug(self, path):
"""
Returns the plug associated with the given name.
If more than one plug is found then a type error is raised.
:type path: str
:rtype: asciiplug.AsciiPlug
"""
plugs = self.findPlugs(path)
numPlugs = len(plugs)
if numPlugs == 0:
return None
elif numPlugs == 1:
return plugs[0]
else:
raise TypeError('findPlug() multiple plugs found!')
def legalConnection(self, plug, otherPlug):
"""
Evaluates whether or not the connection between these two plugs is valid.
TODO: Implement this behaviour!
:type plug: asciiplug.AsciiPlug
:type otherPlug: asciiplug.AsciiPlug
:rtype: bool
"""
return True
def connectionMade(self, plug, otherPlug):
"""
Callback method for any connection changes made to this node.
:type plug: asciiplug.AsciiPlug
:type otherPlug: asciiplug.AsciiPlug
:rtype: None
"""
self._connections.append(otherPlug.weakReference())
def legalDisconnection(self, plug, otherPlug):
"""
Evaluates whether or not the disconnection between these two plugs is valid.
TODO: Implement this behaviour!
:type plug: asciiplug.AsciiPlug
:type otherPlug: asciiplug.AsciiPlug
:rtype: bool
"""
return True
def connectionBroken(self, plug, otherPlug):
"""
Callback method for any disconnection changes made to this node.
:type plug: asciiplug.AsciiPlug
:type otherPlug: asciiplug.AsciiPlug
:rtype: None
"""
self._connections.remove(otherPlug.weakReference())
def getCreateNodeCmd(self):
"""
Returns a command string that can create this node.
:rtype: str
"""
# Check if node has parent
#
if self.parent is not None:
return f'createNode {self.type} -s -n "{self.absoluteName()}" -p "{self.parent.absoluteName()}";'
else:
return f'createNode {self.type} -s -n "{self.absoluteName()}";'
def getSelectCmd(self):
"""
Returns a command string that can select this node.
:rtype: str
"""
return f'select -ne "{self.absoluteName()}";'
def getRenameCmd(self):
"""
Returns a command string that can rename this node's UUID.
:rtype: str
"""
return f'\trename -uid "{self.uuid}";'
def getLockNodeCmd(self):
"""
Returns a command string that can lock this node.
:rtype: str
"""
return f'\tlockNode -l {int(self.isLocked)};'
def getAddAttrCmds(self):
"""
Returns a list of commands for user-defined attributes.
:rtype: list[str]
"""
return [x.getAddAttrCmd() for x in self.listAttr(userDefined=True).values()]
def getSetAttrCmds(self):
"""
Returns a list of commands for non-default plugs.
:rtype: list[str]
"""
# Iterate through top-level plugs
#
commands = []
for plug in self.iterTopLevelPlugs():
commands.extend(plug.getSetAttrCmds())
return commands
def getConnectAttrCmds(self):
"""
Returns a list of command strings that can recreate the outgoing connections from this node.
:rtype: list[str]
"""
# Iterate through known connections
#
numCommands = len(self._connections)
commands = [None] * numCommands
for (i, ref) in enumerate(self._connections):
# Check if ref is still alive
#
otherPlug = ref()
if otherPlug is None:
continue
# Concatenate source name
#
plug = otherPlug.source()
source = plug.partialName(includeNodeName=True, useFullAttributePath=True, includeIndices=True)
# Check if destination index matters
#
if otherPlug.isElement and not otherPlug.attribute.indexMatters:
destination = otherPlug.parent.partialName(includeNodeName=True, useFullAttributePath=True, includeIndices=True)
commands[i] = f'connectAttr "{source}" "{destination}" -na;'
else:
destination = otherPlug.partialName(includeNodeName=True, useFullAttributePath=True, includeIndices=True)
commands[i] = f'connectAttr "{source}" "{destination}";'
return commands
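    # Example of the command strings produced above (illustrative output only;
    # node and attribute names are invented). The -na flag is emitted when the
    # destination element's index does not matter:
    #
    #   connectAttr "pCube1.translateX" "pCube2.translateX";
    #   connectAttr "curveInfo1.arcLength" "blendShape1.inputTarget" -na;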
| 23.997628 | 128 | 0.571824 | 19,980 | 0.987642 | 721 | 0.03564 | 4,381 | 0.21656 | 0 | 0 | 9,240 | 0.456747 |
34af9cb845a017d27fd9e7390bfebe5569ce5eaf
| 1,325 |
py
|
Python
|
freezer_api/policy.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | null | null | null |
freezer_api/policy.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | 5 |
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
freezer_api/policy.py
|
ctpegasus/freezer-api
|
b784327252ac6132a4d3b87c50e9a99c70d6c938
|
[
"Apache-2.0"
] | 2 |
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
"""
(c) Copyright 2015-2016 Hewlett-Packard Enterprise Company L.P.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
from oslo_policy import policy
from freezer_api.common import exceptions
from freezer_api.common import policies
ENFORCER = None
def setup_policy(conf):
global ENFORCER
if not ENFORCER:
ENFORCER = policy.Enforcer(conf)
ENFORCER.register_defaults(policies.list_rules())
ENFORCER.load_rules()
def enforce(rule):
def decorator(func):
@functools.wraps(func)
def handler(*args, **kwargs):
ctx = args[1].env['freezer.context']
ENFORCER.enforce(rule, {}, ctx.to_dict(), do_raise=True,
exc=exceptions.AccessForbidden)
return func(*args, **kwargs)
return handler
return decorator
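# Illustrative use of the decorator above (a sketch; the resource class, hook
# name, and rule string are assumptions, not taken from this module):
#
#   class BackupsResource(object):
#       @enforce('backups:get_all')
#       def on_get(self, req, resp):
#           ...  # body runs only if the policy check passes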
| 27.604167 | 72 | 0.706415 | 0 | 0 | 0 | 0 | 280 | 0.211321 | 0 | 0 | 613 | 0.462642 |
34b0627ddf54dc4030ea1fef447d1fe5ea946c8c
| 297 |
py
|
Python
|
moviepy/video/fx/all/__init__.py
|
odidev/moviepy
|
b19a690fe81b17fa582622d1c0ebe73e4e6380e7
|
[
"MIT"
] | 8,558 |
2015-01-03T05:14:12.000Z
|
2022-03-31T21:45:38.000Z
|
moviepy/video/fx/all/__init__.py
|
odidev/moviepy
|
b19a690fe81b17fa582622d1c0ebe73e4e6380e7
|
[
"MIT"
] | 1,592 |
2015-01-02T22:12:54.000Z
|
2022-03-30T13:10:40.000Z
|
moviepy/video/fx/all/__init__.py
|
odidev/moviepy
|
b19a690fe81b17fa582622d1c0ebe73e4e6380e7
|
[
"MIT"
] | 1,332 |
2015-01-02T18:01:53.000Z
|
2022-03-31T22:47:28.000Z
|
"""
moviepy.video.fx.all is deprecated.
Use the fx method directly from the clip instance (e.g. ``clip.resize(...)``)
or import the function from moviepy.video.fx instead.
"""
import warnings
from moviepy.video.fx import * # noqa F401,F403
warnings.warn(f"\nMoviePy: {__doc__}", UserWarning)
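# Illustrative migration away from this deprecated package (a sketch; the file
# name is a placeholder):
#
#   from moviepy.editor import VideoFileClip
#   clip = VideoFileClip("video.mp4").resize(0.5)  # call the fx on the clip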
| 22.846154 | 77 | 0.723906 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.723906 |
34b1459af9f8293d45f8ba2c83ea76abe97d3d5b
| 102 |
py
|
Python
|
core/src/autogluon/core/scheduler/resource/__init__.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 4,462 |
2019-12-09T17:41:07.000Z
|
2022-03-31T22:00:41.000Z
|
core/src/autogluon/core/scheduler/resource/__init__.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 1,408 |
2019-12-09T17:48:59.000Z
|
2022-03-31T20:24:12.000Z
|
core/src/autogluon/core/scheduler/resource/__init__.py
|
zhiqiangdon/autogluon
|
71ee7ef0f05d8f0aad112d8c1719174aa33194d9
|
[
"Apache-2.0"
] | 623 |
2019-12-10T02:04:18.000Z
|
2022-03-20T17:11:01.000Z
|
from .resource import *
from .dist_manager import *
from ...utils import get_cpu_count, get_gpu_count
| 25.5 | 49 | 0.794118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
34b1bf44dd3558edd70025285efa4c01fcb9a1ed
| 101 |
py
|
Python
|
wellknownmsgs/AppStartingMsg.py
|
sdyiheng/SimplePythonWebApp
|
fc3188796a1e48a42a0c22b5f3b430c0de1be87a
|
[
"MIT"
] | null | null | null |
wellknownmsgs/AppStartingMsg.py
|
sdyiheng/SimplePythonWebApp
|
fc3188796a1e48a42a0c22b5f3b430c0de1be87a
|
[
"MIT"
] | null | null | null |
wellknownmsgs/AppStartingMsg.py
|
sdyiheng/SimplePythonWebApp
|
fc3188796a1e48a42a0c22b5f3b430c0de1be87a
|
[
"MIT"
] | null | null | null |
class AppStartingMsg(object):
    '''Application starting message.'''
def __init__(self):
self.ErrorMsg = ""
| 16.833333 | 29 | 0.594059 | 116 | 0.991453 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.273504 |
34b2720c17b7d23f1bc3687785eec5b224b79560
| 109 |
py
|
Python
|
hello_world.py
|
TruthLacksLyricism/learning-python
|
7a279ad0698860b612fc6b76ff99c81acd29d31b
|
[
"MIT"
] | null | null | null |
hello_world.py
|
TruthLacksLyricism/learning-python
|
7a279ad0698860b612fc6b76ff99c81acd29d31b
|
[
"MIT"
] | null | null | null |
hello_world.py
|
TruthLacksLyricism/learning-python
|
7a279ad0698860b612fc6b76ff99c81acd29d31b
|
[
"MIT"
] | null | null | null |
print "This line will be printed."
x = "rachel"
if x == "rachel":
print "Rachel"
print "Hello, World!"
| 13.625 | 34 | 0.623853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.614679 |
34b3e94aca6273776ec69ea9dd2046fec59f31b5
| 434 |
py
|
Python
|
src/pyonms.py
|
mmahacek/PyONMS
|
72ef0c2717044db66e4b99fe9820a69f435f44fd
|
[
"MIT"
] | null | null | null |
src/pyonms.py
|
mmahacek/PyONMS
|
72ef0c2717044db66e4b99fe9820a69f435f44fd
|
[
"MIT"
] | null | null | null |
src/pyonms.py
|
mmahacek/PyONMS
|
72ef0c2717044db66e4b99fe9820a69f435f44fd
|
[
"MIT"
] | null | null | null |
# pyonms.py
from dao import api, alarms, events, nodes
class pyonms():
def __init__(self, hostname, username, password):
self.hostname = hostname
self.api = api.API(hostname=hostname, username=username, password=password)
self.nodes = nodes.Nodes(self.api)
self.events = events.Events(self.api)
self.alarms = alarms.Alarms(self.api)
def __repr__(self):
return self.hostname
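# Illustrative usage (a sketch; the host and credentials are placeholders):
#
#   server = pyonms('https://onms.example.com/opennms', 'admin', 'admin')
#   server.nodes, server.events, server.alarms  # DAOs sharing one API session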
| 27.125 | 83 | 0.665899 | 375 | 0.864055 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.025346 |
34b47fa6cab25d27a526a824846c9378728893e8
| 5,195 |
py
|
Python
|
predicting_molecular_properties/nn_utils.py
|
ashesh-0/kaggle_competitions
|
58e927971b6ee67583a76a5ac821430a8d0bc31a
|
[
"MIT"
] | 1 |
2019-11-20T11:33:51.000Z
|
2019-11-20T11:33:51.000Z
|
predicting_molecular_properties/nn_utils.py
|
ashesh-0/kaggle_competitions
|
58e927971b6ee67583a76a5ac821430a8d0bc31a
|
[
"MIT"
] | null | null | null |
predicting_molecular_properties/nn_utils.py
|
ashesh-0/kaggle_competitions
|
58e927971b6ee67583a76a5ac821430a8d0bc31a
|
[
"MIT"
] | null | null | null |
"""
Utility functions for Neural network
"""
import pandas as pd
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.preprocessing import StandardScaler
from keras.layers import Dense, BatchNormalization, Input, LeakyReLU, Dropout
from keras.models import Model, load_model
from keras.optimizers import Adam
def plot_history(history, label, loss_str='sc_outp_mean_absolute_error'):
import matplotlib.pyplot as plt
plt.plot(history.history[loss_str])
plt.plot(history.history[f'val_{loss_str}'])
plt.title(f'Loss for {label}')
plt.ylabel('Loss')
plt.xlabel('Epoch')
_ = plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()
def create_nn_model(input_shape):
inp = Input(shape=(input_shape, ))
x = Dense(256)(inp)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
# x = Dense(1024)(x)
# x = BatchNormalization()(x)
# x = LeakyReLU(alpha=0.05)(x)
# x = Dropout(0.2)(x)
# x = Dense(1024)(x)
# x = BatchNormalization()(x)
# x = LeakyReLU(alpha=0.05)(x)
# x = Dropout(0.2)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
# x = Dense(512)(x)
# x = BatchNormalization()(x)
# x = LeakyReLU(alpha=0.05)(x)
# x = Dropout(0.4)(x)
x = Dense(256)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
    out1 = Dense(20, activation="linear", name='int_outp')(x)  # 2 mulliken charge, tensor 6, tensor 12 (others)
x = Dense(128)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
# x = Dense(128)(x)
# x = BatchNormalization()(x)
# x = LeakyReLU(alpha=0.05)(x)
x = Dense(64)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
    out = Dense(1, activation="linear", name='sc_outp')(x)  # scalar_coupling_constant
model = Model(inputs=inp, outputs=[out, out1])
# model = Model(inputs=inp, outputs=[out])
return model
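# Illustrative smoke test of the factory above (a sketch; the input width 100
# is arbitrary):
#
#   model = create_nn_model(100)
#   model.summary()  # two heads: 'sc_outp' (1 value) and 'int_outp' (20 values)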
def train_nn(nn_config, train_X, train_Y, val_X, val_Y, test_X):
model_name_wrt = f'molecule_model_{nn_config["type_enc"]}.hdf5'
assert isinstance(nn_config['load_model'], bool)
if nn_config['load_model'] is False:
model = create_nn_model(train_X.shape[1])
model.compile(loss='mse', metrics=['mae'], optimizer=Adam(lr=nn_config['lr']))
# tensorboard_callback = TensorBoard("logs/" + datetime.now().strftime('%H:%M:%S'), update_freq='epoch')
val_loss = 'val_sc_outp_mean_absolute_error'
es = EarlyStopping(monitor=val_loss, mode='min', patience=30, verbose=0)
rlr = ReduceLROnPlateau(monitor=val_loss, factor=0.1, patience=25, min_lr=1e-6, mode='auto', verbose=1)
sv_mod = ModelCheckpoint(
model_name_wrt, monitor='val_sc_outp_mean_absolute_error', save_best_only=True, period=1)
train_Y = train_Y.values
val_Y = val_Y.values
history = model.fit(
train_X, [train_Y[:, 0], train_Y[:, 1:]],
validation_data=(val_X, [val_Y[:, 0], val_Y[:, 1:]]),
epochs=nn_config['epochs'],
verbose=0,
batch_size=nn_config['batch_size'],
callbacks=[es, rlr, sv_mod])
plot_history(history, nn_config['type_enc'])
else:
print('Loading from file', model_name_wrt)
model = load_model(model_name_wrt)
output_dict = {
'model': model,
'train_prediction': model.predict(train_X)[0][:, 0],
'val_prediction': model.predict(val_X)[0][:, 0],
'test_prediction': model.predict(test_X)[0][:, 0],
}
return output_dict
def get_intermediate_Ydf(mulliken_df, magnetic_shielding_tensors_df, raw_train_df):
interm_Y_atomdata_df = pd.merge(
mulliken_df, magnetic_shielding_tensors_df, how='outer', on=['molecule_name', 'atom_index'])
Y_cols = interm_Y_atomdata_df.columns.tolist()
Y_cols.remove('molecule_name')
Y_cols.remove('atom_index')
interm_Y_df = raw_train_df[['molecule_name', 'atom_index_0', 'atom_index_1']].reset_index()
interm_Y_df = pd.merge(
interm_Y_df,
interm_Y_atomdata_df,
how='left',
left_on=['molecule_name', 'atom_index_0'],
right_on=['molecule_name', 'atom_index'])
interm_Y_df.rename({c: f'{c}_0' for c in Y_cols}, axis=1, inplace=True)
interm_Y_df.drop('atom_index', axis=1, inplace=True)
interm_Y_df = pd.merge(
interm_Y_df,
interm_Y_atomdata_df,
how='left',
left_on=['molecule_name', 'atom_index_1'],
right_on=['molecule_name', 'atom_index'])
interm_Y_df.rename({c: f'{c}_1' for c in Y_cols}, axis=1, inplace=True)
interm_Y_df.drop(['atom_index', 'atom_index_0', 'atom_index_1', 'molecule_name'], axis=1, inplace=True)
interm_Y_df.set_index('id', inplace=True)
# Normalization
interm_Y_df = pd.DataFrame(
StandardScaler().fit_transform(interm_Y_df), columns=interm_Y_df.columns, index=interm_Y_df.index)
return interm_Y_df
| 35.101351 | 112 | 0.638499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,445 | 0.278152 |
34b6d3e8c3f61f05d6fffeaf8b3486a12424b3e5
| 2,809 |
py
|
Python
|
fn_portal/tests/api/test_fn026.py
|
AdamCottrill/FishNetPortal
|
4e58e05f52346ac1ab46698a03d4229c74828406
|
[
"MIT"
] | null | null | null |
fn_portal/tests/api/test_fn026.py
|
AdamCottrill/FishNetPortal
|
4e58e05f52346ac1ab46698a03d4229c74828406
|
[
"MIT"
] | null | null | null |
fn_portal/tests/api/test_fn026.py
|
AdamCottrill/FishNetPortal
|
4e58e05f52346ac1ab46698a03d4229c74828406
|
[
"MIT"
] | null | null | null |
"""=============================================================
~/fn_portal/fn_portal/tests/api/test_FN026.py
Created: 26 May 2021 18:01:30
DESCRIPTION:
This file contains a number of unit tests that verify that the api
endpoint for FN026 objects works as expected:
+ the fn026 list returns all of the spaces
associated with a specific project
+ the space detail endpoint will return the space code, space
description, dd_lat, dd_lon.
=============================================================
"""
import json
import pytest
from django.urls import reverse
from fn_portal.models import FN026
from fn_portal.tests.fixtures import api_client, project
from rest_framework import status
from ..factories import FN026Factory
@pytest.mark.django_db
def test_fn026_list(api_client, project):
""""""
prj_cd = project.prj_cd
url = reverse("fn_portal_api:fn026-list", kwargs={"prj_cd": prj_cd})
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
data = [
(x.get("space"), x.get("space_des"), x.get("dd_lat"), x.get("dd_lon"))
for x in response.data["results"]
]
assert len(data) == 2
expected = [("S1", "Space 1", 45.1, -81.1), ("S2", "Space 2", 45.2, -81.2)]
assert data == expected
@pytest.mark.django_db
def test_fn026_detail(api_client, project):
""""""
prj_cd = project.prj_cd
space = "S1"
expected = {
"space": "S1",
"space_des": "Space 1",
"dd_lat": 45.1,
"dd_lon": -81.1,
}
FN026Factory(project=project, **expected)
url = reverse(
"fn_portal_api:fn026-detail", kwargs={"prj_cd": prj_cd, "space": space}
)
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
for k, v in expected.items():
assert response.data[k] == expected[k]
expected_fields = {
"project",
"label",
"space",
"space_des",
"area_lst",
"grdep_ge",
"grdep_lt",
"sidep_lt",
"sidep_ge",
"grid_ge",
"grid_lt",
"site_lst",
"sitp_lst",
"dd_lat",
"dd_lon",
}
assert set(response.data.keys()) == expected_fields
args = [
("LHA_IA19_FOO", "S1"), # bad project code, good space
("LHA_IA19_000", "99"), # good project code, bad space
]
@pytest.mark.django_db
@pytest.mark.parametrize("prj_cd,space", args)
def test_fn026_detail_404(api_client, project, prj_cd, space):
"""If we ask for space or project that does exist we should get back a
404.
"""
url = reverse(
"fn_portal_api:fn026-detail", kwargs={"prj_cd": prj_cd, "space": space}
)
response = api_client.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
| 24.215517 | 79 | 0.603062 | 0 | 0 | 0 | 0 | 1,920 | 0.683517 | 0 | 0 | 1,109 | 0.394802 |
34b7a38557b1f2eeaa8850899a55b7d31360dad9
| 13,534 |
py
|
Python
|
test/unit_tests/braket/jobs/local/test_local_job_container.py
|
orclassiq/amazon-braket-sdk-python
|
69acaf54237ecbee14b5b5f0549fa28e32eba83b
|
[
"Apache-2.0"
] | null | null | null |
test/unit_tests/braket/jobs/local/test_local_job_container.py
|
orclassiq/amazon-braket-sdk-python
|
69acaf54237ecbee14b5b5f0549fa28e32eba83b
|
[
"Apache-2.0"
] | null | null | null |
test/unit_tests/braket/jobs/local/test_local_job_container.py
|
orclassiq/amazon-braket-sdk-python
|
69acaf54237ecbee14b5b5f0549fa28e32eba83b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import base64
import subprocess
from pathlib import Path
from unittest.mock import Mock, patch
import pytest
from braket.jobs.local.local_job_container import _LocalJobContainer
@pytest.fixture
def repo_uri():
return "012345678901.dkr.ecr.us-west-2.amazonaws.com"
@pytest.fixture
def image_uri(repo_uri):
return f"{repo_uri}/my-repo:my-tag"
@pytest.fixture
def aws_session():
_aws_session = Mock()
return _aws_session
@patch("subprocess.check_output")
@patch("subprocess.run")
def test_start_and_stop(mock_run, mock_check_output, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
]
with _LocalJobContainer(image_uri, aws_session):
pass
mock_check_output.assert_any_call(["docker", "images", "-q", image_uri])
mock_check_output.assert_any_call(
["docker", "run", "-d", "--rm", local_image_name, "tail", "-f", "/dev/null"]
)
assert mock_check_output.call_count == 2
mock_run.assert_any_call(["docker", "stop", running_container_name])
assert mock_run.call_count == 1
@patch("subprocess.check_output")
@patch("subprocess.run")
def test_pull_container(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
test_token = "Test Token"
mock_check_output.side_effect = [
str.encode(""),
str.encode(local_image_name),
str.encode(running_container_name),
]
aws_session.ecr_client.get_authorization_token.return_value = {
"authorizationData": [{"authorizationToken": base64.b64encode(str.encode(test_token))}]
}
with _LocalJobContainer(image_uri, aws_session):
pass
mock_check_output.assert_any_call(["docker", "images", "-q", image_uri])
mock_check_output.assert_any_call(
["docker", "run", "-d", "--rm", local_image_name, "tail", "-f", "/dev/null"]
)
assert mock_check_output.call_count == 3
mock_run.assert_any_call(["docker", "login", "-u", "AWS", "-p", test_token, repo_uri])
mock_run.assert_any_call(["docker", "pull", image_uri])
mock_run.assert_any_call(["docker", "stop", running_container_name])
assert mock_run.call_count == 3
@patch("subprocess.check_output")
@patch("subprocess.run")
def test_run_job_success(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
env_variables = {
"ENV0": "VALUE0",
"ENV1": "VALUE1",
}
run_program_name = "Run Program Name"
expected_run_output = "Expected Run Output"
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
str.encode(run_program_name),
str.encode(expected_run_output),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.run_local_job(env_variables)
run_output = container.run_log
assert run_output == expected_run_output
mock_check_output.assert_any_call(["docker", "images", "-q", image_uri])
mock_check_output.assert_any_call(
["docker", "run", "-d", "--rm", local_image_name, "tail", "-f", "/dev/null"]
)
mock_check_output.assert_any_call(
["docker", "exec", running_container_name, "printenv", "SAGEMAKER_PROGRAM"]
)
mock_check_output.assert_any_call(
[
"docker",
"exec",
"-w",
"/opt/ml/code/",
"-e",
"ENV0=VALUE0",
"-e",
"ENV1=VALUE1",
running_container_name,
"python",
run_program_name,
]
)
assert mock_check_output.call_count == 4
mock_run.assert_any_call(["docker", "stop", running_container_name])
assert mock_run.call_count == 1
@patch("subprocess.check_output")
@patch("subprocess.run")
def test_customer_script_fails(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
env_variables = {
"ENV0": "VALUE0",
"ENV1": "VALUE1",
}
run_program_name = "Run Program Name"
expected_error_output = "Expected Error Output"
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
str.encode(run_program_name),
subprocess.CalledProcessError("Test Error", "test", str.encode(expected_error_output)),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.run_local_job(env_variables)
run_output = container.run_log
assert run_output == expected_error_output
assert mock_check_output.call_count == 4
mock_run.assert_any_call(["docker", "stop", running_container_name])
assert mock_run.call_count == 1
@patch("subprocess.check_output")
@patch("subprocess.run")
def test_make_dir(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
test_dir_path = "/test/dir/path"
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
str.encode(""),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.makedir(test_dir_path)
mock_check_output.assert_any_call(["docker", "images", "-q", image_uri])
mock_check_output.assert_any_call(
["docker", "run", "-d", "--rm", local_image_name, "tail", "-f", "/dev/null"]
)
mock_check_output.assert_any_call(
["docker", "exec", running_container_name, "mkdir", "-p", test_dir_path]
)
assert mock_check_output.call_count == 3
mock_run.assert_any_call(["docker", "stop", running_container_name])
assert mock_run.call_count == 1
@patch("subprocess.check_output")
@patch("subprocess.run")
def test_copy_to(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
source_path = str(Path("test", "source", "dir", "path", "srcfile.txt"))
dest_path = str(Path("test", "dest", "dir", "path", "dstfile.txt"))
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
str.encode(""),
str.encode(""),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.copy_to(source_path, dest_path)
mock_check_output.assert_any_call(["docker", "images", "-q", image_uri])
mock_check_output.assert_any_call(
["docker", "run", "-d", "--rm", local_image_name, "tail", "-f", "/dev/null"]
)
mock_check_output.assert_any_call(
[
"docker",
"exec",
running_container_name,
"mkdir",
"-p",
str(Path("test", "dest", "dir", "path")),
]
)
mock_check_output.assert_any_call(
["docker", "cp", source_path, f"{running_container_name}:{dest_path}"]
)
assert mock_check_output.call_count == 4
mock_run.assert_any_call(["docker", "stop", running_container_name])
assert mock_run.call_count == 1
@patch("subprocess.check_output")
@patch("subprocess.run")
def test_copy_from(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
source_path = "/test/source/dir/path/srcfile.txt"
dest_path = "/test/dest/dir/path/dstfile.txt"
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
str.encode(""),
str.encode(""),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.copy_from(source_path, dest_path)
mock_check_output.assert_any_call(["docker", "images", "-q", image_uri])
mock_check_output.assert_any_call(
["docker", "run", "-d", "--rm", local_image_name, "tail", "-f", "/dev/null"]
)
mock_check_output.assert_any_call(
["docker", "cp", f"{running_container_name}:{source_path}", dest_path]
)
assert mock_check_output.call_count == 3
mock_run.assert_any_call(["docker", "stop", running_container_name])
assert mock_run.call_count == 1
@patch("subprocess.check_output")
@patch("subprocess.run")
@pytest.mark.xfail(raises=ValueError)
def test_run_fails_no_program(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
env_variables = {
"ENV0": "VALUE0",
"ENV1": "VALUE1",
}
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
str.encode(""),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.run_local_job(env_variables)
@patch("subprocess.check_output")
@patch("subprocess.run")
@pytest.mark.xfail(raises=subprocess.CalledProcessError)
def test_make_dir_fails(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
test_dir_path = "/test/dir/path"
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
subprocess.CalledProcessError("Test Error", "test", str.encode("test output")),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.makedir(test_dir_path)
@patch("subprocess.check_output")
@patch("subprocess.run")
@pytest.mark.xfail(raises=subprocess.CalledProcessError)
def test_copy_to_fails(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
source_path = "/test/source/dir/path/srcfile.txt"
dest_path = "/test/dest/dir/path/dstfile.txt"
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
subprocess.CalledProcessError("Test Error", "test", str.encode("test output")),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.copy_to(source_path, dest_path)
@patch("subprocess.check_output")
@patch("subprocess.run")
@pytest.mark.xfail(raises=subprocess.CalledProcessError)
def test_copy_from_fails(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
source_path = "/test/source/dir/path/srcfile.txt"
dest_path = "/test/dest/dir/path/dstfile.txt"
mock_check_output.side_effect = [
str.encode(local_image_name),
str.encode(running_container_name),
subprocess.CalledProcessError("Test Error", "test", str.encode("test output")),
]
with _LocalJobContainer(image_uri, aws_session) as container:
container.copy_from(source_path, dest_path)
@patch("subprocess.check_output")
@patch("subprocess.run")
@pytest.mark.xfail(raises=ValueError)
def test_pull_fails_no_auth(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
mock_check_output.side_effect = [
str.encode(""),
str.encode(local_image_name),
str.encode(running_container_name),
]
aws_session.ecr_client.get_authorization_token.return_value = {}
with _LocalJobContainer(image_uri, aws_session):
pass
@patch("subprocess.check_output")
@patch("subprocess.run")
@pytest.mark.xfail(raises=ValueError)
def test_pull_fails_invalid_uri(mock_run, mock_check_output, aws_session):
local_image_name = "LocalImageName"
running_container_name = "RunningContainer"
mock_check_output.side_effect = [
str.encode(""),
str.encode(local_image_name),
str.encode(running_container_name),
]
aws_session.ecr_client.get_authorization_token.return_value = {}
with _LocalJobContainer("TestURI", aws_session):
pass
@patch("subprocess.check_output")
@patch("subprocess.run")
@pytest.mark.xfail(raises=ValueError)
def test_pull_fails_unknown_reason(mock_run, mock_check_output, repo_uri, image_uri, aws_session):
test_token = "Test Token"
mock_check_output.side_effect = [
str.encode(""),
str.encode(""),
]
aws_session.ecr_client.get_authorization_token.return_value = {
"authorizationData": [{"authorizationToken": base64.b64encode(str.encode(test_token))}]
}
with _LocalJobContainer(image_uri, aws_session):
pass
| 37.079452 | 98 | 0.69935 | 0 | 0 | 0 | 0 | 12,742 | 0.941481 | 0 | 0 | 3,175 | 0.234594 |
34b84d492bd695fe750bb5aa931064fc3ca9b938
| 31,261 |
py
|
Python
|
scripts/regression_test.py
|
seonmokim/cryptominisat2
|
706738236fa5553a6e3623f06806d5fded377220
|
[
"MIT"
] | null | null | null |
scripts/regression_test.py
|
seonmokim/cryptominisat2
|
706738236fa5553a6e3623f06806d5fded377220
|
[
"MIT"
] | null | null | null |
scripts/regression_test.py
|
seonmokim/cryptominisat2
|
706738236fa5553a6e3623f06806d5fded377220
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement # Required in 2.5
import subprocess
import os
import fnmatch
import gzip
import re
import commands
import getopt
import sys
import signal
import resource
import time
import struct
import random
from random import choice
from subprocess import Popen, PIPE, STDOUT
#from optparse import OptionParser
import optparse
sys.path.append('../../cnf-utils/')
from xor_to_cnf_class import *
maxTime = 40
maxTimeDiff = 20
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
def format_description(self, description):
if description:
return description + "\n"
else:
return ""
usage = "usage: %prog [options] --fuzz/--regtest/--checkdir/filetocheck"
desc = """Example usages:
* check already computed SAT solutions (UNSAT cannot be checked):
./regression_test.py --checkdir ../../clusters/cluster93/ --cnfdir ../../satcomp09/
* check already computed SAT solutions (UNSAT cannot be checked):
./regression_test.py -c myfile.cnf -s sol.txt
* fuzz the solver with fuzz-generator
./regression_test.py -f
* go through regression listdir
./regression_test.py --regtest --checkdir ../tests/
"""
parser = optparse.OptionParser(usage=usage, description=desc, formatter=PlainHelpFormatter())
parser.add_option("--exec", metavar= "SOLVER", dest="solver"
, default="../build/cryptominisat"
, help="SAT solver executable. Default: %default"
)
parser.add_option("--extraopts", "-e", metavar= "OPTS", dest="extra_options"
, default=""
, help="Extra options to give to SAT solver"
)
parser.add_option("--verbose", "-v", action="store_true"
, default=False, dest="verbose"
, help="Print more output"
)
parser.add_option("-t", "--threads", dest="num_threads", metavar="NUM"
, default=1, type="int"
, help="Number of threads"
)
#for fuzz-testing
parser.add_option("-f", "--fuzz", dest="fuzz_test"
, default=False, action="store_true"
, help="Fuzz-test"
)
#for regression testing
parser.add_option("--regtest", dest="regressionTest"
, default=False, action="store_true"
, help="Regression test"
)
parser.add_option("--testdir", dest="testDir"
, default= "../tests/"
, help="Directory where the tests are"
)
parser.add_option("--testdirNewVar", dest="testDirNewVar"
, default= "../tests/newVar/"
, help="Directory where the tests are"
)
parser.add_option("--drup", dest="drup"
, default= False, action="store_true"
, help="Directory where the tests are"
)
#check dir stuff
parser.add_option("--checksol", dest="checkSol"
, default=False, action="store_true"
, help="Check solution at specified dir against problems at specified dir"
)
parser.add_option("--soldir", dest="checkDirSol"
, help="Check solutions found here"
)
parser.add_option("--probdir", dest="checkDirProb"
, default="/home/soos/media/sat/examples/satcomp09/"
, help="Directory of CNF files checked against"
)
parser.add_option("-c", "--check", dest="checkFile"
, default=None
, help="Check this file"
)
parser.add_option("-s", "--sol", dest="solutionFile"
, default=None
, help="Against this solution"
)
(options, args) = parser.parse_args()
def setlimits():
#sys.stdout.write("Setting resource limit in child (pid %d): %d s\n" % (os.getpid(), maxTime))
resource.setrlimit(resource.RLIMIT_CPU, (maxTime, maxTime))
def unique_fuzz_file(file_name_begin):
counter = 1
while 1:
file_name = file_name_begin + '_' + str(counter) + ".cnf"
try:
fd = os.open(file_name, os.O_CREAT | os.O_EXCL)
os.fdopen(fd).close()
return file_name
except OSError:
pass
counter += 1
class Tester:
def __init__(self):
self.check_unsat = False
self.testDir = options.testDir
self.testDirNewVar = options.testDirNewVar
self.ignoreNoSolution = False
self.needDebugLib = True
def random_options(self) :
cmd = " "
#extra = ["polarity-mode","rnd-freq","verbosity","randomize"
#,,"maxmatrixrows","minmatrixrows","savematrix"
#,,"threads", "maxconfl"]
#"maxsolutions"
opts = ["nonormxorfind","nobinxorfind"
,"noregbxorfind","doextendedscc","noconglomerate"
,"nosimplify","greedyunbound"
,"novarreplace"
,"pavgbranch"
,"nofailedlit","noheuleprocess","nosatelite","noxorsubs"
,"novarelim","nosubsume1","nomatrixfind"
,"noordercol","noiterreduce"
,"nohyperbinres","noremovebins"
,"noremlbins","nosubswithbins","nosubswithnbins"
,"noclausevivif","nosortwatched","nolfminim","nocalcreach"
,"nobxor","norecotfssr","nocacheotfssr","nootfsubsume"
,"plain"]
for opt in opts:
if random.randint(0,3) == 1 :
cmd += "--%s " % (opt)
cmd += "--gaussuntil=%d " % random.randint(0,50)
cmd += "--maxnummatrixes=%d " % random.randint(0,10)
cmd += "--restart=%s " % random.choice(["auto", "static", "dynamic"])
cmd += "--switchoffsubs=%s " % random.choice([20, random.randint(0, 200000)])
return cmd
def execute(self, fname, newVar=False, needToLimitTime=False, fnameDrup=None):
if os.path.isfile(options.solver) != True:
print "Error: Cannot find CryptoMiniSat executable. Searched in: '%s'" % \
options.solver
print "Error code 300"
exit(300)
#construct command
command = options.solver
command += self.random_options()
if self.needDebugLib :
command += "--debuglib "
if options.verbose == False:
command += "--verbosity=0 "
if newVar :
command += "--debugnewVar "
command += "--threads=%d " % options.num_threads
command += options.extra_options + " "
command += fname
if fnameDrup:
command += " --drupexistscheck 0 " + fnameDrup
print "Executing: %s " % command
#print time limit
if options.verbose:
print "CPU limit of parent (pid %d)" % os.getpid(), resource.getrlimit(resource.RLIMIT_CPU)
#if need time limit, then limit
if (needToLimitTime) :
p = subprocess.Popen(command.rsplit(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, preexec_fn=setlimits)
else:
p = subprocess.Popen(command.rsplit(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
#print time limit after child startup
if options.verbose:
print "CPU limit of parent (pid %d) after startup of child" % \
os.getpid(), resource.getrlimit(resource.RLIMIT_CPU)
#Get solver output
consoleOutput, err = p.communicate()
if options.verbose:
print "CPU limit of parent (pid %d) after child finished executing" % \
os.getpid(), resource.getrlimit(resource.RLIMIT_CPU)
return consoleOutput
def parse_solution_from_output(self, output_lines):
if len(output_lines) == 0:
print "Error! SAT solver output is empty!"
print "output lines: ", output_lines
print "Error code 500"
exit(500)
#solution will be put here
satunsatfound = False
vlinefound = False
solution = {}
#parse in solution
for line in output_lines:
#skip comment
if (re.match('^c ', line)):
continue;
#solution
if (re.match('^s ', line)):
if (satunsatfound) :
print "ERROR: solution twice in solver output!"
exit(400)
if 'UNSAT' in line:
unsat = True
satunsatfound = True
continue;
if 'SAT' in line:
unsat = False
satunsatfound = True;
continue;
print "ERROR: line starts with 's' but no SAT/UNSAT on line"
exit(400)
#parse in solution
if (re.match('^v ', line)):
vlinefound = True
myvars = line.split(' ')
for var in myvars:
if (var == 'v') : continue;
if (int(var) == 0) : break;
vvar = int(var)
solution[abs(vvar)] = (vvar >= 0)
#print "Parsed values:", solution
if (self.ignoreNoSolution == False and
(satunsatfound == False or (unsat == False and vlinefound == False))
):
print "Error: Cannot find line starting with 's' or 'v' in output!"
print output_lines
print "Error code 500"
exit(500)
if (self.ignoreNoSolution == True and
(satunsatfound == False or (unsat == False and vlinefound == False))
):
print "Probably timeout, since no solution printed. Could, of course, be segfault/assert fault, etc."
print "Making it look like an UNSAT, so no checks!"
return (True, [])
if (satunsatfound == False) :
print "Error: Cannot find if SAT or UNSAT. Maybe didn't finish running?"
print output_lines
print "Error code 500"
exit(500)
if (unsat == False and vlinefound == False) :
print "Error: Solution is SAT, but no 'v' line"
print output_lines
print "Error code 500"
exit(500)
return (unsat, solution)
def check_regular_clause(self, line, solution):
lits = line.split()
final = False
for lit in lits:
numlit = int(lit)
if numlit != 0:
if (abs(numlit) not in solution): continue
if numlit < 0:
                    final |= (not solution[abs(numlit)])
else:
final |= solution[numlit]
if final == True:
break
if final == False:
print "Error: clause '%s' not satisfied." % line
print "Error code 100"
exit(100)
def check_xor_clause(self, line, solution):
line = line.lstrip('x')
lits = line.split()
final = False
for lit in lits:
numlit = int(lit)
if numlit != 0:
if abs(numlit) not in solution:
print "Error: var %d not solved, but referred to in a xor-clause of the CNF" % abs(numlit)
print "Error code 200"
exit(200)
final ^= solution[abs(numlit)]
final ^= numlit < 0
if final == False:
print "Error: xor-clause '%s' not satisfied." % line
exit(-1)
def test_found_solution(self, solution, fname, debugLibPart=None):
if debugLibPart == None:
print "Verifying solution for CNF file %s" % fname
else:
print "Verifying solution for CNF file %s, part %d" % (fname, debugLibPart)
if fnmatch.fnmatch(fname, '*.gz'):
f = gzip.open(fname, "r")
else:
f = open(fname, "r")
clauses = 0
thisDebugLibPart = 0
for line in f:
line = line.rstrip()
#skip empty lines
if len(line) == 0:
continue
#count debug lib parts
if line[0] == 'c' and "Solver::solve" in line:
thisDebugLibPart += 1
#if we are over debugLibPart, exit
if debugLibPart != None and thisDebugLibPart >= debugLibPart:
f.close()
return
#check solution against clause
if line[0] != 'c' and line[0] != 'p':
if line[0] != 'x':
self.check_regular_clause(line, solution)
else:
self.check_xor_clause(line, solution)
clauses += 1
f.close()
print "Verified %d original xor®ular clauses" % clauses
def checkUNSAT(self, fname) :
a = XorToCNF()
tmpfname = unique_fuzz_file("tmp_for_xor_to_cnf_convert")
a.convert(fname, tmpfname )
#execute with the other solver
toexec = "../../lingeling-587f/lingeling -f %s" % tmpfname
print "Solving with other solver.."
currTime = time.time()
p = subprocess.Popen(toexec.rsplit(), stdout=subprocess.PIPE,
preexec_fn=setlimits)
consoleOutput2 = p.communicate()[0]
os.unlink(tmpfname)
#if other solver was out of time, then we can't say anything
diffTime = time.time() - currTime
if diffTime > maxTime-maxTimeDiff:
print "Other solver: too much time to solve, aborted!"
return None
#extract output from the other solver
print "Checking other solver output..."
(otherSolverUNSAT, otherSolverSolution) = self.parse_solution_from_output(consoleOutput2.split("\n"))
#check if the other solver agrees with us
return otherSolverUNSAT
def extractLibPart(self, fname, debug_num, assumps, tofile) :
fromf = open(fname, "r")
thisDebugLibPart = 0
maxvar = 0
numcls = 0
for line in fromf :
line = line.strip()
#ignore empty strings and headers
if not line or line[0] == "p" :
continue
#process (potentially special) comments
if line[0] == "c" :
if "Solver::solve" in line:
thisDebugLibPart += 1
continue
#break out if we reached the debug lib part
if thisDebugLibPart >= debug_num :
break
#count clauses and get max var number
numcls += 1
maxvar = max(maxvar, self.get_max_var_from_clause(line))
fromf.close()
#now we can create the new CNF file
fromf = open(fname, "r")
tof = open(tofile, "w")
tof.write("p cnf %d %d\n" % (maxvar, numcls + len(assumps)))
thisDebugLibPart = 0
for line in fromf :
line = line.strip()
#skip empty lines and headers
if not line or line[0] == "p" :
continue
#parse up special header
if line[0] == "c" :
if "Solver::solve" in line:
thisDebugLibPart += 1
continue
#break out if we reached the debug lib part
if thisDebugLibPart >= debug_num :
break
tof.write(line + '\n')
#add assumptions
for lit in assumps:
tof.write("%d 0\n" % lit)
fromf.close()
tof.close()
def get_assumps(self, fname, debugLibPart) :
f = open(fname, "r")
thispart = 0
solveline = None
for line in f :
if "Solver::solve" in line :
thispart += 1
if thispart == debugLibPart :
solveline = line
break
f.close()
assert solveline != None
ret = re.match("c.*Solver::solve\((.*)\)", solveline)
assert ret != None
assumps = ret.group(1).strip().split()
assumps = [int(x) for x in assumps]
print "Assumptions: ", assumps
return assumps
def check_assumps_inside_solution(self, assumps, solution) :
for lit in assumps:
var = abs(lit)
val = lit > 0
if var in solution :
if solution[var] != val :
print "Solution pinted has literal %s but assumptions contained the inverse: '%s'" % (-1*lit, assumps)
exit(-100)
print "OK, all assumptions inside solution"
def checkDebugLib(self, fname) :
largestPart = -1
dirList2 = os.listdir(".")
for fname_debug in dirList2:
if fnmatch.fnmatch(fname_debug, "debugLibPart*.output"):
debugLibPart = int(fname_debug[fname_debug.index("t") + 1:fname_debug.rindex(".output")])
largestPart = max(largestPart, debugLibPart)
for debugLibPart in range(1, largestPart + 1):
fname_debug = "debugLibPart%d.output" % debugLibPart
print "Checking debug lib part ", debugLibPart
if (os.path.isfile(fname_debug) == False) :
print "Error: Filename to be read '%s' is not a file!" % fname_debug
print "Error code 400"
exit(400)
#take file into mem
f = open(fname_debug, "r")
text = f.read()
output_lines = text.splitlines()
f.close()
(unsat, solution) = self.parse_solution_from_output(output_lines)
assumps = self.get_assumps(fname, debugLibPart)
if unsat == False:
print "debugLib is SAT"
self.check_assumps_inside_solution(assumps, solution)
self.test_found_solution(solution, fname, debugLibPart)
else:
print "debugLib is UNSAT"
tmpfname = unique_fuzz_file("tempfile_for_extract_libpart")
self.extractLibPart(fname, debugLibPart, assumps, tmpfname)
#check with other solver
ret = self.checkUNSAT(tmpfname)
if ret == None :
print "Cannot check, other solver took too much time"
elif ret == True :
print "UNSAT verified by other solver"
else :
print "Grave bug: SAT-> UNSAT : Other solver found solution!!"
exit()
#delete temporary file
os.unlink(tmpfname)
def check(self, fname, fnameSolution=None, fnameDrup=None, newVar=False,
needSolve=True, needToLimitTime=False):
consoleOutput = ""
currTime = time.time()
#Do we need to solve the problem, or is it already solved?
if needSolve:
consoleOutput = self.execute(fname, newVar, needToLimitTime, fnameDrup=fnameDrup)
else:
if not os.path.isfile(fnameSolution) :
print "ERROR! Solution file '%s' is not a file!" % fnameSolution
exit(-1)
f = open(fnameSolution, "r")
consoleOutput = f.read()
f.close()
print "Read solution from file " , fnameSolution
#if time was limited, we need to know if we were over the time limit
#and that is why there is no solution
if needToLimitTime:
diffTime = time.time() - currTime
if diffTime > (maxTime - maxTimeDiff)/options.num_threads:
print "Too much time to solve, aborted!"
return
else:
print "Within time limit: %f s" % (time.time() - currTime)
print "filename: %s" % fname
#if library debug is set, check it
if (self.needDebugLib) :
self.checkDebugLib(fname)
print "Checking console output..."
(unsat, solution) = self.parse_solution_from_output(consoleOutput.split("\n"))
otherSolverUNSAT = True
if not unsat :
self.test_found_solution(solution, fname)
return;
#it's UNSAT and we should not check, so exit
if self.check_unsat == False:
print "Cannot check -- output is UNSAT"
return
#it's UNSAT, let's check with DRUP
if fnameDrup:
toexec = "drupcheck %s %s" % (fname, fnameDrup)
print "Checking DRUP...: ", toexec
p = subprocess.Popen(toexec.rsplit(), stdout=subprocess.PIPE)
#,preexec_fn=setlimits)
consoleOutput2 = p.communicate()[0]
diffTime = time.time() - currTime
#find verification code
foundVerif = False
drupLine = ""
for line in consoleOutput2.split('\n') :
if len(line) > 1 and line[:2] == "s " :
#print "verif: " , line
foundVerif = True
if line[2:10] != "VERIFIED" and line[2:] != "TRIVIAL UNSAT" :
print "DRUP verification error, it says:", consoleOutput2
assert line[2:10] == "VERIFIED" or line[2:] == "TRIVIAL UNSAT", "DRUP didn't verify problem!"
drupLine = line
#Check whether we have found a verification code
if foundVerif == False:
print "verifier error! It says:", consoleOutput2
assert foundVerif, "Cannot find DRUP verification code!"
else:
print "OK, DRUP says:", drupLine
#check with other solver
ret = self.checkUNSAT(fname)
if ret == None :
print "Other solver time-outed, cannot check"
elif ret == True:
print "UNSAT verified by other solver"
else :
print "Grave bug: SAT-> UNSAT : Other solver found solution!!"
exit()
def removeDebugLibParts(self) :
dirList = os.listdir(".")
for fname_unlink in dirList:
if fnmatch.fnmatch(fname_unlink, 'debugLibPart*'):
os.unlink(fname_unlink)
def callFromFuzzer(self, directory, fuzzer, file_name) :
if (len(fuzzer) == 1) :
call = "{0}{1} > {2}".format(directory, fuzzer[0], file_name)
elif(len(fuzzer) == 2) :
seed = struct.unpack("<L", os.urandom(4))[0]
call = "{0}{1} {2} {3} > {4}".format(directory, fuzzer[0], fuzzer[1], seed, file_name)
elif(len(fuzzer) == 3) :
seed = struct.unpack("<L", os.urandom(4))[0]
hashbits = (random.getrandbits(20) % 79) + 1
call = "%s %s %d %s %d > %s" % (fuzzer[0], fuzzer[1], hashbits, fuzzer[2], seed, file_name)
return call
def create_fuzz(self, fuzzers, fuzzer, directory, file_name) :
#handle special fuzzer
file_names_multi = []
if len(fuzzer) == 2 and fuzzer[1] == "special":
#create N files
file_names_multi = []
#sometimes just fuzz with all SAT problems
fixed = random.getrandbits(1) == 1
for i in range(random.randrange(2,4)) :
file_name2 = unique_fuzz_file("fuzzTest");
file_names_multi.append(file_name2)
#chose a ranom fuzzer, not multipart
fuzzer2 = ["multipart.py", "special"]
while (fuzzer2[0] == "multipart.py") :
fuzzer2 = choice(fuzzers)
#sometimes fuzz with SAT problems only
if (fixed) :
fuzzer2 = fuzzers[0]
print "fuzzer2 used: ", fuzzer2
call = self.callFromFuzzer(directory, fuzzer2, file_name2)
print "calling sub-fuzzer:", call
out = commands.getstatusoutput(call)
#construct multi-fuzzer call
call = ""
call += directory
call += fuzzer[0]
call += " "
for name in file_names_multi :
call += " " + name
call += " > " + file_name
return call, file_names_multi
#handle normal fuzzer
else :
return self.callFromFuzzer(directory, fuzzer, file_name), []
def file_len_no_comment(self, fname):
i = 0;
with open(fname) as f:
for l in f :
#ignore comments and empty lines and header
if not l or l[0] == "c" or l[0] == "p":
continue
i += 1
return i
def get_max_var_from_clause(self, line) :
maxvar = 0
#strip leading 'x'
line2 = line.strip()
if len(line2) > 0 and line2[0] == 'x' :
line2 = line2[1:]
for lit in line2.split() :
num = 0
try :
num = int(lit)
except ValueError:
print "line '%s' contains a non-integer variable" % line2
maxvar = max(maxvar, abs(num))
return maxvar
def generate_random_assumps(self, maxvar) :
assumps = ""
num = 0
varsInside = set()
#Half of the time, no assumptions at all
if random.randint(0,1) == 1:
return assumps
#use a distribution so that few will be in assumps
while (num < maxvar and random.randint(0,4) > 0) :
#get a var that is not already inside the assumps
thisVar = random.randint(1, maxvar)
while (thisVar in varsInside) :
thisVar = random.randint(1, maxvar)
            varsInside.add(thisVar)
            num += 1
#random sign
if random.randint(0,1) :
thisVar *= -1
assumps += "%d " % thisVar
return assumps
def intersperse_with_debuglib(self, fname1, fname2) :
#approx number of solve()-s to add
if random.randint(0,1) == 1:
numtodo = random.randint(0,10)
else:
numtodo = 0
#based on length and number of solve()-s to add, intersperse
#file with ::solve()
file_len = self.file_len_no_comment(fname1)
if numtodo > 0:
nextToAdd = random.randint(1,(file_len/numtodo)*2+1)
else :
nextToAdd = file_len + 1
fin = open(fname1, "r")
fout = open(fname2, "w")
at = 0
maxvar = 0
for line in fin :
line = line.strip()
#ignore comments (but write them out)
if not line or line[0] == "c" or line[0] == 'p':
fout.write(line + '\n')
continue
at += 1
if at >= nextToAdd :
assumps = self.generate_random_assumps(maxvar)
#assumps = " "
fout.write("c Solver::solve( %s )\n" % assumps)
nextToAdd = at + random.randint(1,(file_len/numtodo)*2+1)
#calculate max variable
maxvar = max(maxvar, self.get_max_var_from_clause(line))
#copy line over
fout.write(line + '\n')
fout.close()
fin.close()
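    # Illustrative result of the interspersion above (a sketch; the literals
    # are invented). The rewritten CNF mixes clauses with debug-lib solve
    # lines such as:
    #
    #   1 -2 3 0
    #   c Solver::solve( 2 -5 )
    #   -1 4 0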
def fuzz_test(self) :
fuzzers = [
["../../sha1-sat/build/sha1-gen --attack preimage --rounds 18 --cnf", "--hash-bits", "--seed"] \
, ["../../sha1-sat/build/sha1-gen --xor --attack preimage --rounds 18 --cnf", "--hash-bits", "--seed"] \
, ["build/cnf-fuzz-biere"] \
#, ["build/cnf-fuzz-nossum"] \
#, ["build/largefuzzer"] \
, ["cnf-fuzz-brummayer.py"] \
, ["multipart.py", "special"] \
, ["build/sgen4 -unsat -n 50", "-s"] \
, ["cnf-fuzz-xor.py"] \
, ["build/sgen4 -sat -n 50", "-s"] \
]
directory = "../../cnf-utils/"
while True:
for fuzzer in fuzzers :
file_name = unique_fuzz_file("fuzzTest");
fnameDrup = None
if options.drup :
fnameDrup = unique_fuzz_file("fuzzTest");
#create the fuzz file
call, todel = self.create_fuzz(fuzzers, fuzzer, directory, file_name)
print "calling ", fuzzer, " : ", call
out = commands.getstatusoutput(call)
#adding debuglib to fuzz file
self.needDebugLib = True
#delete old debugLibPart files
dirList = os.listdir(".")
for fname in dirList:
if fnmatch.fnmatch(fname, 'debugLibPart*'):
os.unlink(fname)
file_name2 = unique_fuzz_file("fuzzTest");
self.intersperse_with_debuglib(file_name, file_name2)
os.unlink(file_name)
#check file
self.check(fname=file_name2, fnameDrup=fnameDrup, needToLimitTime=True)
#remove temporary filenames
os.unlink(file_name2)
for name in todel :
os.unlink(name)
if fnameDrup != None :
os.unlink(fnameDrup)
def checkDir(self) :
self.ignoreNoSolution = True
print "Checking already solved solutions"
        #check if options.checkDirSol has been set
if options.checkDirSol == "":
print "When checking, you must give test dir"
exit()
print "You gave testdir (where solutions are):", options.checkDirSol
print "You gave CNF dir (where problems are) :", options.checkDirProb
dirList = os.listdir(options.checkDirSol)
for fname in dirList:
if fnmatch.fnmatch(fname, '*.cnf.gz.out'):
#add dir, remove trailing .out
fname = fname[:len(fname) - 4]
fnameSol = options.checkDirSol + "/" + fname
#check now
self.check(fname=options.checkDirProb + "/" + fname, \
fnameSolution=fnameSol, needSolve=False)
def regressionTest(self) :
if False:
#first, test stuff with newVar
dirList = os.listdir(self.testDirNewVar)
for fname in dirList:
if fnmatch.fnmatch(fname, '*.cnf.gz'):
self.check(fname=self.testDirNewVar + fname, newVar=True, needToLimitTime=True)
dirList = os.listdir(self.testDir)
#test stuff without newVar
for fname in dirList:
if fnmatch.fnmatch(fname, '*.cnf.gz'):
self.check(fname=self.testDir + fname, newVar=False)
tester = Tester()
if options.checkFile :
tester.check_unsat = True
tester.check(options.checkFile, options.solutionFile, needSolve=False)
if options.fuzz_test:
tester.needDebugLib = False
tester.check_unsat = True
tester.fuzz_test()
if options.checkSol:
tester.checkDir()
if options.regressionTest:
tester.regressionTest()
| 34.504415 | 122 | 0.530949 | 26,628 | 0.851796 | 0 | 0 | 0 | 0 | 0 | 0 | 8,096 | 0.258981 |
34b86bc1dbc7088b4836cb91a3edc65c0a97f48c
| 5,182 |
py
|
Python
|
mindsdb/proxies/mysql/data_types/mysql_packet.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | 1 |
2022-03-14T00:32:53.000Z
|
2022-03-14T00:32:53.000Z
|
mindsdb/proxies/mysql/data_types/mysql_packet.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | null | null | null |
mindsdb/proxies/mysql/data_types/mysql_packet.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | null | null | null |
"""
*******************************************************
* Copyright (C) 2017 MindsDB Inc. <[email protected]>
*
* This file is part of MindsDB Server.
*
* MindsDB Server can not be copied and/or distributed without the express
* permission of MindsDB Inc
*******************************************************
"""
import struct
import pprint
# import logging
from libs.helpers.logging import logging
from libs.constants.mysql import MAX_PACKET_SIZE
class Packet:
def __init__(self, length=0, seq=0, body='', packet_string = None, socket = None, session = None, proxy = None, parent_packet=None, **kwargs):
if parent_packet is None:
self.mysql_socket = socket
self.session = session
self.proxy = proxy
else:
self.mysql_socket = parent_packet.mysql_socket
self.session = parent_packet.session
self.proxy = parent_packet.proxy
self._kwargs = kwargs
self.setup()
if packet_string is not None:
self.loadFromPacketString(packet_string)
else:
self.loadFromParams(length, seq, body)
def setup(self, length=0, seq=0, body=None):
self.loadFromParams(length=length, seq=seq, body=body)
def loadFromParams(self, length, seq, body):
self._length = length
self._seq = seq
self._body = body
def setBody(self, body_string):
self._body = body_string
self._length = len(body_string)
def loadFromPacketString(self, packet_string):
        # MySQL headers store a 3-byte little-endian length and a 1-byte sequence
        len_header = struct.unpack('<i', packet_string[:3] + b'\x00')[0]
        count_header = struct.unpack('b', packet_string[3:4])[0]
body = packet_string[4:]
self.loadFromParams(length=len_header, seq=count_header, body=body)
def getPacketString(self):
body = self.body
len_header = struct.pack('<i', self.length)[:3] # keep it 3 bytes
count_header = struct.pack('b', self.seq)
packet = len_header + count_header + body
return packet
def get(self):
# packet_string = self._socket.request.recv(4)
# if len(packet_string)<4:
# val = 'Expecting packet, but header len is <0'
# logging.error(val)
# raise ValueError(val)
len_header = MAX_PACKET_SIZE
body = b''
count_header = 1
while len_header == MAX_PACKET_SIZE:
packet_string = self.mysql_socket.request.recv(4)
if len(packet_string) < 4:
logging.warning('Packet with less than 4 bytes in length')
return False
len_header = struct.unpack('i', packet_string[:3] + b'\x00')[0]
if len_header == 0:
break
count_header = int(packet_string[3])
body += self.mysql_socket.request.recv(len_header)
self.session.logging.debug('Got packet')
self.session.logging.debug(body)
self.session.count = int(count_header) + 1
self.setup(len(body), count_header, body)
return True
def send(self):
self._seq = self.proxy.count
string = self.getPacketString()
self.session.logging.debug('Sending packet string')
self.session.logging.debug(string)
self.mysql_socket.request.sendall(string)
self.proxy.count += 1
def pprintPacket(self,body = None):
if body is None:
body = self.body
print(str(self))
        for i, x in enumerate(body):
            val = x if isinstance(x, int) else ord(x)  # bytes iterate as ints on Python 3
            print('[BODY]{i}:{h} ({inte}:{actual})'.format(i=i + 1, h=hex(val), inte=val, actual=chr(val)))
def isEOF(self):
if self.length == 0:
return True
else:
return False
@property
def length(self):
#self._length = len(self.body)
return self._length
@property
def seq(self):
return self._seq
@property
def body(self):
return self._body
@staticmethod
def bodyStringToPackets(body_string):
"""
The method takes a string and turns it into mysql_packets
:param body_string: text to turn into mysql_packets
:return: a list of mysql_packets
"""
ret = []
body_len = len(body_string)
mod = body_len % MAX_PACKET_SIZE
        num_packets = body_len // MAX_PACKET_SIZE + (1 if mod > 0 else 0)
        for i in range(num_packets):
            left_limit = i * MAX_PACKET_SIZE
            # the last packet carries the remainder; all others are full-sized
            right_limit = body_len if i + 1 == num_packets else MAX_PACKET_SIZE * (i + 1)
            body = body_string[left_limit:right_limit]
            ret += [Packet(length=len(body), seq=i + 1, body=body)]
return ret
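    # Illustrative split (a sketch): a body longer than MAX_PACKET_SIZE is cut
    # into full-sized packets plus one remainder packet, e.g.
    #
    #   parts = Packet.bodyStringToPackets(b'x' * (MAX_PACKET_SIZE + 5))
    #   # -> two Packets: MAX_PACKET_SIZE bytes, then 5 bytes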
def __str__(self):
return str({'body': self.body, 'length': self.length, 'seq': self.seq})
def test():
import pprint
u = Packet()
#u.setBody('test')
pprint.pprint(Packet.bodyStringToPackets('abdds')[0].getPacketString())
#pprint.pprint(u.getPacketString())
# only run the test if this file is called from debugger
if __name__ == "__main__":
test()
| 28.31694 | 147 | 0.588383 | 4,418 | 0.852567 | 0 | 0 | 932 | 0.179853 | 0 | 0 | 1,052 | 0.20301 |
34b987e6c437ee88219466fd33845cf6a6a27b4b
| 2,538 |
py
|
Python
|
py/example/gol.py
|
zefaxet/libant
|
1be1865404eea729f8512e9ccd73899fbd5b7cb2
|
[
"MIT"
] | 4 |
2019-10-18T06:14:36.000Z
|
2020-06-01T14:28:57.000Z
|
py/example/gol.py
|
zefaxet/libant
|
1be1865404eea729f8512e9ccd73899fbd5b7cb2
|
[
"MIT"
] | null | null | null |
py/example/gol.py
|
zefaxet/libant
|
1be1865404eea729f8512e9ccd73899fbd5b7cb2
|
[
"MIT"
] | null | null | null |
import pygame
from random import randint
from time import sleep
import data
SCREEN_SIZE = 700
STAGE_SIZE = 175 # 175 is largest size without bezels for 700 x 700 window
sizeof_rect = int(SCREEN_SIZE / STAGE_SIZE)
bezel = int((SCREEN_SIZE - (STAGE_SIZE * sizeof_rect)) / 2)
def draw_bordered_square(x, y, filled, size):
    pygame.draw.rect(screen, (0, 0, 0), (x, y, size, size))
    if not filled:
        pygame.draw.rect(screen, (255, 255, 255), (x + 1, y + 1, size - 2, size - 2))
def grid_to_screen(x, y):
return x * sizeof_rect + bezel, y * sizeof_rect + bezel
def screen_to_grid(x, y):
return int((x - bezel) / sizeof_rect), int((y - bezel) / sizeof_rect)
def flip_cell(x, y):
cells[x][y] = not cells[x][y]
draw_bordered_square(*grid_to_screen(x, y), cells[x][y], sizeof_rect)
def draw_cells():
for x in range(bezel, STAGE_SIZE * sizeof_rect + bezel, sizeof_rect):
for y in range(bezel, STAGE_SIZE * sizeof_rect + bezel, sizeof_rect):
coord = screen_to_grid(x, y)
draw_bordered_square(x, y, cells[coord[0]][coord[1]], sizeof_rect)
directions = []
for x in [-1, 0, 1]:
for y in [-1, 0, 1]:
directions.append((x, y))
directions.remove((0,0))
def get_neighbours(x, y):
    total = 0
    for d in directions:
        try:
            if cells[(x + d[0]) % STAGE_SIZE][(y + d[1]) % STAGE_SIZE]:
                total += 1
        except IndexError:
            # Report the wrapped indices actually used (the original printed
            # them modulo SCREEN_SIZE, which did not match the lookup above).
            print(x, y, d, (x + d[0]) % STAGE_SIZE, (y + d[1]) % STAGE_SIZE)
            raise
    return total
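
# Note (added): the modulo arithmetic above makes the grid toroidal; for
# example get_neighbours(0, 0) also inspects cells[STAGE_SIZE - 1][STAGE_SIZE - 1],
# because (0 - 1) % STAGE_SIZE wraps to the opposite edge.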
pygame.init()
pygame.display.set_mode((SCREEN_SIZE, SCREEN_SIZE))
pygame.display.set_caption("Game of Life Classic Demo")
screen = pygame.display.get_surface()
cells = data.grid
pause = True
generation = 0
while True:
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
            pause = not pause
        if event.type == pygame.MOUSEBUTTONDOWN:
            x, y = pygame.mouse.get_pos()
            x = int(x / int(SCREEN_SIZE / STAGE_SIZE))
            y = int(y / int(SCREEN_SIZE / STAGE_SIZE))
            flip_cell(x, y)
            print(x, y)
        if event.type == pygame.QUIT:
            exit(0)
    if not pause and generation < 50:
        new_cells = []
        for x in range(STAGE_SIZE):
            new_row = []
            for y in range(STAGE_SIZE):
                neighbours = get_neighbours(x, y)
                cell = cells[x][y]
                if cell:
                    if neighbours < 2 or neighbours > 3:
                        cell = 0
                elif neighbours == 3:
                    cell = 1
                new_row.append(cell)
            new_cells.append(new_row)
        cells = new_cells
        draw_cells()
        generation += 1
    pygame.display.flip()
| 25.897959 | 80 | 0.636722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.033491 |
34ba56f92389624b3e0ca24dcce3ebbffc885fcd
| 3,494 |
py
|
Python
|
latexnewfloat.py
|
takaakiaoki/sphinx_latexnewfloat
|
e20c4b6825484976cf41c48a634b67524024007f
|
[
"BSD-2-Clause"
] | null | null | null |
latexnewfloat.py
|
takaakiaoki/sphinx_latexnewfloat
|
e20c4b6825484976cf41c48a634b67524024007f
|
[
"BSD-2-Clause"
] | null | null | null |
latexnewfloat.py
|
takaakiaoki/sphinx_latexnewfloat
|
e20c4b6825484976cf41c48a634b67524024007f
|
[
"BSD-2-Clause"
] | null | null | null |
r""" latexnewfloat.py extension for latex builder to replace
literal-block environment by \captionof{LiteralBlockNewFloat}{caption_title} command.
For \captionof command (in capt-of pacakge), the new environment
LiteralBlockNewFloat should be configured by newfloat pagage instead of
original float package.
needspace package is required, and \literalblockneedspace and \literalblockcaptionaboveskip
are introduced in order to control pagebreak around caption.
Usage:
add following latex preambles for latex_elements['preamble'] in conf.py
'preamble': r'''
% declare new LiteralBlockNewFloat. You may change `name` option
\DeclareFloatingEnvironment{LiteralBlockNewFloat}
% configure additional options
\SetupFloatingEnvironment{LiteralBlockNewFloat}{name=Listing,placement=h,fileext=loc}
% change within option in similar to literal-block in sphinx.sty
\ifx\thechapter\undefined
\SetupFloatingEnvironment{LiteralBlockNewFloat}{within=section}
\else
\SetupFloatingEnvironment{LiteralBlockNewFloat}{within=chapter}
\fi
% if the left page space is less than \literalblockneedspace, insert a page-break
\newcommand{\literalblockneedspace}{5\baselineskip}
% margin before the caption of literal-block
\newcommand{\literalblockcaptionaboveskip}{0.5\baselineskip}
'''
Run sphinx with builder name 'latexnewfloat'
    python -m sphinx.__init__ -b latexnewfloat {inputdir} {outputdir}
or
- add entry in makefile
- you may also override original latex builder entry using app.set_translator
"""
from sphinx.writers.latex import LaTeXTranslator
from sphinx.builders.latex import LaTeXBuilder
def setup(app):
app.add_builder(LaTeXNewFloatBuilder)
app.set_translator('latexnewfloat', LaTeXNewFloatTranslator)
    # uncomment if you want to override the standard latex builder
# app.set_translator('latex', LaTeXNewFloatTranslator)
app.add_latex_package('newfloat')
app.add_latex_package('capt-of')
app.add_latex_package('needspace')
return {'version': '0.3'}
# inherited from LaTeXBuilder
class LaTeXNewFloatBuilder(LaTeXBuilder):
name = 'latexnewfloat'
# inherited from LaTeXTranslator
class LaTeXNewFloatTranslator(LaTeXTranslator):
def __init__(self, document, builder):
LaTeXTranslator.__init__(self, document, builder)
        # flag whether the caption is under a container[literal_block=True] node
self.in_container_literal_block = 0
def visit_caption(self, node):
self.in_caption += 1
if self.in_container_literal_block:
self.body.append('\\needspace{\\literalblockneedspace}')
self.body.append('\\vspace{\\literalblockcaptionaboveskip}')
self.body.append('\\captionof{LiteralBlockNewFloat}{')
else:
self.body.append('\\caption{')
def visit_container(self, node):
if node.get('literal_block'):
self.in_container_literal_block += 1
ids = ''
for id in self.next_literal_ids:
ids += self.hypertarget(id, anchor=False)
if node['ids']:
ids += self.hypertarget(node['ids'][0])
self.next_literal_ids.clear()
self.body.append('\n')
self.context.append(ids + '\n')
def depart_container(self, node):
if node.get('literal_block'):
self.in_container_literal_block -= 1
self.body.append(self.context.pop())
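
# Added note (not part of the original extension): the commented-out line in
# setup() above is how one would make the standard ``latex`` builder use this
# translator as well, instead of (or in addition to) the dedicated
# ``latexnewfloat`` builder:
#
#     app.set_translator('latex', LaTeXNewFloatTranslator)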
| 40.627907 | 92 | 0.709788 | 1,339 | 0.383228 | 0 | 0 | 0 | 0 | 0 | 0 | 2,089 | 0.597882 |
34baa570e639a04a3c0bb24a77d73f14fd9abb0d
| 9,347 |
py
|
Python
|
django_g11n/tools/ipranges.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
django_g11n/tools/ipranges.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
django_g11n/tools/ipranges.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Module to fetch and parse regional NIC delegation data
"""
import urllib.parse
import ftplib
import os
from functools import lru_cache
import socket
import ipaddress
from binascii import hexlify
import tempfile
TWD = tempfile.gettempdir()
DELEGATES = [
# America (non-latin)
"ftp://ftp.arin.net/pub/stats/arin/delegated-arin-extended-latest",
# Europe
"ftp://ftp.ripe.net/ripe/stats/delegated-ripencc-extended-latest",
# Africa
"ftp://ftp.afrinic.net/pub/stats/afrinic/delegated-afrinic-extended-latest",
# Asia & Pacific
"ftp://ftp.apnic.net/pub/stats/apnic/delegated-apnic-extended-latest",
# Latin-America
"ftp://ftp.lacnic.net/pub/stats/lacnic/delegated-lacnic-extended-latest",]
@lru_cache(None)
def _split_url(url):
"Split delegate url into host, file_path and file_name."
url = urllib.parse.urlparse(url)
host = url.netloc
file_path, file_name = url.path.rsplit('/', 1)
return (host, file_path, file_name)
def _file_details(ftp, file_name):
"Retrieve details of the file."
details = None
print('# Retrieving file details')
try:
listing = list(ftp.mlsd())
print('# Server support mlsd, extracting details ...')
for entry in listing:
name, facts = entry
if name.lower() == file_name.lower():
details = facts
details['name_local'] = name
details['name_remote'] = name
break
except ftplib.error_perm:
print('# Server does not support mlsd, falling back.')
tmp = list()
ftp.retrlines('LIST %s' % file_name, callback=tmp.append)
if '->' in tmp[0]:
print('# Fall back: entry is a symbolic link, following ...')
link2name = tmp[0].split('->')[1].strip()
tmp = list()
ftp.retrlines('LIST %s' % link2name, callback=tmp.append)
details = dict()
tmp = tmp[0]
tmp = tmp.rsplit(' ', 1)[0]
details['name_local'] = file_name
details['name_remote'] = link2name
tmp, details['size'], month, day, time = tmp.rsplit(' ', 4)
details['modify'] = '_'.join([month, day, time.replace(':', '')])
return details
def download(url):
"Download the url."
host, file_path, file_name = _split_url(url)
print('# Connecting to: %s' % host)
ftp = ftplib.FTP(host)
print('# Logging in ...')
ftp.login()
print('# Changing cwd to: %s' % file_path)
ftp.cwd(file_path)
details = _file_details(ftp, file_name)
file_cache = '_'.join([details['name_local'],
details['size'],
details['modify']])
file_cache += '.csv'
if file_cache in os.listdir(TWD):
print('# File is already downloaded !')
return
    print('# Downloading ...')
    retr = 'RETR %s' % details['name_remote']
    local_file = os.path.join(TWD, file_cache)
    # Use a context manager so the file handle is closed after the transfer.
    with open(local_file, 'wb') as file_handle:
        ftp.retrbinary(retr, file_handle.write)
    print('# Downloaded!')
# The parsing part of the program
def _address_range_ipv4(address, width):
"Convert IPv4 address and amount to integer range."
    # The width of ipv4 delegations is given as a number of addresses, which
    # need not align with an exact netmask (for example, a width of 640 addresses).
blocks = address.split('.')
for index, block in enumerate(blocks):
blocks[index] = bin(int(block, 10))[2::].zfill(8)
blocks = ''.join(blocks)
network = int(blocks, 2)
broadcast = network + int(width) - 1
return(network, broadcast)
def _ipv6_to_int(ipv6_address):
"Convert an IPv6 address to an integer"
packed_string = socket.inet_pton(socket.AF_INET6, ipv6_address.exploded)
return int(hexlify(packed_string), 16)
def _address_range_ipv6(address, width):
"Convert IPv6 address and broadcast to integer range."
network = ipaddress.ip_network(address+'/'+width)
broadcast = _ipv6_to_int(network.broadcast_address)
network = _ipv6_to_int(network.network_address)
return(network, broadcast)
def _address_range(ipv, address, width):
"From an IP address create integers for the network and broadcast IP"
    # This is essentially the range within which the IP address lies.
if ipv == 4:
# IPv4, the width is given as the number of IPs
network, broadcast = _address_range_ipv4(address, width)
else:
# IPv6, width is given by a netmask.
network, broadcast = _address_range_ipv6(address, width)
return (network, broadcast)
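
# Worked example (added for illustration): an IPv4 delegation line such as
# "apnic|AU|ipv4|1.0.0.0|256|..." leads to _address_range(4, '1.0.0.0', '256')
# == (16777216, 16777471), the integer forms of 1.0.0.0 and 1.0.0.255.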
def _parse_row(row):
    "Parse and modify the row."
    columns = row.strip().split('|')
    # If there aren't more than 6 columns it can't be parsed, so skip it.
    # (Returning early also guarantees `tmp` is always bound below.)
    if len(columns) <= 6:
        return None
    tmp = columns[:5]
    if len(tmp[1].strip()) == 0:
        # This is the country it is assigned to, if there is no country
        # I am not interested in it.
        return None
    if tmp[2].strip().lower() not in ['ipv4', 'ipv6']:
        # If the protocol is not an IP protocol (such as asn), I am not
        # interested.
        return None
    if '6' in tmp[2]:
        tmp[2] = 6
    else:
        tmp[2] = 4
    # Convert the IP address and netmask/number of IPs to an IP range where
    # the IPs are converted to a numerical value.
    tmp[3], tmp[4] = _address_range(tmp[2], tmp[3], tmp[4])
    return tmp
class CompactRanges(object):
"Try to compact the ranges."
def __init__(self):
self.ranges = list()
def add(self, *newer):
"Add a line to the ranges, compacting where possible."
# nic, tld, ipv, network, broadcast = *newer
newer = list(newer)
if len(self.ranges) == 0:
self.ranges.append(newer)
return
# Testing if current range is a continuation of the previous one
older = self.ranges[-1]
if older[0] == newer[0] and \
older[1] == newer[1] and \
older[2] == newer[2] and \
older[4] == newer[3] - 1:
            # The older broadcast is the same as the newer network - 1, thus it
            # is a continuation, so we extend the range of the older entry.
self.ranges[-1][4] = newer[4]
else:
self.ranges.append(newer)
def length(self):
"return length of ranges"
return len(self.ranges)
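
# Added usage sketch for CompactRanges: two delegations whose integer ranges
# abut collapse into a single entry (the values below are the 1.0.0.0/24 and
# 1.0.1.0/24 blocks in integer form):
#
#   cr = CompactRanges()
#   cr.add('apnic', 'AU', 4, 16777216, 16777471)
#   cr.add('apnic', 'AU', 4, 16777472, 16777727)
#   cr.length()  # -> 1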
def _local_file_from_url(url):
"Open the file, if available from the url"
file_name = _split_url(url)[2]
candidates = list()
for candidate in os.listdir(TWD):
if file_name.lower() in candidate.lower():
candidates.append(candidate)
candidates.sort(reverse=True)
if len(candidates) == 0:
print('# No files to parse')
return None
file_full = os.path.join(TWD, candidates[0])
return file_full
def parse_latest(url):
"Parse a file as it has been retrieved from the url."
file_name = _local_file_from_url(url)
if file_name is None:
print('# No files available to parse !')
return
print('# Opening file: %s' % file_name)
compacted = CompactRanges()
count_linesall = 0
count_relevant = 0
with open(file_name, 'r') as file_open:
for row in file_open:
count_linesall += 1
parsed = _parse_row(row)
if parsed is None:
continue
count_relevant += 1
compacted.add(*parsed)
print('# Parsed %s lines' % count_linesall)
print('# - of which relevant: %s' % count_relevant)
print('# - reduced to ranges: %s' % compacted.length())
return compacted.ranges
def _compact_string(text):
    "Try to make the text more compact."
    # We go through the text and try to replace repeated characters with
    # _c_n_ where c is the character and n is the number of repeats. The
    # underscore in this context is guaranteed not to occur in text, so we
    # can use it as an escape character.
    # Also we do not collapse runs shorter than 5 characters.
    tmp = list()
    last = ''
    count = 0
    for character in text + '_':
        # Add the underscore so we make sure not to miss the last bit of the
        # string if it happens to end on more than 4 identical characters.
        count += 1
        if character != last:
            if count > 4:
                # Drop the run we just buffered and emit the compact form.
                tmp = tmp[:len(tmp) - count]
                tmp.append('_%s_%s_' % (last, count))
            # Reset the counter on every change of character, not only after
            # a collapse; otherwise the next removal would span unrelated
            # text (the original only reset inside the collapse branch).
            count = 0
            last = character
        tmp.append(character)
    # Remove the appended underscore before returning.
    return ''.join(tmp)[:-1]
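
# Example (added): _compact_string('aa0000000bb') -> 'aa_0_7_bb'; the run of
# seven zeros collapses to '_0_7_' while runs of four or fewer characters are
# kept verbatim.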
def get():
"Fetch and parse data"
print('#'*79)
print('# Fetching data from regional NICs.')
print('#'*79)
tmp = list()
for delegate in DELEGATES:
print('# Using: %s' % delegate)
download(delegate)
tmp += parse_latest(delegate)
print('#' * 79)
print('# A total of %s IP ranges have been defined.' % len(tmp))
for nic, country, ipv, network, broadcast in tmp:
hex_network = hex(network)[2::].zfill(32)
hex_broadcast = hex(broadcast)[2::].zfill(32)
rid = nic[:2]+country+str(ipv)+hex_broadcast+hex_network
rid = rid.lower()
rid = _compact_string(rid)
yield rid, nic, country, ipv, hex_network, hex_broadcast
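
if __name__ == '__main__':
    # Added usage sketch (not part of the original module): stream the
    # compacted ranges to stdout, consuming the get() generator above.
    for _row in get():
        print(_row)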
| 32.120275 | 80 | 0.609714 | 968 | 0.103563 | 718 | 0.076816 | 248 | 0.026533 | 0 | 0 | 3,239 | 0.346528 |
34bac3615a35d59de02e3b0769b794431fef838e
| 548 |
py
|
Python
|
resource/index.py
|
TintypeMolly/Yuzuki
|
94dc874c4000ac918f0b52846927311b3f25ce2c
|
[
"MIT"
] | 6 |
2015-01-09T06:32:15.000Z
|
2015-08-15T13:23:34.000Z
|
resource/index.py
|
TintypeMolly/Yuzuki
|
94dc874c4000ac918f0b52846927311b3f25ce2c
|
[
"MIT"
] | 73 |
2015-01-08T11:38:34.000Z
|
2015-09-10T09:55:08.000Z
|
resource/index.py
|
TintypeMolly/Yuzuki
|
94dc874c4000ac918f0b52846927311b3f25ce2c
|
[
"MIT"
] | 11 |
2015-01-09T06:26:12.000Z
|
2015-03-26T13:16:19.000Z
|
# -*- coding: utf-8 -*-
from helper.resource import YuzukiResource
from helper.template import render_template
from config.config import SITE_DESCRIPTION
from helper.content import markdown_convert_file
class Index(YuzukiResource):
def render_GET(self, request):
with open("page/index") as f:
context = {
"site_description": SITE_DESCRIPTION,
"content": markdown_convert_file(f),
"is_index": True,
}
return render_template("page.html", request, context)
| 32.235294 | 61 | 0.656934 | 342 | 0.624088 | 0 | 0 | 0 | 0 | 0 | 0 | 83 | 0.15146 |
34bb5b87da16431c41b077d93418d9a992f6e4d0
| 6,281 |
py
|
Python
|
src/dh2019dataverse.py
|
Mish-JPFD/DAPIload
|
d1f2c0e9832e6731c7e98f03481712db765e9af6
|
[
"MIT"
] | null | null | null |
src/dh2019dataverse.py
|
Mish-JPFD/DAPIload
|
d1f2c0e9832e6731c7e98f03481712db765e9af6
|
[
"MIT"
] | null | null | null |
src/dh2019dataverse.py
|
Mish-JPFD/DAPIload
|
d1f2c0e9832e6731c7e98f03481712db765e9af6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Author : Jacques Flores
Created : October 17th,2019
About: Script for creating datasets in Dataverse.
An Empty JSON file with Dataverse structure is imported and converted into a JSON dict
Metadata is imported from an excel file into a pandas dataframe and written into the empty JSON formatted string.
"""
from pyDataverse import api
from pyDataverse.utils import read_file_json
from pyDataverse.utils import dict_to_json
import pandas as pd
import copy
def create_datasets(dataverse, xl, template):
dataset = 0
entries = xl['paperID'].count()
handles_list = []
while dataset < entries:
#Make a copy of the dataverse json template as metadata
metadata = copy.deepcopy(template)
#Store metadata from excel into variables
authorname = xl.loc[:, xl.columns.str.endswith('name')]
authoraffiliations = xl.loc[:, xl.columns.str.endswith('organisation')]
contactname = xl.loc[dataset,'submitting_author']
title = xl.loc[dataset]['title']
contactemail = xl.loc[dataset]['authors_formatted_1_email']
subject = 'Arts and Humanities'
poster = xl.loc[dataset]['contribution_type'] == 'Poster'
fileid = xl.loc[dataset]['paperID']
#modify metadata
#title
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][0]\
['value'] = title
#Authorname and affiliation
for author, affiliation in zip(authorname.iloc[dataset].dropna(), authoraffiliations.iloc[dataset].dropna()):
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][1]\
['value'].append({\
'authorName': {'value': author , 'typeClass': 'primitive', 'multiple': False, 'typeName': 'authorName'},\
'authorAffiliation':{'value': affiliation , 'typeClass': 'primitive', 'multiple': False, 'typeName': 'authorAffiliation'}})
#E-mail contact
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][2]\
['value'][0]['datasetContactEmail']['value'] = contactemail
#Dataset contact name
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][2]\
['value'][0]['datasetContactName']['value'] = contactname
#Description
if poster:
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][3]\
['value'][0]['dsDescriptionValue']['value'] = "Abstract and poster of paper %s presented at the Digital Humanities Conference 2019 (DH2019), Utrecht , the Netherlands 9-12 July, 2019." % fileid
else:
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][3]\
['value'][0]['dsDescriptionValue']['value'] = "Abstract of paper %s presented at the Digital Humanities Conference 2019 (DH2019), Utrecht , the Netherlands 9-12 July, 2019." % fileid
#Subject (controlled vocabulary: only set values are allowed; check dataverse for these )
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][4]\
['value'][0]= subject
#converting dictionary into a json formatted string
metadata1 = dict_to_json(metadata)
        #creating the dataset in the "DH2019" dataverse with metadata and printing the response
dset = dataverse.create_dataset( "DH2019", metadata1)
print ('-' * 40)
print (dset.json())
print (dset.status_code)
#store persistent identifier from newly created dataset
handle = dset.json()['data']['persistentId']
handles_list.append((handle,fileid))
        #upload files (the pyDataverse upload_file function had to be edited because it kept raising an error; as a result it does not return a response)
        #if there is a poster this will upload the abstract and the poster, ELSE it will only upload the abstract
        #The abstract should be named "(paperID)a.pdf" [e.g. 100a.pdf] and the poster "(paperID)p.pdf" [e.g. 100p.pdf] for this to work.
        #If named differently this can be changed below
if poster:
dataverse.upload_file(handle , 'filesa/%sa.pdf' % (fileid))
dataverse.upload_file(handle , 'filesa/%sp.pdf' % (fileid))
else:
dataverse.upload_file(handle , 'filesa/%sa.pdf' % (fileid))
#publish dataset and print response
pubdset = dataverse.publish_dataset(handle, type = "major", auth = True)
print ('-' * 40)
print (pubdset.json())
print (pubdset.status_code)
        #Counter for datasets and emptying the metadata template
dataset = dataset + 1
metadata = {}
return(handles_list)
def publish_datasets(dataverse, handles_list):
    #publish each dataset and print the response
    #Note: this expects the handles DataFrame built below, where column 0
    #holds the persistent identifiers
    for dataset in range(len(handles_list)):
        handle = handles_list.iloc[dataset][0]
        pubdset = dataverse.publish_dataset(handle, type = "major", auth = True)
        print ('-' * 40)
        print (pubdset.json())
        print (pubdset.status_code)
# Confidential API Token (Do Not Distribute) ****last four digits removed)
apitoken = "38404b17-46f9-4fe5-808e-a4a38bd80aea"
# Demo Dataverse server
dtvserver = "https://dataverse.nl"
#Loading connection and authentication
dataverse = api.Api(dtvserver,apitoken)
#reading json file as dict
template = read_file_json('dataversetemplate.json')
#read excel file with metadata as pandas dataframe
xlfile = "DH2019_paperswithfiles.xlsx"
xl = pd.read_excel(xlfile, converters={'paperID': str})
handles = create_datasets(dataverse, xl, template)
handles_df = pd.DataFrame(handles)
handles_df.to_excel("handles.xlsx")
| 44.864286 | 215 | 0.603566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,880 | 0.458526 |
34bbafd4c9930c0faccaa0114904fc2722169c13
| 778 |
py
|
Python
|
manage.py
|
YaroslavChyhryn/SchoolAPI
|
6b5eb4e1faf6b962561109fc227057ad0f8d4d92
|
[
"MIT"
] | null | null | null |
manage.py
|
YaroslavChyhryn/SchoolAPI
|
6b5eb4e1faf6b962561109fc227057ad0f8d4d92
|
[
"MIT"
] | null | null | null |
manage.py
|
YaroslavChyhryn/SchoolAPI
|
6b5eb4e1faf6b962561109fc227057ad0f8d4d92
|
[
"MIT"
] | null | null | null |
from flask_script import Manager, prompt_bool
# from flask_migrate import Migrate, MigrateCommand
from school_api.app import create_app
from school_api.db import create_tables, drop_tables
from school_api.data_generator import test_db
"""
Refused flask_migration because it was overkill for this project
"""
app = create_app()
# migrate = Migrate(app, db)
manager = Manager(app)
# manager.add_command('db', MigrateCommand)
@manager.command
def createtables():
drop_tables(app)
create_tables(app)
@manager.command
def testdb():
drop_tables(app)
create_tables(app)
test_db(app)
@manager.command
def droptables():
if prompt_bool("Are you sure you want to lose all your data"):
drop_tables(app)
if __name__ == '__main__':
manager.run()
| 20.473684 | 66 | 0.746787 | 0 | 0 | 0 | 0 | 297 | 0.381748 | 0 | 0 | 249 | 0.320051 |
34bbecbef412ab4c340ef6c39922f83c94f745b1
| 214 |
py
|
Python
|
parrot2.py
|
AmitSuresh/learning-python
|
f1ea5b9f3659f21504b1b0e452c03239b03cde85
|
[
"MIT"
] | null | null | null |
parrot2.py
|
AmitSuresh/learning-python
|
f1ea5b9f3659f21504b1b0e452c03239b03cde85
|
[
"MIT"
] | null | null | null |
parrot2.py
|
AmitSuresh/learning-python
|
f1ea5b9f3659f21504b1b0e452c03239b03cde85
|
[
"MIT"
] | null | null | null |
p="\nTell me something, and I will repeat it back to you"
p+="\nEnter 'quit' to end the program."
active = True
while active:
message=input(p)
if message =='quit':
active = False
else:
print(message)
| 23.777778 | 58 | 0.663551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.453271 |
34bcda748e6f244af235e4cdcc2cf69df9e0d4a6
| 2,512 |
py
|
Python
|
info_modules/custom/example/layer_info.py
|
HusseinKabbout/qwc-feature-info-service
|
3d7cdbc1a3dc4a3725ba0529204848d47c4ed87e
|
[
"MIT"
] | null | null | null |
info_modules/custom/example/layer_info.py
|
HusseinKabbout/qwc-feature-info-service
|
3d7cdbc1a3dc4a3725ba0529204848d47c4ed87e
|
[
"MIT"
] | null | null | null |
info_modules/custom/example/layer_info.py
|
HusseinKabbout/qwc-feature-info-service
|
3d7cdbc1a3dc4a3725ba0529204848d47c4ed87e
|
[
"MIT"
] | 2 |
2020-03-24T09:13:14.000Z
|
2021-09-29T10:43:31.000Z
|
# Sample implementation of a custom layer info module
def layer_info(layer, x, y, crs, params, identity):
"""Query layer and return info result as dict:
{
'features': [
{
'id': <feature ID>, # optional
'attributes': [
{
'name': '<attribute name>',
'value': '<attribute value>'
}
],
'bbox': [<minx>, <miny>, <maxx>, <maxy>], # optional
'geometry': '<WKT geometry>' # optional
}
]
}
:param str layer: Layer name
:param float x: X coordinate of query
:param float y: Y coordinate of query
:param str crs: CRS of query coordinates
:param obj params: FeatureInfo service params
{
'i': <X ordinate of query point on map, in pixels>,
'j': <Y ordinate of query point on map, in pixels>,
'height': <Height of map output, in pixels>,
'width': <Width of map output, in pixels>,
'bbox': '<Bounding box for map extent as minx,miny,maxx,maxy>',
'crs': '<CRS for map extent>',
'feature_count': <Max feature count>,
'with_geometry': <Whether to return geometries in response
(default=1)>,
'with_maptip': <Whether to return maptip in response
(default=1)>,
'FI_POINT_TOLERANCE': <Tolerance for picking points, in pixels
(default=16)>,
'FI_LINE_TOLERANCE': <Tolerance for picking lines, in pixels
(default=8)>,
'FI_POLYGON_TOLERANCE': <Tolerance for picking polygons, in pixels
(default=4)>,
'resolution': <Resolution in map units per pixel>
}
:param str identity: User name or Identity dict
"""
features = []
feature_id = 123
attributes = [
{
'name': 'title',
'value': 'Feature for Layer %s' % layer
},
{
'name': 'name',
'value': 'Feature Name'
}
]
px = round(x)
py = round(y)
bbox = [px - 50, py - 50, px + 50, py + 50]
geometry = "POINT(%s %s)" % (px, py)
features.append({
'id': feature_id,
'attributes': attributes,
'bbox': bbox,
'geometry': geometry
})
return {
'features': features
}
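
# Added usage sketch (not part of the module): calling layer_info directly
# with placeholder arguments to inspect the stub result; the params dict and
# identity value below are dummies.
#
#   result = layer_info('roads', 2600000.0, 1200000.0, 'EPSG:2056', {}, 'demo')
#   result['features'][0]['geometry']  # -> 'POINT(2600000 1200000)'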
| 31.797468 | 78 | 0.48328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,016 | 0.802548 |
34bce8f103a1242d4cbbb176bc3c65328694b160
| 20,740 |
py
|
Python
|
integration/gCalIntegration.py
|
conzty01/RA_Scheduler
|
6bf4931871aef4058d93917e62ceb31766e06b3a
|
[
"MIT"
] | 1 |
2021-03-31T05:26:17.000Z
|
2021-03-31T05:26:17.000Z
|
integration/gCalIntegration.py
|
conzty01/RA_Scheduler
|
6bf4931871aef4058d93917e62ceb31766e06b3a
|
[
"MIT"
] | 83 |
2018-03-19T18:32:34.000Z
|
2022-02-01T02:15:01.000Z
|
integration/gCalIntegration.py
|
conzty01/RA_Scheduler
|
6bf4931871aef4058d93917e62ceb31766e06b3a
|
[
"MIT"
] | 2 |
2021-01-15T22:16:00.000Z
|
2021-02-10T01:03:32.000Z
|
from google.auth.transport.requests import Request
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
import google_auth_oauthlib.flow
import logging
import os
class gCalIntegratinator:
""" Object for handling interactions between RADSA and Google Calendar API.
This class uses the googleapiclient to interact with the Google Calendar API.
AUTHORIZATION WORKFLOW:
1) Redirect user to the Authorization URL
2) User consents to Google Calendar integration and is
redirected back to this application
3) The Authorization Response and State are returned from Google
and used to generate user credentials
4) User credentials are returned back to the application where they
are stored in the DB for later use
Method Return Statuses:
-5: Error Creating Google Calendar Event
-4: Invalid Calendar Id
-3: Invalid Credentials Received
-2: Need to Renew Credentials
-1: Unknown Error Occurred
0: Credentials are valid but needed refresh
1: Success
Args:
scopes (lst): A list containing the scopes required to interact with the
Google Calendar API. The default provided are
- .../auth/calendar.calendarlist.readonly
- .../auth/calendar.app.created
"""
SCOPES = ['https://www.googleapis.com/auth/calendar.app.created',
'https://www.googleapis.com/auth/calendar.calendarlist.readonly']
def __init__(self, scopes=SCOPES):
logging.debug("Creating gCalIntegratinator Object")
# Name of Google service being used
self.serviceName = "calendar"
# API version number of Google service being used
self.serviceVersion = "v3"
# Set the scopes for reference
self.scopes = scopes
# Load the app credentials from the environment
self.__appCreds = self._getCredsFromEnv()
# Generate the oAuth2 flow for handling the client/app authentication
self.flow = google_auth_oauthlib.flow.Flow.from_client_config(
self.__appCreds, scopes=scopes)
def _getCredsFromEnv(self):
# This will return a deserialized JSON object that is assembled per
# Google's specifications. This object will be configured for a 'web' app
# This does assume the following parameters are available in the environment:
# CLIENT_ID
# PROJECT_ID
# AUTH_URI
# TOKEN_URI
# AUTH_PROVIDER_X509_CERT_URL
# CLIENT_SECRET
# REDIRECT_URIS -> This should be the urls separated by a ',' only
# JAVASCRIPT_ORIGINS -> This should be the urls separated by a ',' only
logging.info("Loading app settings from environment")
return {
"web": {
"client_id": os.environ["CLIENT_ID"],
"project_id": os.environ["PROJECT_ID"],
"auth_uri": os.environ["AUTH_URI"],
"token_uri": os.environ["TOKEN_URI"],
"auth_provider_x509_cert_url": os.environ["AUTH_PROVIDER_X509_CERT_URL"],
"client_secret": os.environ["CLIENT_SECRET"],
"redirect_uris": [entry for entry in os.environ["REDIRECT_URIS"].split(",")],# ["https://b03bb12e8ff3.ngrok.io"],
"javascript_origins": [entry for entry in os.environ["JAVASCRIPT_ORIGINS"].split(",")]
}
}
def _validateCredentials(self, creds):
# Check to see if the client credentials are valid and that they have not
# expired. This method can have the following outcomes:
#
# If the credentials are valid, then the credentials will be returned
# If the credentials are not valid, an InvalidCalendarCredentialsError will be raised.
# If the credentials have expired, an ExpiredCalendarCredentialsError will be raised.
logging.debug("Checking Credentials")
try:
# Are the credentials invalid?
if not creds.valid:
# If the credentials are expired and can be refreshed,
# then refresh the credentials
if creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
# Otherwise we will need to prompt the user to log in
# and approve integration again.
logging.debug("Manual Credential Refresh Required")
raise self.ExpiredCalendarCredentialsError("Manual Credential Refresh Required")
else:
# If the credentials are valid, return successful
logging.debug("Credentials Valid")
except AttributeError:
# If we receive an AttributeError, then we did not receive the expected
# credentials object.
# Log the occurrence
logging.info("Invalid Credentials Received")
# Raise an InvalidCalendarCredentialsError
raise self.InvalidCalendarCredentialsError("Invalid Credentials Received")
except self.ExpiredCalendarCredentialsError as e:
# If we receive an ExpiredCalendarCredentialsError, then simply pass that up to
# the calling method.
raise e
except Exception as e:
# If we receive some other, unexpected Exception, then notify the calling method
# Log the occurrence
logging.error(str(e))
# Raise an UnknownError
raise self.UnexpectedError("Calendar Credential Validation", e, str(e))
# If we made it this far without raising an exception,
# then the credentials are valid.
return creds
def generateAuthURL(self, redirect_uri):
# Generate and return an authorization url as well as a state
logging.info("Generating Google Authorization URL")
# Set the flow's redirect_uri
self.flow.redirect_uri = redirect_uri
# Return (auth_url, state) for the given redirect_uri
return self.flow.authorization_url(access_type="offline",
include_granted_scopes="true",
prompt="select_account")
def handleAuthResponse(self, auth_response, redirect_uri):
# Generate authorization credentials from the authorization response
logging.info("Generating Google Client Credentials")
self.flow.redirect_uri = redirect_uri
# Generate the token
self.flow.fetch_token(authorization_response=auth_response)
return self.flow.credentials
def createGoogleCalendar(self, client_creds):
# Create a Secondary Google Calendar using the user credentials
logging.info("Creating Google Calendar")
# Check to make sure the credentials are valid
client_creds = self._validateCredentials(client_creds)
# Build the Google Calendar service with appropriate version
service = build(self.serviceName, self.serviceVersion, credentials=client_creds)
# Create the body of the request to create a new Google Calendar.
newCalBody = {
"summary": "RA Duty Schedule",
"description": "Calendar for the Resident Assistant Duty Schedule.\n\n"
"Created and added to by the RA Duty Scheduler Application (RADSA)."
}
try:
# Call the Google Calendar API Service to have the new calendar created
created_calendar = service.calendars().insert(body=newCalBody).execute()
except Exception as e:
# If we received an exception, then wrap it in an CalendarCreationError and
# pass that up to the calling function.
# Log the occurrence
logging.error("Error encountered when attempting to create Google Calendar: {}".format(str(e)))
# Raise the CalendarCreationError
raise self.CalendarCreationError(str(e))
logging.info("Calendar Creation Complete")
# Return the ID of the new calendar.
return created_calendar["id"]
def exportScheduleToGoogleCalendar(self, client_creds, calendarId, schedule, flaggedDutyLabel):
# Export the provided schedule to Google Calendar
# Check to make sure the credentials are valid
client_creds = self._validateCredentials(client_creds)
# Create the Google Calendar Service
service = build(self.serviceName, self.serviceVersion, credentials=client_creds)
# Check to see if the 'RA Duty Schedule' calendar exists. If not, create
# the calendar.
try:
logging.debug("Verifying that the 'RA Schedule Calendar' exists.")
res = service.calendarList().get(calendarId=calendarId).execute()
logging.debug("CalendarList().get() Result: {}".format(res))
except HttpError as e:
# An HttpError occurred which could indicate that the calendar no longer exists.
# If this is the case, the HttpError would be a 404 error.
# Log the occurrence of this issue.
logging.info("'RA Schedule Calendar' not found for client.")
logging.error(str(e))
# Plan B is to create a new Google Calendar.
try:
# Create the calendar using the client_creds
calendarId = self.createGoogleCalendar(client_creds)
except self.CalendarCreationError as subE:
# An error occurred when attempting to create the Calendar.
# Wrap the exception in a ScheduleExportError and raise it
raise self.ScheduleExportError(subE, "Unable to locate valid Google Calendar.")
# Once we are able to locate the calendar, start adding the events to it!
try:
logging.info("Exporting schedule")
# Iterate through the schedule
for duty in schedule:
# Check to see if this duty should be flagged
if "flagged" in duty["extendedProps"].keys() and duty["extendedProps"]["flagged"]:
# If so, then set the summary and description messages to include the flagged
# duty label.
summaryMsg = duty["title"] + " ({})".format(flaggedDutyLabel)
descriptionMsg = duty["title"] + " has been assigned for {} duty.".format(flaggedDutyLabel)
else:
# Otherwise, set the summary and description messages to be the default
summaryMsg = duty["title"]
descriptionMsg = duty["title"] + " has been assigned for duty."
# Create an Event Object that will handle assembling the event's body for the Google Calendar API
eb = Event(summaryMsg,
descriptionMsg,
duty["start"])
# Call the Google Calendar API to add the event
service.events().insert(calendarId=calendarId,
body=eb.getBody(),
supportsAttachments=False).execute()
except HttpError as e:
# An HttpError could indicate a number of things including a missing calendar or a
# Bad Request/malformed data. If this occurs, stop processing and report back to the
# server.
# Log the occurrence
logging.info("Error encountered while pushing Event: {} to Google Calendar".format(duty["start"]))
logging.error(str(e))
# Wrap the exception in a ScheduleExportError and raise it
raise self.ScheduleExportError(e, "Unable to export schedule to Google Calendar.")
logging.info("Export complete")
class BaseGCalIntegratinatorException(Exception):
# Base GCalIntegratinator Exception
def __init__(self, *args):
# If args are provided
if args:
# Then set the message as the first argument
self.message = args[0]
logging.debug("BASE ERROR CREATION: {}".format(args))
else:
# Otherwise set the message to None
self.message = None
# Set the exception name to GCalIntegratinatorError
self.exceptionName = "GCalIntegratinatorError"
def __str__(self):
# If a message has been defined
if self.message is not None:
# Then put the message in the string representation
return "{}".format(self.message)
else:
# Otherwise return a default string
return "{} has been raised".format(self.exceptionName)
class CalendarCreationError(BaseGCalIntegratinatorException):
"""GCalIntegratinator Exception to be raised when an error occurs
during the creation of the a Google Calendar."""
def __init__(self, *args):
# Pass the arguments to the parent class.
super().__init__(*args)
# Set the name of the exception
self.exceptionName = "GoogleCalendarCreationError"
class InvalidCalendarCredentialsError(BaseGCalIntegratinatorException):
"""GCalIntegratinator Exception to be raised if the provided
Google Calendar credentials are invalid."""
def __init__(self, *args):
# Pass the arguments to the parent class.
super().__init__(*args)
# Set the name of the exception
self.exceptionName = "InvalidCalendarCredentialsError"
class ExpiredCalendarCredentialsError(BaseGCalIntegratinatorException):
"""GCalIntegratinator Exception to be raised if the provided Google
Calendar calendar credentials have expired."""
def __init__(self, *args):
# Pass the arguments to the parent class.
super().__init__(*args)
# Set the name of the exception
self.exceptionName = "ExpiredCalendarCredentialsError"
class ScheduleExportError(BaseGCalIntegratinatorException):
"""GCalIntegratinator Exception to be raised if an error is encountered
when attempting to export a schedule to Google Calendar."""
def __init__(self, wrappedException, *args):
# Pass the arguments to the parent class.
super().__init__(*args)
# Set the name of the exception
self.exceptionName = "ScheduleExportError"
# Set the wrappedException
self.wrappedException = wrappedException
class UnexpectedError(BaseGCalIntegratinatorException):
"""GCalIntegratinator Exception to be raised if an unknown
error occurs within the GCalIntegratintor object"""
        def __init__(self, location, wrappedException, *args):
            # Pass the arguments to the parent class.
            super().__init__(*args)
# Set the name of the exception
self.exceptionName = "GCalIntegratinatorUnknownError"
# Set the location of where the error occurred.
self.exceptionLocation = location
# Set the wrapped exception
self.wrappedException = wrappedException
class Event:
""" Object for abstracting the Event schema that is used by the Google Calendar API """
def __init__(self, summary, description, date):
self.__body = {
# Taken from https://googleapis.github.io/google-api-python-client/docs/dyn/calendar_v3.events.html#insert
# with supplemental information from https://developers.google.com/calendar/v3/reference/events/insert
"summary": summary, # Title of the event.
"description": description, # Description of the event. Can contain HTML. Optional.
"start": { # The (inclusive) start time of the event. For a recurring event, this is the
# start time of the first instance.
"date": date # The date, in the format "yyyy-mm-dd", if this is an all-day event.
},
"end": { # The (exclusive) end time of the event. For a recurring event,
# this is the end time of the first instance.
"date": date # The date, in the format "yyyy-mm-dd", if this is an all-day event.
},
"status": "confirmed", # Status of the event. Optional. Possible values are:
# - "confirmed" - The event is confirmed. This is the default status.
# - "tentative" - The event is tentatively confirmed.
# - "cancelled" - The event is cancelled (deleted). The list method returns
# cancelled events only on incremental sync (when syncToken or
# updatedMin are specified) or if the showDeleted flag is set to
# true. The get method always returns them. A cancelled status
# represents two different states depending on the event type:
# - Cancelled exceptions of an uncancelled recurring event
# indicate that this instance should no longer be presented
# to the user. Clients should store these events for the
# lifetime of the parent recurring event.
# Cancelled exceptions are only guaranteed to have values
# for the id, recurringEventId and originalStartTime fields
# populated. The other fields might be empty.
# - All other cancelled events represent deleted events.
# Clients should remove their locally synced copies. Such
# cancelled events will eventually disappear, so do not
# rely on them being available indefinitely.
# Deleted events are only guaranteed to have the id field populated. On the
# organizer's calendar, cancelled events continue to expose event details
# (summary, location, etc.) so that they can be restored (undeleted). Similarly,
# the events to which the user was invited and that they manually removed continue
# to provide details. However, incremental sync requests with showDeleted set to
# false will not return these details.
# If an event changes its organizer (for example via the move operation) and the
# original organizer is not on the attendee list, it will leave behind a cancelled
# event where only the id field is guaranteed to be populated.
"transparency": "opaque", # Whether the event blocks time on the calendar. Optional. Possible values are:
# - "opaque" - Default value. The event does block time on the calendar. This
# is equivalent to setting Show me as to Busy in the Calendar UI.
# - "transparent" - The event does not block time on the calendar. This is
# equivalent to setting Show me as to Available in the
# Calendar UI.
}
def getBody(self):
# Return the Event Body
return self.__body
if __name__ == "__main__":
g = gCalIntegratinator()
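
    # Added usage sketch (assumes the OAuth client settings read by
    # _getCredsFromEnv() are present in the environment; the redirect URL is
    # a placeholder):
    #
    #   auth_url, state = g.generateAuthURL("https://example.com/oauth2callback")
    #   ... redirect the user to auth_url; then, on the callback request:
    #   creds = g.handleAuthResponse(auth_response_url, "https://example.com/oauth2callback")
    #   calendar_id = g.createGoogleCalendar(creds)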
| 46.711712 | 129 | 0.588091 | 20,478 | 0.987367 | 0 | 0 | 0 | 0 | 0 | 0 | 11,683 | 0.563308 |
34bff1450311d256bf20dadcb095880fb22acb44
| 933 |
py
|
Python
|
pins/admin.py
|
boyombo/smsbet
|
66c20494b729c930edec553fe71e2084222acc4a
|
[
"MIT"
] | null | null | null |
pins/admin.py
|
boyombo/smsbet
|
66c20494b729c930edec553fe71e2084222acc4a
|
[
"MIT"
] | null | null | null |
pins/admin.py
|
boyombo/smsbet
|
66c20494b729c930edec553fe71e2084222acc4a
|
[
"MIT"
] | null | null | null |
from random import choice
from django.contrib import admin
from pins.models import Batch, Pin
from pins.forms import BatchForm
def gen_randomdigits(size=10):
return ''.join([choice('1234567890') for i in range(size)])
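
# Added note: random.choice is not cryptographically secure; for real PIN
# generation, something like this sketch using the stdlib secrets module
# would be preferable:
#
#   from secrets import choice as secure_choice
#   def gen_randomdigits_secure(size=10):
#       return ''.join(secure_choice('1234567890') for _ in range(size))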
class BatchAdmin(admin.ModelAdmin):
list_display = ('count', 'used', 'created_on', 'download_pins')
form = BatchForm
    date_hierarchy = 'created_on'
def save_model(self, request, obj, form, change):
obj.save()
num_pins = form.cleaned_data['number_of_pins']
count = 0
if change:
return
        while count < num_pins:
            try:
                Pin.objects.create(pin=gen_randomdigits(), batch=obj)
            except Exception:
                # Most likely a uniqueness collision on the generated pin;
                # retry with a fresh random value.
                pass
            else:
                count += 1
class PinAdmin(admin.ModelAdmin):
list_display = ('pin', 'batch', 'used')
admin.site.register(Batch, BatchAdmin)
admin.site.register(Pin, PinAdmin)
| 23.923077 | 69 | 0.622722 | 625 | 0.669882 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.107181 |
34c1c0f2296ec9a8cff26832714ccf9c61244f45
| 961 |
py
|
Python
|
2017/February/2_maxcross/maxcross.py
|
alantao5056/USACO_Silver
|
6998cb916692af58a0b40b1a4aff0708ee1106b8
|
[
"MIT"
] | null | null | null |
2017/February/2_maxcross/maxcross.py
|
alantao5056/USACO_Silver
|
6998cb916692af58a0b40b1a4aff0708ee1106b8
|
[
"MIT"
] | null | null | null |
2017/February/2_maxcross/maxcross.py
|
alantao5056/USACO_Silver
|
6998cb916692af58a0b40b1a4aff0708ee1106b8
|
[
"MIT"
] | null | null | null |
def getMinFix(K, N, signals):
if K == 1:
if set(signals) == {False}:
return 1
return 0
startInd = 0
endInd = K - 1
lastStart = signals[0]
curCount = signals[0:K].count(False)
minCount = curCount
for _ in range(N-K):
startInd += 1
endInd += 1
if not lastStart:
curCount -= 1
if not signals[endInd]:
curCount += 1
lastStart = signals[startInd]
minCount = min(minCount, curCount)
return minCount
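
# Note (added): getMinFix is a fixed-size sliding window. It keeps the count
# of broken signals (False) inside a window of K consecutive positions and,
# as the window shifts by one, adjusts the count for the element that leaves
# (lastStart) and the one that enters (signals[endInd]), giving an O(N) scan.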
def main(inputFile, outputFile):
maxcrossInput = open(inputFile, 'r')
maxcrossOutput = open(outputFile, 'w')
N, K, B = maxcrossInput.readline().strip().split()
N, K, B = int(N), int(K), int(B)
signals = [True] * N
for _ in range(B):
signals[int(maxcrossInput.readline().strip()) - 1] = False
# print(signals)
maxcrossOutput.write(str(getMinFix(K, N, signals)) + '\n')
maxcrossInput.close()
maxcrossOutput.close()
main('maxcross.in', 'maxcross.out')
| 20.891304 | 62 | 0.612903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.055151 |
34c2dd9c20a2135a93d6b5c256d90be592b639fa
| 752 |
py
|
Python
|
Python/leetcode2/41. First Missing Positive.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | 2 |
2015-12-02T06:44:01.000Z
|
2016-05-04T21:40:54.000Z
|
Python/leetcode2/41. First Missing Positive.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
Python/leetcode2/41. First Missing Positive.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
'''
Given an unsorted integer array, find the smallest missing positive integer.
Example 1:
Input: [1,2,0]
Output: 3
Example 2:
Input: [3,4,-1,1]
Output: 2
Example 3:
Input: [7,8,9,11,12]
Output: 1
Note:
Your algorithm should run in O(n) time and uses constant extra space.
'''
class Solution:
    def firstMissingPositive(self, nums):
        if not nums:
            return 1
        n = len(nums)
        # Cyclic sort: keep swapping until the value at index i is out of
        # range or already at its home slot (value v belongs at index v - 1).
        # A single-pass swap misses chains such as [2, 3, 4, 1], so the inner
        # while loop is required; each swap homes one value, so the total
        # work stays O(n) with O(1) extra space.
        for i in range(n):
            while 0 < nums[i] <= n and nums[nums[i] - 1] != nums[i]:
                home = nums[i] - 1
                nums[i], nums[home] = nums[home], nums[i]
        for i, num in enumerate(nums):
            if num != i + 1:
                return i + 1
        return n + 1
s = Solution()
print(s.firstMissingPositive([-1,4,2,1,9,10]))
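# Added check (not in the original): chains such as [2, 3, 4, 1] are exactly
# the inputs a single-pass swap gets wrong; the while-based cyclic sort above
# handles them.
print(s.firstMissingPositive([2, 3, 4, 1]))  # expected: 5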
| 21.485714 | 76 | 0.575798 | 406 | 0.539894 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.375 |
34c375e2a66eb6bf3befc20ceb9878fbf3112409
| 6,531 |
py
|
Python
|
4/figs/figX9/entropy_comparison_ew_hsm_igm.py
|
t-young31/thesis
|
2dea31ef64f4b7d55b8bdfc2094bab6579a529e0
|
[
"MIT"
] | null | null | null |
4/figs/figX9/entropy_comparison_ew_hsm_igm.py
|
t-young31/thesis
|
2dea31ef64f4b7d55b8bdfc2094bab6579a529e0
|
[
"MIT"
] | null | null | null |
4/figs/figX9/entropy_comparison_ew_hsm_igm.py
|
t-young31/thesis
|
2dea31ef64f4b7d55b8bdfc2094bab6579a529e0
|
[
"MIT"
] | null | null | null |
"""
Calculate the translational entropy with EW, HSM and IGM models
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
plt.style.use("paper")
ha2kjmol = 627.5 * 4.184
class Constants:
hbar_au = 1.0
h_au = hbar_au * 2.0 * np.pi
kb_au = 3.1668114E-6 # hartrees K-1
h_SI = 6.62607004E-34 # J s
na = 6.02214086E23 # molecules mol-1
kb_SI = 1.38064852E-23 # J K-1
kb_JKmol = kb_SI * na # J K-1 mol-1
kb_kcalKmol = kb_SI / (4.184 * 1000) # kcal K-1 mol-1
k_b = kb_kcalKmol
n_a = 6.022140857E23 # molecules mol-1
r = k_b * n_a # J K-1 mol-1
h = 6.62607004E-34 # J s
atm_to_pa = 101325 # Pa
dm_to_m = 0.1 # m
amu_to_kg = 1.660539040E-27 # Kg
c = 299792458 # m s-1
c_in_cm = c * 100 # cm s-1
ang_to_m = 1E-10 # m
ang_to_au = 1.88973 # au Å-1
m_to_ang = 1E10 # Å
m_to_bohr = 1.89e+10 # au m-1
amu_to_au = 1822.888486 # m_e amu-1
kj_mol_to_au = 0.00038087980 # Ha (kJ mol-1)-1
kcal_mol_to_au = 0.001593601 # Ha (kcal mol-1)-1
inverse_ang_inverse_au = 1.0 / 1.88973 # au-1 Å
class Solute:
    def _s_t_igm(self, length_au):
        """
        Ideal-gas translational entropy:
        S = k_B T dln(q_t)/dT + k_B ln(q_t)
          = n k_B ((T / q)(3q / 2T) + ln(q))    [since dq/dT = 3q / 2T]
          = n k_B (3/2 + ln(q))
        :param length_au: box edge length in atomic units (temp_K is read
            from module scope, as elsewhere in this script)
        :return: translational entropy
        """
q_t = np.sqrt((2.0 * np.pi * self.mass_au * Constants.kb_au * temp_K) / (
Constants.h_au ** 2)) ** 3 * length_au ** 3
return Constants.r * (1.5 + np.log(q_t))
@property
def s_t_igm_1atm(self):
return self._s_t_igm(length_au=self.l_1atm_au)
@property
def s_t_igm_1m(self):
return self._s_t_igm(length_au=self.l_1molar_au)
    def s_t_hsm(self, omega_au):
        """
        Hindered (harmonic) translator entropy:
        S = k_B T dln(q_t)/dT + k_B ln(q_t)
          = n k_B (3 hbar omega beta coth(hbar omega beta / 2) / 2T + ln(q))
        :param omega_au: harmonic frequency in atomic units
        :return: translational entropy
        """
        # Disabled in this script; the code below is retained for reference
        # only and is unreachable.
        raise NotImplementedError
beta_au = 1.0 / (Constants.kb_au * temp_K)
q_t = 1.0 / (2.0 * np.sinh(
Constants.hbar_au * omega_au * beta_au / 2.0)) ** 3
term1 = 3.0 * (Constants.hbar_au * omega_au * beta_au / 2.0) / np.tanh(
Constants.hbar_au * omega_au * beta_au / 2.0)
return Constants.r * (term1 + np.log(q_t))
    def _q_t_ew(self):
        # Define beta locally (previously this relied on the module-level
        # beta_au defined only under the __main__ guard).
        beta_au = 1.0 / (Constants.kb_au * temp_K)
def exp_integrand(r, beta, a, b):
return r ** 2 * np.exp(- beta * a * np.exp(b * r))
cap_lambda = ((2.0 * self.mass_au * np.pi) / (
beta_au * Constants.h_au ** 2)) ** 1.5
integral = integrate.quad(exp_integrand, 0.0, 10.0,
args=(beta_au, self.a_au, self.b_inv_au))[0]
return 4.0 * np.pi * np.exp(beta_au * self.a_au) * cap_lambda * integral
@property
def s_t_ew(self):
beta_au = 1.0 / (Constants.kb_au * temp_K)
q_t = self._q_t_ew()
def integrand(r, beta, a, b):
return r ** 2 * np.exp(-beta * a * (np.exp(b * r) - 1.0) + b * r)
integral = integrate.quad(integrand, 0.0, 10.0, args=(beta_au, self.a_au, self.b_inv_au))[0]
cap_lambda = ((2.0 * self.mass_au * np.pi) / (beta_au * Constants.h_au ** 2)) ** 1.5
term_4 = 4.0 * np.pi * (self.a_au * beta_au * cap_lambda / q_t) * integral
analytic_s = Constants.r * (1.5 - self.a_au * beta_au + np.log(q_t) + term_4)
# d_temp = 1E-10
# dlnq_dtemp = ((np.log(
# q_t_ew(1.0 / (Constants.kb_au * (temp_K + d_temp)), self.a_au, self.b_inv_au,
# mass_au)) - np.log(q_t))
# / d_temp)
# numerical_s = Constants.r * (temp_K * dlnq_dtemp + np.log(q_t))
# print('Numerical derivative / analytic derivative = ', numerical_s / analytic_s)
return analytic_s
def __init__(self, mass_amu, k_kcal, a_inv_ang):
self.mass_au = mass_amu * Constants.amu_to_au
self.a_au = k_kcal * Constants.kcal_mol_to_au
self.b_inv_au = a_inv_ang * Constants.inverse_ang_inverse_au
# Harmonic oscillator
# k_au = k_kjmol * Constants.kj_mol_to_au
# omega_au = np.sqrt(k_au / mass_au)
v_eff_1atm_m3 = Constants.kb_SI * temp_K / Constants.atm_to_pa
l_1atm = v_eff_1atm_m3 ** (1 / 3) * Constants.m_to_ang
self.l_1atm_au = l_1atm * Constants.ang_to_au
v_eff_1molar_m3 = 1.0 / (Constants.n_a * (1.0 / Constants.dm_to_m) ** 3)
l_1molar = v_eff_1molar_m3 ** (1 / 3) * Constants.m_to_ang
self.l_1molar_au = l_1molar * Constants.ang_to_au
def ST(S):
return np.round(temp_K * S, decimals=3)
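
# Note (added): Constants.r above is in kcal K^-1 mol^-1, so ST(S) = T * S is
# an energy in kcal mol^-1, matching the 'kcal mol-1' label printed in the
# main block below.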
if __name__ == '__main__':
temp_K = 298.15
beta_au = 1.0 / (Constants.kb_au * temp_K)
Methane_Water = Solute(mass_amu=16.04, k_kcal=1.048, a_inv_ang=2.918)
Methane_Acetonitrile = Solute(mass_amu=16.04, k_kcal=0.529, a_inv_ang=2.793)
Methane_Benzene = Solute(mass_amu=16.04, k_kcal=0.679, a_inv_ang=2.736)
CO2_Water = Solute(mass_amu=44.01, k_kcal=0.545, a_inv_ang=4.075)
CO2_Acetonitrile = Solute(mass_amu=44.01, k_kcal=0.446 , a_inv_ang=2.93)
CO2_Benzene = Solute(mass_amu=44.01, k_kcal=0.415, a_inv_ang=3.431)
Alanine_Water = Solute(mass_amu=89.09, k_kcal=0.53, a_inv_ang=4.083)
Alanine_Acetonitrile = Solute(mass_amu=89.09, k_kcal=1.005, a_inv_ang=2.127)
Alanine_Benzene = Solute(mass_amu=89.09, k_kcal=0.368, a_inv_ang=2.878)
systems = [Methane_Water, Methane_Acetonitrile, Methane_Benzene,
CO2_Water, CO2_Acetonitrile, CO2_Benzene,
Alanine_Water, Alanine_Acetonitrile, Alanine_Benzene]
rels = []
for system in systems:
rel = ST(system.s_t_ew) / ST(system.s_t_igm_1m)
rels.append(rel)
print(ST(system.s_t_igm_1atm),
ST(system.s_t_igm_1m),
ST(system.s_t_ew),
'kcal mol-1',
sep=' & ')
print(np.average(np.array(rels)),
'±', np.std(np.array(rels))/np.sqrt(len(rels)))
| 34.739362 | 100 | 0.548767 | 4,908 | 0.751033 | 0 | 0 | 1,202 | 0.183933 | 0 | 0 | 1,268 | 0.194032 |
34c5294b5c38bdcb5b04e56497ca943887aae731
| 53 |
py
|
Python
|
gramex/apps/nlg/__init__.py
|
joshuamosesb/gramex
|
e416cb609698b5941a18b06743c853dee50e0500
|
[
"MIT"
] | 1 |
2020-05-17T18:03:44.000Z
|
2020-05-17T18:03:44.000Z
|
gramex/apps/nlg/__init__.py
|
joshuamosesb/gramex
|
e416cb609698b5941a18b06743c853dee50e0500
|
[
"MIT"
] | null | null | null |
gramex/apps/nlg/__init__.py
|
joshuamosesb/gramex
|
e416cb609698b5941a18b06743c853dee50e0500
|
[
"MIT"
] | null | null | null |
from .nlgsearch import templatize # NOQA: F401
| 26.5 | 52 | 0.698113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.226415 |
34c843d990ddc136afa91e10afc82afbaed4398e
| 5,300 |
py
|
Python
|
MessagePassingRPC/rpc_client.py
|
asgokhale/DistributedSystemsCourse
|
9ae24ed65e7a7ef849c7e39ec5a1a8cc5973c12f
|
[
"Apache-2.0"
] | 4 |
2022-01-16T17:36:49.000Z
|
2022-02-07T16:57:33.000Z
|
MessagePassingRPC/rpc_client.py
|
asgokhale/DistributedSystemsCourse
|
9ae24ed65e7a7ef849c7e39ec5a1a8cc5973c12f
|
[
"Apache-2.0"
] | null | null | null |
MessagePassingRPC/rpc_client.py
|
asgokhale/DistributedSystemsCourse
|
9ae24ed65e7a7ef849c7e39ec5a1a8cc5973c12f
|
[
"Apache-2.0"
] | 1 |
2022-01-25T23:51:51.000Z
|
2022-01-25T23:51:51.000Z
|
##############################################
#
# Author: Aniruddha Gokhale
#
# Created: Spring 2022
#
# Purpose: demonstrate a basic remote procedure call-based client
#
# A RPC uses message passing under the hood but provides a more
# type-safe and intuitive way for users to make invocations on the remote
# side because the caller makes invocations on methods (these could be methods
# of a class object, which is what we show here). That object often is
# a proxy of the real, remote implementation. The proxy simply offers the same
# interface to the caller. Under the hood, the proxy's method will then use the
# traditional message passing style where the packet is created in the
# desired format using some serialization framework like Flatbuffers etc
#
##############################################
import argparse # for argument parsing
import zmq # ZeroMQ
# define a proxy class for the server that supports the same interface
# as the real server. The client then invokes methods on this proxy, which
# are then sent to the other side. The proxy offers exactly the same interface
# to the caller as what the real implementation does on the remote side.
#
# Such proxies are also referred to as stubs and skeletons and are often
# automatically generated from interface and packet format definitions by
# interface definition language (IDL) compilers. Although, in our implementation
# here, we show an extremely simple and manually created packet, one
# could use Flatbuffers or similar modern serialization framework to do the
# necessary packet serialization.
#
# Notice also that the only 3 methods one can invoke on this proxy are
# connect, get and put. Thus, it is impossible to send a wrong message type
# like "POST" as we did in the basic message passing client, or mess up the
# packet encoding by forgetting the space after the message type keyword
# because often the serialization code will be generated by frameworks like
# Flatbuffer
#
class ServerProxy ():
# constructor
def __init__ (self):
# get the context and set the correct socket type
self.context = zmq.Context ()
self.socket = self.context.socket (zmq.REQ)
def connect (self, args):
connect_str = "tcp://" + args.ipaddr + ":" + args.port
print ("Proxy::connect - Connecting to RPC server at {}".format (connect_str))
self.socket.connect (connect_str)
pass
def get (self, key):
# Note that we don't avoid creating the message but it gets
# done here inside individual supported message type.
# Moreover, often this code gets generated by an interface
# definition language compiler and so the chance of making
# a mistake is very low
print ("Proxy::get - Sending a valid GET message")
self.socket.send_string ("GET " + key)
reply = self.socket.recv_string ()
return reply # return to the caller
def put (self, key, value):
# Note that we don't avoid creating the message but it gets
# done here inside individual supported message type.
# Moreover, often this code gets generated by an interface
# definition language compiler and so the chance of making
# a mistake is very low
print ("Proxy::set - Sending a valid PUT message")
self.socket.send_string ("PUT "+ key + " " + value)
reply = self.socket.recv_string () # technically, this should be just an ack
print ("Received reply = {}".format (reply))
###################################
#
# Parse command line arguments
#
###################################
def parseCmdLineArgs ():
# instantiate a ArgumentParser object
parser = argparse.ArgumentParser (description="Message Passing Client")
# Now specify all the optional arguments we support
# server's IP address
parser.add_argument ("-a", "--ipaddr", default="localhost", help="IP address of the message passing server, default: localhost")
# server's port
parser.add_argument ("-p", "--port", default="5557", help="Port number used by message passing server, default: 5557")
return parser.parse_args()
##################################
#
# main program
#
##################################
def main ():
# first parse the arguments
print ("Main: parse command line arguments")
args = parseCmdLineArgs ()
print ("Current libzmq version is %s" % zmq.zmq_version())
print ("Current pyzmq version is %s" % zmq.__version__)
print ("Initialize our server proxy")
proxy = ServerProxy ()
# Now create the right kind of socket
print ("Connect the proxy to the real server")
proxy.connect (args)
# Now send messages using RPC
#
    # Notice that we cannot make a mistake: we are bound by what the
    # interface supports, so sending a wrong message type like POST, as we
    # did in the message passing example, is not possible here.
print ("Invoking the GET RPC")
retval = proxy.get ("foo")
print ("Value obtained from get call is {}".format (retval))
print ("Invoking the PUT RPC")
proxy.put ("foo", "bar")
###################################
#
# Main entry point
#
###################################
if __name__ == "__main__":
main ()
| 37.588652 | 132 | 0.658679 | 1,554 | 0.293208 | 0 | 0 | 0 | 0 | 0 | 0 | 3,851 | 0.726604 |
34c9095464074f8f39e3db552d812f1238aad8c5
| 1,305 |
py
|
Python
|
main.py
|
Sirius1942/Agave_ui_tools
|
7789de1d40955046d2e40fbe1c552f4a082c1472
|
[
"MIT"
] | 1 |
2019-04-10T03:17:16.000Z
|
2019-04-10T03:17:16.000Z
|
main.py
|
Sirius1942/Agave_ui_tools
|
7789de1d40955046d2e40fbe1c552f4a082c1472
|
[
"MIT"
] | null | null | null |
main.py
|
Sirius1942/Agave_ui_tools
|
7789de1d40955046d2e40fbe1c552f4a082c1472
|
[
"MIT"
] | null | null | null |
import sys
from PyQt5.QtWidgets import QApplication,QMainWindow,QDialog
from PyQt5 import QtCore, QtGui, QtWidgets
from ui.main_window import Ui_MainWindow
# from login import Ui_dialog
from lib.tcmd import TCmdClass
# from SignalsE import Example
class MyMainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        super(MyMainWindow, self).__init__(parent)
        self.setupUi(self)
        self.cmdlists = TCmdClass()
    def addtext(self):
        _translate = QtCore.QCoreApplication.translate
        self.textEdit.setHtml(_translate("MainWindow", self.getText("abc")))
        # After every content change, automatically move the cursor to the end
        cursor = self.textEdit.textCursor()
        cursor.movePosition(QtGui.QTextCursor.End)
        self.textEdit.setTextCursor(cursor)
    def getText(self, text):
        self.cmdlists.addText(text)
        return self.cmdlists.getText()
# class SignalsWindow(QWidget,SignalsExample):
# def __init__(self,parent=None):
# super(SignalsExample,self).__init__(parent)
# self.setupUi(self)
# def keyPressEvent(self, e):
# if e.key() == Qt.Key_Escape:
# self.close()
if __name__=="__main__":
app=QApplication(sys.argv)
myWin=MyMainWindow()
myWin.show()
# sig=SignalsWindow()
sys.exit(app.exec_())
| 24.622642 | 75 | 0.683525 | 655 | 0.489903 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.310396 |
34ca769ede09a2256c0d08709d7ea01edfa2631c
| 1,675 |
py
|
Python
|
lib/python2.7/site-packages/setools/polcapquery.py
|
TinkerEdgeR-Android/prebuilts_python_linux-x86_2.7.5
|
5bcc5eb23dbb00d5e5dbf75835aa2fb79e8bafa2
|
[
"PSF-2.0"
] | null | null | null |
lib/python2.7/site-packages/setools/polcapquery.py
|
TinkerEdgeR-Android/prebuilts_python_linux-x86_2.7.5
|
5bcc5eb23dbb00d5e5dbf75835aa2fb79e8bafa2
|
[
"PSF-2.0"
] | null | null | null |
lib/python2.7/site-packages/setools/polcapquery.py
|
TinkerEdgeR-Android/prebuilts_python_linux-x86_2.7.5
|
5bcc5eb23dbb00d5e5dbf75835aa2fb79e8bafa2
|
[
"PSF-2.0"
] | 1 |
2020-05-14T05:25:00.000Z
|
2020-05-14T05:25:00.000Z
|
# Copyright 2014-2015, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
import logging
from .mixins import MatchName
from .query import PolicyQuery
class PolCapQuery(MatchName, PolicyQuery):
"""
Query SELinux policy capabilities
Parameter:
policy The policy to query.
Keyword Parameters/Class attributes:
name The name of the policy capability to match.
name_regex If true, regular expression matching will
be used for matching the name.
"""
def __init__(self, policy, **kwargs):
super(PolCapQuery, self).__init__(policy, **kwargs)
self.log = logging.getLogger(__name__)
def results(self):
"""Generator which yields all matching policy capabilities."""
self.log.info("Generating policy capability results from {0.policy}".format(self))
self._match_name_debug(self.log)
for cap in self.policy.polcaps():
if not self._match_name(cap):
continue
yield cap
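# Example usage (a sketch, not part of this module; assumes an already-loaded
# SELinuxPolicy object named `policy`):
#
#   q = PolCapQuery(policy, name="network_peer_controls")
#   for cap in q.results():
#       print(cap)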
| 31.603774 | 90 | 0.699104 | 867 | 0.517612 | 354 | 0.211343 | 0 | 0 | 0 | 0 | 1,135 | 0.677612 |
34cc917b6b0a55b388657d87b459c3107ab03a8f
| 2,108 |
py
|
Python
|
src_Python/EtabsAPIbface1/EtabsAPIe_eUnits.py
|
fjmucho/APIdeEtabsYPython
|
a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523
|
[
"MIT"
] | null | null | null |
src_Python/EtabsAPIbface1/EtabsAPIe_eUnits.py
|
fjmucho/APIdeEtabsYPython
|
a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523
|
[
"MIT"
] | null | null | null |
src_Python/EtabsAPIbface1/EtabsAPIe_eUnits.py
|
fjmucho/APIdeEtabsYPython
|
a5c7f7fe1861c4ac3c9370ef06e291f94c6fd523
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Description:
"""
import os
import sys
import comtypes.client
try:
ETABSObject = comtypes.client.GetActiveObject("CSI.ETABS.API.ETABSObject")
print("Coneccion exitosa!.\nadjuntando a una instancia existente.")
except (OSError, comtypes.COMError):
print("No se encontró ninguna instancia en ejecución del programa(Etabs).")
sys.exit(-1)
smodel = ETABSObject.SapModel
# Unlock the model (refers to the padlock icon in ETABS)
smodel.SetModelIsLocked(False)
# Initialize a new blank model
res = smodel.InitializeNewModel()
# Create a grid-only template model (a new sheet with a grid)
res = smodel.File.NewGridOnly(4,12,12,4,4,24,24)
# Unit preferences
# N_mm_C = 6 #kN_m_c
# smodel.SetPresentUnits(N_mm_C)
unitOption = {
'lb_in_F':1, 'lb_ft_F':2, 'kip_in_F':3, 'kip_ft_F':4,
'kN_mm_C':5, 'kN_m_C':6, 'kgf_mm_C':7, 'kgf_m_C':8, 'N_mm_C':9, 'N_m_C':10,
'Ton_mm_C':11, 'Ton_m_C':12, 'kN_cm_C':13, 'kgf_cm_C':14, 'N_cm_C':15, 'Ton_cm_C':16
}
# These two variables drive the unit selection below
length = "mm"  # length can be either "m" or "mm"
force = "kN"   # force can be either "N" or "kN"
if length == "mm" and force == "N":
    smodel.SetPresentUnits(unitOption['N_mm_C'])   # 9
elif length == "mm" and force == "kN":
    smodel.SetPresentUnits(unitOption['kN_mm_C'])  # 5
elif length == "m" and force == "N":
    smodel.SetPresentUnits(unitOption['N_m_C'])    # 10
elif length == "m" and force == "kN":
    smodel.SetPresentUnits(unitOption['kN_m_C'])   # 6
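# Equivalent one-liner (a sketch, not part of the original script): the
# unitOption keys follow the pattern "<force>_<length>_C", so the branch
# chain above could be replaced by composing the key directly:
#
#   smodel.SetPresentUnits(unitOption[force + "_" + length + "_C"])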
# ....
print(smodel.GetPresentUnits())
input("Enter para cerrar Etabs!")
# 'close ETABS | Cerrar aplicacion Etabs
ETABSObject.ApplicationExit(False)
# clean up variables | limpiamos las variables y eliminamos
ETABSObject, smodel, res = None, None, None
del ETABSObject, smodel, res
| 31.939394 | 85 | 0.693074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,163 | 0.551185 |
34ccc2dc6b0cda2dc80e2e73e7a9e34065db3f8d
| 826 |
py
|
Python
|
casepro/msgs/migrations/0040_outgoing_as_single_pt1.py
|
rapidpro/ureport-partners
|
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
|
[
"BSD-3-Clause"
] | 21 |
2015-07-21T15:57:49.000Z
|
2021-11-04T18:26:35.000Z
|
casepro/msgs/migrations/0040_outgoing_as_single_pt1.py
|
rapidpro/ureport-partners
|
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
|
[
"BSD-3-Clause"
] | 357 |
2015-05-22T07:26:45.000Z
|
2022-03-12T01:08:28.000Z
|
casepro/msgs/migrations/0040_outgoing_as_single_pt1.py
|
rapidpro/ureport-partners
|
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
|
[
"BSD-3-Clause"
] | 24 |
2015-05-28T12:30:25.000Z
|
2021-11-19T01:57:38.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("msgs", "0039_outgoing_text_non_null")]
operations = [
migrations.AlterField(
model_name="outgoing",
name="backend_id",
field=models.IntegerField(help_text="Broadcast id from the backend", null=True),
),
migrations.RenameField(model_name="outgoing", old_name="backend_id", new_name="backend_broadcast_id"),
migrations.RemoveField(model_name="outgoing", name="recipient_count"),
migrations.AddField(
model_name="outgoing",
name="contact",
field=models.ForeignKey(to="contacts.Contact", null=True, on_delete=models.PROTECT),
),
]
| 33.04 | 110 | 0.654964 | 717 | 0.868039 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.265133 |
34d35a78f92c8bdc372877964e8913cfb9da9911
| 197 |
py
|
Python
|
Areatriangulo.py
|
ChristianSalas1234567/salas-yupanqui
|
36fdbe3ebc51cd73f62870fcc8b646ad98133ae7
|
[
"Apache-2.0"
] | 1 |
2021-04-22T12:34:37.000Z
|
2021-04-22T12:34:37.000Z
|
Areatriangulo.py
|
ChristianSalas1234567/salas-yupanqui
|
36fdbe3ebc51cd73f62870fcc8b646ad98133ae7
|
[
"Apache-2.0"
] | null | null | null |
Areatriangulo.py
|
ChristianSalas1234567/salas-yupanqui
|
36fdbe3ebc51cd73f62870fcc8b646ad98133ae7
|
[
"Apache-2.0"
] | null | null | null |
# input variables
print("triangle area")
# input data
B = int(input("enter base: "))
H = int(input("enter height: "))
# computation
area = (B * H) / 2
# output data
print("the area is: ", area)
| 21.888889 | 32 | 0.71066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.654822 |
34d4e4b6e7b86d15d1faa785806544997cfd4d94
| 1,756 |
py
|
Python
|
testing/session.py
|
edchelstephens/django-rest-utils
|
15cee427149217d1e53384281894f91e9653b6b4
|
[
"BSD-3-Clause"
] | 1 |
2022-02-20T01:37:25.000Z
|
2022-02-20T01:37:25.000Z
|
testing/session.py
|
edchelstephens/django-rest-utils
|
15cee427149217d1e53384281894f91e9653b6b4
|
[
"BSD-3-Clause"
] | null | null | null |
testing/session.py
|
edchelstephens/django-rest-utils
|
15cee427149217d1e53384281894f91e9653b6b4
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Any, Optional
from django.contrib.sessions.middleware import SessionMiddleware
class SessionTestMixin:
"""Session Test mixin."""
    def is_session_empty(self, request) -> bool:
        """Check if request.session is empty."""
        return len(request.session.values()) == 0
def assertSessionEmpty(self, request, msg: Optional[str] = None) -> None:
"""Assert that request.session does not contain any values."""
if msg is None:
msg = "request.session contains values. request.session.values() == {}".format(
request.session.values()
)
assert self.is_session_empty(request), msg
def assertSessionNotEmpty(self, request, msg: Optional[str] = None) -> None:
"""Assert that request.session contains at least one key value pair."""
if msg is None:
msg = "request.session has no key value pairs. request.session.values() == {}".format(
request.session.values()
)
assert not self.is_session_empty(request), msg
class SessionRequiredTestMixin(SessionTestMixin):
"""Mixin for tests with request.session requirements."""
def add_session(self, request) -> None:
"""Add session object to request by using SessionMiddleware."""
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
def set_session_value(self, request, key: str, value: Any) -> None:
"""Set key value on request.session"""
request.session[key] = value
def set_session_values(self, request, **kwargs) -> None:
"""Set key values in form of keyword arguments on request.session"""
request.session.update(kwargs)
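# A minimal usage sketch (hypothetical test case; assumes Django's TestCase
# and RequestFactory, which this module does not import):
#
#   class MyViewTest(SessionRequiredTestMixin, TestCase):
#       def test_session_values(self):
#           request = RequestFactory().get("/")
#           self.add_session(request)
#           self.set_session_value(request, "cart_id", 42)
#           self.assertSessionNotEmpty(request)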
| 35.12 | 98 | 0.649772 | 1,650 | 0.939636 | 0 | 0 | 0 | 0 | 0 | 0 | 560 | 0.318907 |
34d967b2599f558aa7f42f79ee2207cb821523d7
| 2,930 |
py
|
Python
|
test/integration/test_integration_halo.py
|
cloudpassage/provision_csp_accounts
|
a99fd6322116d5482bc183c4084a9066d81bc0b3
|
[
"BSD-3-Clause"
] | 2 |
2020-02-11T21:47:55.000Z
|
2021-01-16T02:49:06.000Z
|
test/integration/test_integration_halo.py
|
cloudpassage/provision_csp_accounts
|
a99fd6322116d5482bc183c4084a9066d81bc0b3
|
[
"BSD-3-Clause"
] | 2 |
2019-05-31T22:30:46.000Z
|
2020-02-11T21:31:38.000Z
|
test/integration/test_integration_halo.py
|
cloudpassage/provision_csp_accounts
|
a99fd6322116d5482bc183c4084a9066d81bc0b3
|
[
"BSD-3-Clause"
] | 1 |
2020-02-11T21:05:34.000Z
|
2020-02-11T21:05:34.000Z
|
import cloudpassage
import provisioner
import pytest
import os
here_dir = os.path.abspath(os.path.dirname(__file__))
fixture_dir = os.path.join(here_dir, "../fixture")
class TestIntegrationHalo(object):
def instantiate_halo_object(self, config):
"""Return an instance of the provisioner.Halo object."""
return provisioner.Halo(config)
def instantiate_config_object_nonworking(self):
"""Return a dummy, non-working config object."""
conf_obj = provisioner.ConfigManager()
conf_obj.halo_api_key = "ABC123"
conf_obj.halo_api_secret_key = "DEF456"
return conf_obj
def test_instantiate_halo_object(self):
"""Instantiation will work, even with onoworking credentials."""
config = self.instantiate_config_object_nonworking()
assert self.instantiate_halo_object(config)
def test_halo_object_sanity_fail(self):
"""Sanity check fails gracefully."""
config = self.instantiate_config_object_nonworking()
halo = self.instantiate_halo_object(config)
with pytest.raises(SystemExit) as e:
halo.sanity_check()
assert e.type == SystemExit
assert e.value.code == 1
def test_halo_object_construct_arn(self):
"""Ensure role ARN is constructed correctly."""
desired_result = "arn:aws:iam::065368812710:role/trusted-Newark"
account_id = "065368812710"
role_name = "trusted-Newark"
actual_result = provisioner.Halo.construct_role_arn(account_id,
role_name)
assert desired_result == actual_result
def test_halo_validate_object_id_string_ok(self):
"""Well-formed ID passes."""
testval = "92a11bcc905f11e896cb7f3cbd0e8cfd"
retval = provisioner.Halo.validate_object_id(testval)
assert retval is True
def test_halo_validate_object_id_string_bad(self):
"""Badly-formed ID raises CloudPassageValidation."""
testval = "92a11bcc905f11e896cb7f3cbd0../e8cfd"
with pytest.raises(cloudpassage.CloudPassageValidation) as e:
provisioner.Halo.validate_object_id(testval)
assert e
def test_halo_validate_object_id_type_bad(self):
"""Badly-formed ID raises CloudPassageValidation."""
testval = {'onething': "92a11bcc905f11e896cb7f3cbd0e8cfd"}
with pytest.raises(cloudpassage.CloudPassageValidation) as e:
provisioner.Halo.validate_object_id(testval)
assert e
def test_halo_integration_string(self):
"""Ensure integration string constructed correctly with package name and version"""
integration_string_desired = "Provision-CSP-Accounts/%s" % provisioner.__version__
config = self.instantiate_config_object_nonworking()
halo = self.instantiate_halo_object(config)
assert integration_string_desired == halo.integration
| 41.267606 | 91 | 0.695904 | 2,758 | 0.941297 | 0 | 0 | 0 | 0 | 0 | 0 | 713 | 0.243345 |
34da6249230478c06324343ddaaf9e58a8828973
| 22,665 |
py
|
Python
|
tests/python3/test_lambda.py
|
pecigonzalo/aws-lambda-ddns-function
|
06e6c06bced80611238734d202deb284a5680813
|
[
"Apache-2.0"
] | 120 |
2018-02-14T21:36:45.000Z
|
2022-03-23T20:52:17.000Z
|
tests/python3/test_lambda.py
|
pecigonzalo/aws-lambda-ddns-function
|
06e6c06bced80611238734d202deb284a5680813
|
[
"Apache-2.0"
] | 17 |
2018-03-29T09:21:23.000Z
|
2021-04-21T21:48:42.000Z
|
tests/python3/test_lambda.py
|
pecigonzalo/aws-lambda-ddns-function
|
06e6c06bced80611238734d202deb284a5680813
|
[
"Apache-2.0"
] | 70 |
2018-02-15T13:03:05.000Z
|
2022-02-24T13:52:43.000Z
|
import os
import sys
import boto3
import boto
import moto
import botocore
import unittest
import logging
import re
import sure
import botocore.session
from datetime import datetime
from moto import mock_sns_deprecated, mock_sqs_deprecated
from botocore.stub import Stubber
from freezegun import freeze_time
from mock import patch
#from moto import mock_dynamodb2, mock_dynamodb2_deprecated
#from moto.dynamodb2 import dynamodb_backend2
from moto import mock_ec2, mock_ec2_deprecated, mock_route53
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0,myPath+'/..')
from union_python3 import publish_to_sns, delete_item_from_dynamodb_table, get_subnet_cidr_block, get_item_from_dynamodb_table, list_hosted_zones, get_hosted_zone_properties, is_dns_support_enabled, is_dns_hostnames_enabled, associate_zone, create_reverse_lookup_zone, get_reversed_domain_prefix, reverse_list, get_dhcp_configurations, create_dynamodb_table, list_tables, put_item_in_dynamodb_table, get_dynamodb_table, create_table, change_resource_recordset, create_resource_record, delete_resource_record, get_zone_id, is_valid_hostname, get_dhcp_option_set_id_for_vpc
try:
import boto.dynamodb2
except ImportError:
print("This boto version is not supported")
logging.basicConfig(level=logging.DEBUG)
os.environ["AWS_ACCESS_KEY_ID"] = '1111'
os.environ["AWS_SECRET_ACCESS_KEY"] = '2222'
class TestLambda(unittest.TestCase):
    def test_get_subnet_cidr_block(self):
mock = moto.mock_ec2()
mock.start()
client = boto3.client('ec2', region_name='us-east-1')
vpc = client.create_vpc(CidrBlock="10.0.0.0/16")
subnet = client.create_subnet(VpcId=vpc['Vpc']['VpcId'], CidrBlock="10.0.0.0/18")
results = get_subnet_cidr_block(client, subnet['Subnet']['SubnetId'] )
assert results == '10.0.0.0/18'
mock.stop()
def test_listed_hosted_zones(self):
mock = moto.mock_route53()
mock.start()
client = boto3.client(
'route53',
region_name='us-east-1',
aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
aws_session_token='123',
)
response = client.create_hosted_zone(
Name='test4',
VPC={
'VPCRegion': 'us-east-1',
'VPCId': 'vpc-43248d39'
},
CallerReference='string',
HostedZoneConfig={
'Comment': 'string',
'PrivateZone': True
}
)
hosted_zone_id = response['HostedZone']['Id']
response = list_hosted_zones(client)
assert response['HostedZones'][0]['Name'] == 'test4.'
mock.stop()
def test_get_hosted_zone_properties(self):
mock = moto.mock_route53()
mock.start()
client = boto3.client(
'route53',
region_name='us-east-1',
aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
aws_session_token='123',
)
response = client.create_hosted_zone(
Name='string',
VPC={
'VPCRegion': 'us-east-1',
'VPCId': 'vpc-43248d39'
},
CallerReference='string',
HostedZoneConfig={
'Comment': 'string',
'PrivateZone': True
}
)
hosted_zone_id = response['HostedZone']['Id']
response = get_hosted_zone_properties(client, hosted_zone_id)
assert response['HostedZone']['Id'] == hosted_zone_id
mock.stop()
def test_is_dns_support_enabled(self):
mock = moto.mock_ec2()
mock.start()
client = boto3.client('ec2', region_name='us-east-1')
dhcp_options = client.create_dhcp_options(
DhcpConfigurations=[
{
'Key': 'example.com',
'Values': [
'10.0.0.6',
'10.0.0.7'
]
}
]
)
print('dhcp options: '+str(dhcp_options))
vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")
print('vpc1: '+str(vpc1))
response = client.modify_vpc_attribute(
EnableDnsSupport={
'Value': True
},
VpcId=vpc1['Vpc']['VpcId']
)
print('response: '+str(response))
results = is_dns_support_enabled(client, vpc1['Vpc']['VpcId'])
print('results: '+str(results))
assert results == True
mock.stop()
def test_is_dns_hostnames_enabled(self):
mock = moto.mock_ec2()
mock.start()
client = boto3.client('ec2', region_name='us-east-1')
dhcp_options = client.create_dhcp_options(
DhcpConfigurations=[
{
'Key': 'example.com',
'Values': [
'10.0.0.6',
'10.0.0.7'
]
}
]
)
print('dhcp options: '+str(dhcp_options))
vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")
print('vpc1: '+str(vpc1))
response = client.modify_vpc_attribute(
EnableDnsHostnames={
'Value': True
},
VpcId=vpc1['Vpc']['VpcId']
)
print('response: '+str(response))
results = is_dns_hostnames_enabled(client, vpc1['Vpc']['VpcId'])
print('results: '+str(results))
assert results == True
mock.stop()
@unittest.skip("moto need associate vpc added")
def test_associate_zone(self):
mock = moto.mock_route53()
mock.start()
client = boto3.client(
'route53',
region_name='us-east-1',
aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
aws_session_token='123',
)
response = client.create_hosted_zone(
Name='string',
VPC={
'VPCRegion': 'us-east-1',
'VPCId': 'vpc-43248d39'
},
CallerReference='string',
HostedZoneConfig={
'Comment': 'string',
'PrivateZone': True
}
)
hosted_zone_id = response['HostedZone']['Id']
print('response: '+str(response))
results = associate_zone(client, hosted_zone_id, 'us-east-1', 'vpc-43248d39')
assert results == 'test'
mock.stop()
def test_create_reverse_lookup_zone(self):
instance = {
'Reservations' :[
{
'Instances': [
{
'VpcId': '123'
}
]
}
]
}
mock = moto.mock_route53()
mock.start()
client = boto3.client('route53', region_name='us-east-1')
response = create_reverse_lookup_zone(client, instance, 'abc.', 'us-east-1')
assert response['HostedZone']['Name'] == 'abc.in-addr.arpa.'
mock.stop()
def test_get_reversed_domain_prefix_16(self):
results = get_reversed_domain_prefix(16, '10.0.0.1')
assert results == '10.0.0.'
def test_get_reversed_domain_prefix_24(self):
results = get_reversed_domain_prefix(24, '10.0.0.1')
assert results == '10.0.0.'
@patch('union_python3.publish_to_sns')
def test_reverse_list_with_invalid_ip(
self,
sns
):
sns.return_value == None
response = reverse_list('test')
assert response == None
def test_reverse_list(self):
results = reverse_list('172.168.3.7')
assert results == '7.3.168.172.'
def test_get_dhcp_configurations(self):
mock = moto.mock_ec2()
mock.start()
client = boto3.client('ec2', region_name='us-east-1')
dhcp_options = client.create_dhcp_options(
DhcpConfigurations=[
{
'Key': 'example.com',
'Values': [
'10.0.0.6',
'10.0.0.7'
]
}
]
)
print('dhcp options: '+str(dhcp_options))
vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")
print('vpc1: '+str(vpc1))
vpc2 = client.create_vpc(CidrBlock="10.0.0.0/16")
print('vpc2: '+str(vpc2))
vpc3 = client.create_vpc(CidrBlock="10.0.0.0/24")
print('vpc3: '+str(vpc3))
client.associate_dhcp_options(DhcpOptionsId=dhcp_options['DhcpOptions']['DhcpOptionsId'], VpcId=vpc1['Vpc']['VpcId'])
client.associate_dhcp_options(DhcpOptionsId=dhcp_options['DhcpOptions']['DhcpOptionsId'], VpcId=vpc2['Vpc']['VpcId'])
results = get_dhcp_configurations(client, dhcp_options['DhcpOptions']['DhcpOptionsId'] )
        # Returning nothing for now because moto needs to be fixed
assert results == []
mock.stop()
def test_create_dynamodb_table(self):
mock = moto.mock_dynamodb2()
mock.start()
client = boto3.client('dynamodb', region_name='us-east-1')
results = create_dynamodb_table(client, 'DDNS')
assert results['TableDescription']['TableName'] == 'DDNS'
mock.stop()
def test_get_dhcp_option_set_id_for_vpc(self):
SAMPLE_DOMAIN_NAME = u'example.com'
SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7']
mock = moto.mock_ec2()
mock.start()
client = boto3.client('ec2', region_name='us-east-1')
dhcp_options = client.create_dhcp_options(
DhcpConfigurations=[
{
'Key': 'example.com',
'Values': [
'10.0.0.6',
'10.0.0.7'
]
}
]
)
print('dhcp options: '+str(dhcp_options))
vpc1 = client.create_vpc(CidrBlock="10.0.0.0/16")
print('vpc1: '+str(vpc1))
vpc2 = client.create_vpc(CidrBlock="10.0.0.0/16")
print('vpc2: '+str(vpc2))
vpc3 = client.create_vpc(CidrBlock="10.0.0.0/24")
print('vpc3: '+str(vpc3))
client.associate_dhcp_options(DhcpOptionsId=dhcp_options['DhcpOptions']['DhcpOptionsId'], VpcId=vpc1['Vpc']['VpcId'])
client.associate_dhcp_options(DhcpOptionsId=dhcp_options['DhcpOptions']['DhcpOptionsId'], VpcId=vpc2['Vpc']['VpcId'])
#vpcs = client.describe_vpcs(Filters=[{'Name': 'dhcp-options-id', 'Values': [dhcp_options['DhcpOptions']['DhcpOptionsId']]}])
results = get_dhcp_option_set_id_for_vpc(client, vpc1['Vpc']['VpcId'])
assert results == dhcp_options['DhcpOptions']['DhcpOptionsId']
mock.stop()
def test_is_invalid_hostname(self):
results = is_valid_hostname( None)
assert results == False
def test_is_valid_hostname(self):
results = is_valid_hostname( 'test')
assert results == True
def test_get_zone_id(self):
mock = moto.mock_route53()
mock.start()
client = boto3.client('route53', region_name='us-east-1')
client.create_hosted_zone(
Name="db.",
CallerReference=str(hash('foo')),
HostedZoneConfig=dict(
PrivateZone=True,
Comment="db",
)
)
zones = client.list_hosted_zones_by_name(DNSName="db.")
hosted_zone_id = zones["HostedZones"][0]["Id"]
# Create A Record.
a_record_endpoint_payload = {
'Comment': 'Create A record prod.redis.db',
'Changes': [
{
'Action': 'CREATE',
'ResourceRecordSet': {
'Name': 'prod.redis.db.',
'Type': 'A',
'TTL': 10,
'ResourceRecords': [{
'Value': '127.0.0.1'
}]
}
}
]
}
client.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=a_record_endpoint_payload)
results = get_zone_id( client, 'db.')
assert len(results) == 15
mock.stop()
def test_delete_resource_record(self):
mock = moto.mock_route53()
mock.start()
client = boto3.client('route53', region_name='us-east-1')
client.create_hosted_zone(
Name="db.",
CallerReference=str(hash('foo')),
HostedZoneConfig=dict(
PrivateZone=True,
Comment="db",
)
)
zones = client.list_hosted_zones_by_name(DNSName="db.")
hosted_zone_id = zones["HostedZones"][0]["Id"]
# Create A Record.
a_record_endpoint_payload = {
'Comment': 'Create A record prod.redis.db',
'Changes': [
{
'Action': 'CREATE',
'ResourceRecordSet': {
'Name': 'prod.redis.db.',
'Type': 'A',
'TTL': 10,
'ResourceRecords': [{
'Value': '127.0.0.1'
}]
}
}
]
}
client.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=a_record_endpoint_payload)
results = delete_resource_record( client, hosted_zone_id,'prod','redis.db.','A','127.0.0.1')
assert results['ChangeInfo']['Status'] == 'INSYNC'
mock.stop()
def test_create_resource_record(self):
mock = moto.mock_route53()
mock.start()
client = boto3.client('route53', region_name='us-east-1')
client.create_hosted_zone(
Name="db.",
CallerReference=str(hash('foo')),
HostedZoneConfig=dict(
PrivateZone=True,
Comment="db",
)
)
zones = client.list_hosted_zones_by_name(DNSName="db.")
hosted_zone_id = zones["HostedZones"][0]["Id"]
results = create_resource_record( client, hosted_zone_id,'prod','redis.db.','A','127.0.0.1')
assert results['ChangeInfo']['Status'] == 'INSYNC'
mock.stop()
def test_change_resource_recordset(self):
mock = moto.mock_route53()
mock.start()
client = boto3.client('route53', region_name='us-east-1')
client.create_hosted_zone(
Name="db.",
CallerReference=str(hash('foo')),
HostedZoneConfig=dict(
PrivateZone=True,
Comment="db",
)
)
zones = client.list_hosted_zones_by_name(DNSName="db.")
hosted_zone_id = zones["HostedZones"][0]["Id"]
results = change_resource_recordset( client, hosted_zone_id,'prod','redis.db.','A','127.0.0.1')
assert results['ChangeInfo']['Status'] == 'INSYNC'
mock.stop()
def test_create_table(self):
mock = moto.mock_dynamodb2()
mock.start()
client = boto3.client("dynamodb")
results = create_table(client, 'DDNS')
assert results == True
mock.stop()
def test_get_dynamodb_table(self):
mock = moto.mock_dynamodb2()
mock.start()
client = boto3.client("dynamodb")
client.create_table(TableName="DDNS"
, KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}]
, AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}]
, ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1})
results = get_dynamodb_table(client, 'DDNS')
assert results['Table']['TableName'] == 'DDNS'
mock.stop()
def test_list_tables(self):
dynamodb_client = botocore.session.get_session().create_client('dynamodb','us-east-1')
dynamodb_client_stubber = Stubber(dynamodb_client)
response = {
'TableNames': [
'DDNS',
],
'LastEvaluatedTableName': 'DDNS'
}
expected_params = {}
dynamodb_client_stubber.add_response('list_tables', response, expected_params)
with dynamodb_client_stubber:
results = list_tables(dynamodb_client)
assert results['TableNames'][0]== 'DDNS'
def test_put_item_in_dynamodb_table(self):
dynamodb_client = botocore.session.get_session().create_client('dynamodb','us-east-1')
dynamodb_client_stubber = Stubber(dynamodb_client)
response = {
'Attributes': {
'InstanceId': {
'S': '123',
'NULL': True,
'BOOL': True
},
'InstanceAttributes': {
'S': '123',
'NULL': True,
'BOOL': True
}
},
'ConsumedCapacity': {
'TableName': 'string',
'CapacityUnits': 123.0,
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'Table': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
},
'LocalSecondaryIndexes': {
'string': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
}
},
'GlobalSecondaryIndexes': {
'string': {
'ReadCapacityUnits': 123.0,
'WriteCapacityUnits': 123.0,
'CapacityUnits': 123.0
}
}
},
'ItemCollectionMetrics': {
'ItemCollectionKey': {
'string': {
'S': 'string',
'N': 'string',
'B': b'bytes',
'SS': [
'string',
],
'NS': [
'string',
],
'BS': [
b'bytes',
],
'M': {
'string': {}
},
'L': [
{},
],
'NULL': True,
'BOOL': True
}
},
'SizeEstimateRangeGB': [
123.0,
]
}
}
expected_params = {
'TableName': 'DDNS',
'Item': {
'InstanceId': {'S':'123'},
'InstanceAttributes': {'S':'123'}
}
}
dynamodb_client_stubber.add_response('put_item', response, expected_params)
with dynamodb_client_stubber:
results = put_item_in_dynamodb_table(dynamodb_client, 'DDNS', '123','123')
assert results == response
def test_get_item_from_dynamodb_table(self):
mock = moto.mock_dynamodb2()
mock.start()
client = boto3.client('dynamodb',
region_name='us-west-2',
aws_access_key_id="ak",
aws_secret_access_key="sk")
results = create_table(client, 'DDNS')
print('results: '+str(results))
results = put_item_in_dynamodb_table(client, 'DDNS', '123', '123')
print('results: '+str(results))
results = get_item_from_dynamodb_table(client, 'DDNS', '123')
print('results: '+str(results))
assert results == 123
mock.stop()
def test_delete_item_from_dynamodb_table(self):
mock = moto.mock_dynamodb2()
mock.start()
client = boto3.client('dynamodb',
region_name='us-east-1',
aws_access_key_id="ak",
aws_secret_access_key="sk")
results = create_table(client, 'DDNS')
print('results: '+str(results))
results = put_item_in_dynamodb_table(client, 'DDNS', '123', '123')
print('results: '+str(results))
results = get_item_from_dynamodb_table(client, 'DDNS', '123')
print('results: '+str(results))
assert results == 123
results = delete_item_from_dynamodb_table(client, 'DDNS', '123')
print('results: '+str(results))
results = get_item_from_dynamodb_table(client, 'DDNS', '123')
print('results: ' + str(results))
assert results == None
mock.stop()
@unittest.skip("moto needs TopicArn added to publish")
@mock_sqs_deprecated
@mock_sns_deprecated
def test_publish_to_sns(self):
MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s"\n}'
conn = boto.connect_sns()
conn.create_topic("some-topic")
topics_json = conn.get_all_topics()
topic_arn = topics_json["ListTopicsResponse"][
"ListTopicsResult"]["Topics"][0]['TopicArn']
sqs_conn = boto.connect_sqs()
sqs_conn.create_queue("test-queue")
conn.subscribe(topic_arn, "sqs",
"arn:aws:sqs:us-east-1:123456789012:test-queue")
message_to_publish = 'my message'
subject_to_publish = "test subject"
with freeze_time("2015-01-01 12:00:00"):
published_message = publish_to_sns(conn, '123456789012', 'us-east-1', message_to_publish)
published_message_id = published_message['MessageId']
queue = sqs_conn.get_queue("test-queue")
message = queue.read(1)
        expected = MESSAGE_FROM_SQS_TEMPLATE % (
            message_to_publish, published_message_id)
        acquired_message = re.sub(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z',
                                  message.get_body())
acquired_message.should.equal(expected)
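# Convenience entry point (a small addition; the module otherwise relies on
# an external test runner such as pytest or `python -m unittest`):
if __name__ == '__main__':
    unittest.main()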
| 30.382038 | 571 | 0.528039 | 21,278 | 0.938804 | 0 | 0 | 2,612 | 0.115244 | 0 | 0 | 4,564 | 0.201368 |
34db9103dcbc551abbdebff8ae585f4f1742d35b
| 1,929 |
py
|
Python
|
web/transiq/api/decorators.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
web/transiq/api/decorators.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | 14 |
2020-06-05T23:06:45.000Z
|
2022-03-12T00:00:18.000Z
|
web/transiq/api/decorators.py
|
manibhushan05/transiq
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
[
"Apache-2.0"
] | null | null | null |
import json
from api.helper import json_405_response, json_error_response
def no_test(func):
"""
Use for URLs that do not require testing, use wisely
"""
def inner(request, *args, **kwargs):
return func(request, *args, **kwargs)
inner.__name__ = func.__name__
inner.__module__ = func.__module__
inner.__doc__ = func.__doc__
inner.__dict__ = func.__dict__
inner.do_not_test = True
return inner
def api_get(func):
def inner(request, *args, **kwargs):
if request.method != "GET":
return json_405_response()
request.data = {}
return func(request, *args, **kwargs)
inner.__name__ = func.__name__
inner.__module__ = func.__module__
inner.__doc__ = func.__doc__
inner.__dict__ = func.__dict__
return inner
def api_post(func):
def inner(request, *args, **kwargs):
if request.method != "POST":
return json_405_response()
try:
request.data = {} if not request.body else json.loads(request.body.decode('utf-8'))
except ValueError:
request.data = {}
return func(request, *args, **kwargs)
inner.__name__ = func.__name__
inner.__module__ = func.__module__
inner.__doc__ = func.__doc__
inner.__dict__ = func.__dict__
return inner
def authenticated_user(func):
def inner(request, *args, **kwargs):
if not request.user:
return json_error_response('no user present', 401)
if not request.user.is_authenticated:
return json_error_response('user is not authenticated', 401)
if not request.user.is_active:
return json_error_response('user authenticated but inactive', 401)
return func(request, *args, **kwargs)
inner.__name__ = func.__name__
inner.__module__ = func.__module__
inner.__doc__ = func.__doc__
inner.__dict__ = func.__dict__
return inner
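# The manual copying of __name__/__module__/__doc__/__dict__ in the wrappers
# above mirrors what functools.wraps does. A sketch of the GET decorator
# using the standard-library helper (an equivalent alternative, not how this
# module is written):
#
#   import functools
#
#   def api_get(func):
#       @functools.wraps(func)
#       def inner(request, *args, **kwargs):
#           if request.method != "GET":
#               return json_405_response()
#           request.data = {}
#           return func(request, *args, **kwargs)
#       return inner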
| 30.619048 | 95 | 0.653188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.0845 |
34dbc736ddc462f2c6d882b037aad3cf68384021
| 16,484 |
py
|
Python
|
ASHMC/courses/management/commands/populate_from_csv.py
|
haaksmash/Webfront
|
8eb942394c568c681a83bc2c375d7552f4b3a30c
|
[
"Apache-2.0"
] | null | null | null |
ASHMC/courses/management/commands/populate_from_csv.py
|
haaksmash/Webfront
|
8eb942394c568c681a83bc2c375d7552f4b3a30c
|
[
"Apache-2.0"
] | null | null | null |
ASHMC/courses/management/commands/populate_from_csv.py
|
haaksmash/Webfront
|
8eb942394c568c681a83bc2c375d7552f4b3a30c
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Apr 16, 2012
@author: Haak Saxberg
'''
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
from ...models import Campus, Course, Professor, Section, Meeting, Timeslot, Day, Semester,\
CourseArea, RoomInfo, Log, Department, Room, Building
import csv, pprint, re, datetime
class Command(BaseCommand):
args = '<directory_of_csv_files>'
help = 'Populates the Course tables with information from csv files.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Expects a single directory as an argument.")
dirs = args[0]
if dirs[-1] != '/':
dirs += '/'
# Gather up the relevant csv files.
# ENSURE THESE ARE ENCODED AS UTF-8 BEFORE RUNNING THIS SCRIPT
MEETINGS = open(dirs + 'meetings 1.csv')
SECTIONS = open(dirs + 'sections 1.csv')
DESCRIPT = open(dirs + 'courses 1.csv')
AREAS = open(dirs + 'courses 2.csv')
PREREQS = open(dirs + 'courses 3.csv')
# Gather campuses. We'll need them later.
CAMPUS_LOOKUP = dict([ (x.code, x) for x in Campus.objects.all()])
def find_campus(string, dictionary):
try:
camp = CAMPUS_LOOKUP[string]
except KeyError:
print "Invalid campus: {}".format(string)
try:
camp = CAMPUS_LOOKUP[dictionary['campus']]
except KeyError:
print "Falling back to UN"
camp = CAMPUS_LOOKUP['UN']
return camp
SEMESTER_LOOKUP = dict([ ((x.half, x.year), x) for x in Semester.objects.all()])
DAY_LOOKUP = dict([(x.code, x) for x in Day.objects.all()])
self.stderr.write("{}\n{}\n{}\n".format(CAMPUS_LOOKUP, SEMESTER_LOOKUP, DAY_LOOKUP))
meetings_r = csv.DictReader(MEETINGS)
sections_r = csv.DictReader(SECTIONS)
descript_r = csv.DictReader(DESCRIPT)
areas_r = csv.DictReader(AREAS)
prereqs_r = csv.DictReader(PREREQS)
# First, we go through the descriptions, since that's supposedly the
# canonical list of courses that exist.
# As we go, populate a dictionary with keys that are the course codes,
# creating a json-like dict of dicts.
courses = {}
for row in descript_r:
# pull out some interesting data from this csv file, for later use.
#print row
code = row['\xef\xbb\xbf"Course Number"'] # ugly because of unicode conversion
course = {}
#self.stderr.write("Parsing description for {}\n".format(code))
course['code'] = code[:4]
course['number'] = code[4:9]
course['code_campus'] = code[-2:]
course['title'] = row['Course Title']
course['dept'] = row['Course Department Code']
course['campus'] = row['Prim Assoc']
course['minhours'] = row['Min Hrs']
course['maxhours'] = row['Max Hrs']
course['descr'] = row['Abstr']
course['requisites'] = [] # empty prereq list for later.
course['sections'] = {} # empty section dict for later; every course should (?)
# have at least one section associated with it; use number
# as key.
course['attention'] = False
course['area'] = [] # a course can have more than one area associated with it
courses[code] = course
# populate areas; use this to filter in place of department.
for row in areas_r:
code = row['\xef\xbb\xbf"Course Number"']
#self.stderr.write("Parsing area for {}\n".format(code))
course = courses[code]
course['area'] += [row["Course Area Code"]]
# populate prerequisite/concurrent enrollment requirements
for row in prereqs_r:
code = row['\xef\xbb\xbf"Course Number"']
#self.stderr.write("Parsing requisites for {}\n".format(code))
course = courses[code]
# C = Corequisite, P = Prerequisite, N = Concurrent Enrollment Required
if row['Requisite Course Category'] in ['C', 'N', 'P']:
if '*' in row['Requisite Course Number']:
# a * indicates a wildcard.
# Course codes are [4 letters for area][at least 3 and up to 5 characters for number][2 letters for campus]
# Ex: MATH030G HM
# Ex: MUS 052HNPO
# Ex: CSCI005GLHM
#course['attention'] = True
pass
requisite = {'code':row['Requisite Course Number']}
if courses.has_key(requisite['code']):
requisite['req_attention'] = False
else:
requisite['req_attention'] = True
requisite['wildcard'] = '*' in requisite
course['requisites'] += [(row['Requisite Course Category'],
requisite)]
# get sections, associate them with specific courses
for row in sections_r:
code = row['\xef\xbb\xbf"Course Number"']
course = courses[code]
section = {}
section['title'] = row['Section Title'] if len(row['Section Title']) > 0 else None
section['number'] = row['Section Number']
section['semester'] = row['Session Code']
section['year'] = int(row['Year'])
section['open'] = True if row['Section Status'] == 'O' else False
section['starts'] = row['Section Begin Date']
section['ends'] = row['Section End Date']
section['cred_hours'] = row['Section Credit Hours']
section['seats'] = row['Maximum Registration']
section['meetings'] = {} # empty meeting dict for later use
section['attention'] = False
course['sections'][section['number']] = section
# get meetings, associate them with specific sections
for row in meetings_r:
code = row['\xef\xbb\xbf"Course Number"']
course = courses[code]
if len(course['sections'].keys()) < 1:
continue
try:
section = course['sections'][row['Section Number']]
except KeyError, e:
pprint.pprint(course, self.stderr)
self.stderr.write("ERROR: {}\n".format(e))
section = {}
section['title'] = None
section['year'] = Semester.get_this_semester().next().year
section['open'] = 0
section['starts'] = datetime.datetime.now() # trash values
section['ends'] = datetime.datetime.now() # trash values
section['cred_hours'] = 0
section['seats'] = 0
section['attention'] = True
section['semester'] = Semester.get_this_semester().next().half
section['number'] = row['Section Number']
section['meetings'] = {}
course['sections'][section['number']] = section
if section['meetings'].has_key(row['Meeting Number']):
meeting = section['meetings'][row["Meeting Number"]] # If we've seen this row before, don't mess with anything
else:
meeting = {}
meeting['instructors'] = []
meeting['meet_num'] = row['Meeting Number']
meeting['days'] = row['Class Meeting Days']
meeting['start_time'] = row['Class Begin Time (24 Hour)']
meeting['end_time'] = row['Class End Time (24 Hour)']
meeting['campus_code'] = row['Campus Code']
meeting['building'] = row['Building Code']
meeting['room'] = row['Room Number']
meeting['attention'] = False
section['meetings'][meeting['meet_num']] = meeting
meeting['instructors'] += [row['Instructor Name']] # multiple instructors
#self.stderr.write("{}\n".format(courses.keys()[-4]))
#self.stderr.write("{}\n".format((courses.keys()[-4] == "SPAN033 PO")))
#pprint.pprint(courses['PHYS051 HM'], self.stderr)
# We've now gleaned all the information that we can from the csv's, and put them into a monster
# of a dict of nested dicts.
non_base_courses = []
base_courses = []
for key in courses.keys():
#self.stderr.write("{}\n".format(key))
course = courses[key]
if len(course['requisites']) > 0:
non_base_courses += [course]
else:
base_courses += [course]
pprint.pprint("{} base courses".format(len(base_courses)), self.stderr)
pprint.pprint("{} non-base courses".format(len(non_base_courses)),
self.stderr)
#created_courses = {}
repeats = 0
fucked_bases = []
for course in base_courses+non_base_courses:
# First, ensure that we have the course area and the department
for area in course['area']:
if area in ['BIOL','CHEM','CSCI','ENGR','MATH','PHYS']:
science = True
else:
science = False
if re.match(r'^\d',area):
req_status = True
else:
req_status = False
ca, new = CourseArea.objects.get_or_create(code=area,
name=area,
hard_science=science,
is_req_area=req_status)
primary_campus = CAMPUS_LOOKUP[course['campus']]
dept, new = Department.objects.get_or_create(code=course['dept'],
campus=primary_campus,
)
#print course['number'],course['code_campus'], course['title']
#print course['descr']
# build the course itself
c, new = Course.objects.get_or_create(
title=course['title'],
codeletters=course['code'],
codenumber=course['number'],
codecampus=course['code_campus'],
campus=find_campus(course['code_campus'], course),
description=unicode(course['descr'],"UTF-8"),
)
if not new: repeats += 1
c.departments.add(dept)
c.areas.add(ca)
c.code = c.construct_code()
c.needs_attention=course['attention']
c.min_hours=course['minhours']
c.max_hours=course['maxhours']
if c.campus.code == "HM": c.credit_multiplier = 1.00
c.save()
#Now, we add sections.
for section in course['sections'].keys():
sec = course['sections'][section]
#print "Section ", section
try:
s, new = Section.objects.get_or_create(
course=c,
number=sec['number'],
)
s.title=sec['title']
s.credit_hours=sec['cred_hours']
s.semester=SEMESTER_LOOKUP[(sec['semester'],sec['year'])]
s.seats=sec['seats']
s.openseats=sec['seats'] # assume totally free classes!
s.start_date=sec['starts']
s.end_date=sec['ends']
s.needs_attention=sec['attention']
s.save()
except IntegrityError, e:
print e
print "Adding this course to the 'fucked' list"
fucked_bases += [(course, section)]
continue
# add meetings to sections
for meet in sec['meetings'].keys():
print "\tMeeting code: ", meet
meeting = sec['meetings'][meet]
m, new = Meeting.objects.get_or_create(section=s,
meeting_code=int(meeting['meet_num']),
)
m.needs_attention=meeting['attention']
m.campus=find_campus(meeting['campus_code'],course)
m.save()
# Add instructors to meetings
for teacher in meeting['instructors']:
names = teacher.split(',')
last = names[0]
if len(names) > 1:
first = names[1].split(' ')[1] # clears out middle initial, if extant
else:
first = None
p, new = Professor.objects.get_or_create(last_name=last,
first_name=first,
)
p.departments.add(dept)
m.teachers.add(p)
# spatial location of this meeting
#print "\tBuilding: ",meeting['building']
print "\t Building: ", meeting['building']
if not meeting['building']: # no building specified
meeting['building'] = "TBA"
try:
b = Building.objects.get(campus=m.campus,
code=meeting['building'])
except ObjectDoesNotExist, e:
print 'no building with code {} and campus {}'.format(meeting['building'], m.campus)
b = Building.objects.get(code=meeting['building'])
room, new = Room.objects.get_or_create(building=b,
title=meeting['room'])
is_arr = b.code == "ARR"
is_tba = b.code == "TBA"
# temporal location of this meeting
for day in meeting['days']:
if day == '-': continue # don't care about days we don't meet on
#print "\t", day
d = DAY_LOOKUP[day]
try:
starter = datetime.datetime.strptime(meeting['start_time'],"%H%M")
except ValueError, e:
print e
starter = datetime.datetime.strptime(meeting['start_time'].zfill(2),"%H%M")
try:
ender = datetime.datetime.strptime(meeting['end_time'],"%H%M")
except ValueError, e:
print e
ender = datetime.datetime.strptime(meeting['end_time'].zfill(2),"%H%M")
t, new = Timeslot.objects.get_or_create(
starts=starter,
ends=ender,
day=d
)
# Finally register the meeting with a timeslot (in a room)
ri, new = RoomInfo.objects.get_or_create(meeting=m,
timeslot=t,
room=room,
)
                        ri.is_arr = is_arr
                        ri.is_tba = is_tba
                        ri.save()
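# A sketch (not part of the command) of validating a raw course code against
# the format noted in the comments above -- 4 letters for the area, 3 to 5
# characters for the number, 2 letters for the campus -- instead of plain
# slicing:
#
#   import re
#   COURSE_RE = re.compile(r'^(?P<code>.{4})(?P<number>.{3,5})(?P<campus>.{2})$')
#   m = COURSE_RE.match('CSCI005GLHM')
#   # m.group('code') == 'CSCI', m.group('number') == '005GL', m.group('campus') == 'HM'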
| 44.672087 | 127 | 0.473186 | 16,060 | 0.974278 | 0 | 0 | 0 | 0 | 0 | 0 | 4,754 | 0.288401 |
34dcd15230933fd8287950f334911c981fecf57b
| 858 |
py
|
Python
|
Alura/MLClassificacao2/A4V4_Classificando_email.py
|
EduardoMoraesRitter/Alura
|
c0f5e7e9807e8e1d1dc46e6b847df8a8085783a6
|
[
"MIT"
] | null | null | null |
Alura/MLClassificacao2/A4V4_Classificando_email.py
|
EduardoMoraesRitter/Alura
|
c0f5e7e9807e8e1d1dc46e6b847df8a8085783a6
|
[
"MIT"
] | null | null | null |
Alura/MLClassificacao2/A4V4_Classificando_email.py
|
EduardoMoraesRitter/Alura
|
c0f5e7e9807e8e1d1dc46e6b847df8a8085783a6
|
[
"MIT"
] | null | null | null |
import pandas as pd
classificacoes = pd.read_csv('email.csv')
textosPuros = classificacoes['email']
textosQuebrados = textosPuros.str.lower().str.split(' ')
dicionario = set()
for lista in textosQuebrados:
dicionario.update(lista)
totalPalavras = len(dicionario)
tuplas = list(zip(dicionario, range(totalPalavras)))
tradutor = {palavra:indice for palavra, indice in tuplas}
print(totalPalavras)
def vetorizar_texto(texto, tradutor):
vetor = [0] * len(tradutor)
for palavra in texto:
if palavra in tradutor:
vetor[tradutor[palavra]] += 1
return vetor
#print(vetorizar_texto(textosQuebrados[0], tradutor))
#print(vetorizar_texto(textosQuebrados[1], tradutor))
#print(vetorizar_texto(textosQuebrados[2], tradutor))
vetoresDeTexto = [vetorizar_texto(texto, tradutor) for texto in textosQuebrados]
print(vetoresDeTexto)
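# A similar bag-of-words vectorization with scikit-learn (an alternative, not
# used by this script; assumes scikit-learn is installed and tokenizes with
# its own rules rather than str.split(' ')):
#
#   from sklearn.feature_extraction.text import CountVectorizer
#   vetorizador = CountVectorizer()
#   X = vetorizador.fit_transform(textosPuros)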
| 28.6 | 80 | 0.749417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.20979 |
34dd7a78ed67d24dec24e1e267661e54ecc9605b
| 23,901 |
py
|
Python
|
tests/test_challonge.py
|
eprouty/dgcastle
|
b4b56f7675648987f30d016a45e716cb76c69516
|
[
"MIT"
] | null | null | null |
tests/test_challonge.py
|
eprouty/dgcastle
|
b4b56f7675648987f30d016a45e716cb76c69516
|
[
"MIT"
] | 13 |
2017-01-30T15:38:38.000Z
|
2017-06-09T00:15:58.000Z
|
tests/test_challonge.py
|
eprouty/dgcastle
|
b4b56f7675648987f30d016a45e716cb76c69516
|
[
"MIT"
] | null | null | null |
import copy
import os
import pickle
import unittest
from unittest.mock import patch
from dgcastle import dgcastle
from dgcastle.exceptions import IncompleteException
from dgcastle.exceptions import ValidationException
from dgcastle.handlers.challonge import Challonge
TEST_TOURNAMENT = pickle.loads(b'\x80\x03}q\x00(X\x02\x00\x00\x00idq\x01J.X6\x00X\x04\x00\x00\x00nameq\x02X\x07\x00\x00\x00DG Testq\x03X\x03\x00\x00\x00urlq\x04X\x08\x00\x00\x00mwtmsdjsq\x05X\x0b\x00\x00\x00descriptionq\x06X\x1b\x00\x00\x00This is just a test bracketq\x07X\x0f\x00\x00\x00tournament-typeq\x08X\x12\x00\x00\x00single eliminationq\tX\n\x00\x00\x00started-atq\ncdatetime\ndatetime\nq\x0bC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00q\x0cciso8601.iso8601\nFixedOffset\nq\rJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x0e\x87q\x0fRq\x10}q\x11(X\x1a\x00\x00\x00_FixedOffset__offset_hoursq\x12J\xfc\xff\xff\xffX\x1c\x00\x00\x00_FixedOffset__offset_minutesq\x13K\x00X\x14\x00\x00\x00_FixedOffset__offsetq\x14cdatetime\ntimedelta\nq\x15J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x16Rq\x17X\x12\x00\x00\x00_FixedOffset__nameq\x18h\x0eub\x86q\x19Rq\x1aX\x0c\x00\x00\x00completed-atq\x1bh\x0bC\n\x07\xe1\x06\t\x10\x14$\x00\x00\x00q\x1ch\rJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x1d\x87q\x1eRq\x1f}q (h\x12J\xfc\xff\xff\xffh\x13K\x00h\x14h\x15J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q!Rq"h\x18h\x1dub\x86q#Rq$X\x17\x00\x00\x00require-score-agreementq%\x89X\x1e\x00\x00\x00notify-users-when-matches-openq&\x88X\n\x00\x00\x00created-atq\'h\x0bC\n\x07\xe1\x06\t\x0e+\x10\x00\x00\x00q(h\rJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q)\x87q*Rq+}q,(h\x12J\xfc\xff\xff\xffh\x13K\x00h\x14h\x15J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q-Rq.h\x18h)ub\x86q/Rq0X\n\x00\x00\x00updated-atq1h\x0bC\n\x07\xe1\x06\t\x10\x14$\x00\x00\x00q2h\rJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q3\x87q4Rq5}q6(h\x12J\xfc\xff\xff\xffh\x13K\x00h\x14h\x15J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q7Rq8h\x18h3ub\x86q9Rq:X\x05\x00\x00\x00stateq;X\x08\x00\x00\x00completeq<X\x0b\x00\x00\x00open-signupq=\x89X%\x00\x00\x00notify-users-when-the-tournament-endsq>\x88X\x0e\x00\x00\x00progress-meterq?KdX\r\x00\x00\x00quick-advanceq@\x89X\x16\x00\x00\x00hold-third-place-matchqA\x89X\x10\x00\x00\x00pts-for-game-winqBcdecimal\nDecimal\nqCX\x03\x00\x00\x000.0qD\x85qERqFX\x10\x00\x00\x00pts-for-game-tieqGhCX\x03\x00\x00\x000.0qH\x85qIRqJX\x11\x00\x00\x00pts-for-match-winqKhCX\x03\x00\x00\x001.0qL\x85qMRqNX\x11\x00\x00\x00pts-for-match-tieqOhCX\x03\x00\x00\x000.5qP\x85qQRqRX\x0b\x00\x00\x00pts-for-byeqShCX\x03\x00\x00\x001.0qT\x85qURqVX\x0c\x00\x00\x00swiss-roundsqWK\x00X\x07\x00\x00\x00privateqX\x89X\t\x00\x00\x00ranked-byqYX\n\x00\x00\x00match winsqZX\x0b\x00\x00\x00show-roundsq[\x88X\n\x00\x00\x00hide-forumq\\\x89X\x13\x00\x00\x00sequential-pairingsq]\x89X\x12\x00\x00\x00accept-attachmentsq^\x89X\x13\x00\x00\x00rr-pts-for-game-winq_hCX\x03\x00\x00\x000.0q`\x85qaRqbX\x13\x00\x00\x00rr-pts-for-game-tieqchCX\x03\x00\x00\x000.0qd\x85qeRqfX\x14\x00\x00\x00rr-pts-for-match-winqghCX\x03\x00\x00\x001.0qh\x85qiRqjX\x14\x00\x00\x00rr-pts-for-match-tieqkhCX\x03\x00\x00\x000.5ql\x85qmRqnX\x0e\x00\x00\x00created-by-apiqo\x89X\r\x00\x00\x00credit-cappedqp\x89X\x08\x00\x00\x00categoryqqNX\n\x00\x00\x00hide-seedsqr\x89X\x11\x00\x00\x00prediction-methodqsK\x00X\x15\x00\x00\x00predictions-opened-atqtNX\x10\x00\x00\x00anonymous-votingqu\x89X\x18\x00\x00\x00max-predictions-per-userqvK\x01X\n\x00\x00\x00signup-capqwNX\x07\x00\x00\x00game-idqxK@X\x12\x00\x00\x00participants-countqyK\x08X\x14\x00\x00\x00group-stages-enabledqz\x89X!\x00\x00\x00allow-participant-match-reportingq{\x88X\x05\x00\x00\x00teamsq|\x89X\x11\x00\x00\x00check-in-durationq}NX\x08\x00\x00\x00start-atq~NX\x16\x00\x00\x00started-checking-in-atq\x7fNX\n\x00\x00\x00tie-breaksq\x80X\x05\x00\x00\x00\n 
q\x81X\t\x00\x00\x00locked-atq\x82NX\x08\x00\x00\x00event-idq\x83NX$\x00\x00\x00public-predictions-before-start-timeq\x84\x89X\x06\x00\x00\x00rankedq\x85\x89X\x15\x00\x00\x00grand-finals-modifierq\x86NX\x1a\x00\x00\x00predict-the-losers-bracketq\x87\x89X\x04\x00\x00\x00spamq\x88NX\x03\x00\x00\x00hamq\x89NX\x12\x00\x00\x00description-sourceq\x8aX\x1b\x00\x00\x00This is just a test bracketq\x8bX\t\x00\x00\x00subdomainq\x8cNX\x12\x00\x00\x00full-challonge-urlq\x8dX\x1d\x00\x00\x00http://challonge.com/mwtmsdjsq\x8eX\x0e\x00\x00\x00live-image-urlq\x8fX!\x00\x00\x00http://challonge.com/mwtmsdjs.svgq\x90X\x0b\x00\x00\x00sign-up-urlq\x91NX\x18\x00\x00\x00review-before-finalizingq\x92\x88X\x15\x00\x00\x00accepting-predictionsq\x93\x89X\x13\x00\x00\x00participants-lockedq\x94\x88X\t\x00\x00\x00game-nameq\x95X\t\x00\x00\x00Disc Golfq\x96X\x16\x00\x00\x00participants-swappableq\x97\x89X\x10\x00\x00\x00team-convertableq\x98\x89X\x19\x00\x00\x00group-stages-were-startedq\x99\x89u.')
TEST_MATCH_INDEX = pickle.loads(b'\x80\x03]q\x00(}q\x01(X\x02\x00\x00\x00idq\x02J\xef\xd0Z\x05X\r\x00\x00\x00tournament-idq\x03J.X6\x00X\x05\x00\x00\x00stateq\x04X\x08\x00\x00\x00completeq\x05X\n\x00\x00\x00player1-idq\x06J\xfc.c\x03X\n\x00\x00\x00player2-idq\x07J\x0e/c\x03X\x17\x00\x00\x00player1-prereq-match-idq\x08NX\x17\x00\x00\x00player2-prereq-match-idq\tNX\x1d\x00\x00\x00player1-is-prereq-match-loserq\n\x89X\x1d\x00\x00\x00player2-is-prereq-match-loserq\x0b\x89X\t\x00\x00\x00winner-idq\x0cJ\xfc.c\x03X\x08\x00\x00\x00loser-idq\rJ\x0e/c\x03X\n\x00\x00\x00started-atq\x0ecdatetime\ndatetime\nq\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00q\x10ciso8601.iso8601\nFixedOffset\nq\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x12\x87q\x13Rq\x14}q\x15(X\x1a\x00\x00\x00_FixedOffset__offset_hoursq\x16J\xfc\xff\xff\xffX\x1c\x00\x00\x00_FixedOffset__offset_minutesq\x17K\x00X\x14\x00\x00\x00_FixedOffset__offsetq\x18cdatetime\ntimedelta\nq\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x1aRq\x1bX\x12\x00\x00\x00_FixedOffset__nameq\x1ch\x12ub\x86q\x1dRq\x1eX\n\x00\x00\x00created-atq\x1fh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00q h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q!\x87q"Rq#}q$(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q%Rq&h\x1ch!ub\x86q\'Rq(X\n\x00\x00\x00updated-atq)h\x0fC\n\x07\xe1\x06\t\x0e-\r\x00\x00\x00q*h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q+\x87q,Rq-}q.(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q/Rq0h\x1ch+ub\x86q1Rq2X\n\x00\x00\x00identifierq3X\x01\x00\x00\x00Aq4X\x0e\x00\x00\x00has-attachmentq5\x89X\x05\x00\x00\x00roundq6K\x01X\r\x00\x00\x00player1-votesq7NX\r\x00\x00\x00player2-votesq8NX\x08\x00\x00\x00group-idq9NX\x10\x00\x00\x00attachment-countq:NX\x0e\x00\x00\x00scheduled-timeq;NX\x08\x00\x00\x00locationq<NX\x0b\x00\x00\x00underway-atq=NX\x08\x00\x00\x00optionalq>\x89X\x08\x00\x00\x00rushb-idq?NX\x0c\x00\x00\x00completed-atq@h\x0fC\n\x07\xe1\x06\t\x0e-\r\x00\x00\x00qAh\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qB\x87qCRqD}qE(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qFRqGh\x1chBub\x86qHRqIX\x14\x00\x00\x00suggested-play-orderqJK\x01X\x1a\x00\x00\x00prerequisite-match-ids-csvqKNX\n\x00\x00\x00scores-csvqLX\x03\x00\x00\x002-0qMu}qN(h\x02J\xf0\xd0Z\x05h\x03J.X6\x00h\x04X\x08\x00\x00\x00completeqOh\x06J\xff.c\x03h\x07J\x00/c\x03h\x08Nh\tNh\n\x89h\x0b\x89h\x0cJ\x00/c\x03h\rJ\xff.c\x03h\x0eh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00qPh\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qQ\x87qRRqS}qT(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qURqVh\x1chQub\x86qWRqXh\x1fh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00qYh\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qZ\x87q[Rq\\}q](h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q^Rq_h\x1chZub\x86q`Rqah)h\x0fC\n\x07\xe1\x06\t\x10\x123\x00\x00\x00qbh\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qc\x87qdRqe}qf(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qgRqhh\x1chcub\x86qiRqjh3X\x01\x00\x00\x00Bqkh5\x89h6K\x01h7Nh8Nh9Nh:Nh;Nh<Nh=Nh>\x89h?Nh@h\x0fC\n\x07\xe1\x06\t\x10\x123\x00\x00\x00qlh\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qm\x87qnRqo}qp(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qqRqrh\x1chmub\x86qsRqthJK\x02hKNhLX\x03\x00\x00\x002-4quu}qv(h\x02J\xf1\xd0Z\x05h\x03J.X6\x00h\x04X\x08\x00\x00\x00completeqwh\x06J\xfd.c\x03h\x07J\x02/c\x03h\x
08Nh\tNh\n\x89h\x0b\x89h\x0cJ\x02/c\x03h\rJ\xfd.c\x03h\x0eh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00qxh\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qy\x87qzRq{}q|(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q}Rq~h\x1chyub\x86q\x7fRq\x80h\x1fh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00q\x81h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x82\x87q\x83Rq\x84}q\x85(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x86Rq\x87h\x1ch\x82ub\x86q\x88Rq\x89h)h\x0fC\n\x07\xe1\x06\t\x10\x13\r\x00\x00\x00q\x8ah\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x8b\x87q\x8cRq\x8d}q\x8e(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x8fRq\x90h\x1ch\x8bub\x86q\x91Rq\x92h3X\x01\x00\x00\x00Cq\x93h5\x89h6K\x01h7Nh8Nh9Nh:Nh;Nh<Nh=Nh>\x89h?Nh@h\x0fC\n\x07\xe1\x06\t\x10\x13\r\x00\x00\x00q\x94h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x95\x87q\x96Rq\x97}q\x98(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x99Rq\x9ah\x1ch\x95ub\x86q\x9bRq\x9chJK\x03hKNhLX\x03\x00\x00\x000-2q\x9du}q\x9e(h\x02J\xf2\xd0Z\x05h\x03J.X6\x00h\x04X\x08\x00\x00\x00completeq\x9fh\x06J\xfe.c\x03h\x07J\x01/c\x03h\x08Nh\tNh\n\x89h\x0b\x89h\x0cJ\xfe.c\x03h\rJ\x01/c\x03h\x0eh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00q\xa0h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xa1\x87q\xa2Rq\xa3}q\xa4(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xa5Rq\xa6h\x1ch\xa1ub\x86q\xa7Rq\xa8h\x1fh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00q\xa9h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xaa\x87q\xabRq\xac}q\xad(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xaeRq\xafh\x1ch\xaaub\x86q\xb0Rq\xb1h)h\x0fC\n\x07\xe1\x06\t\x10\x134\x00\x00\x00q\xb2h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xb3\x87q\xb4Rq\xb5}q\xb6(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xb7Rq\xb8h\x1ch\xb3ub\x86q\xb9Rq\xbah3X\x01\x00\x00\x00Dq\xbbh5\x89h6K\x01h7Nh8Nh9Nh:Nh;Nh<Nh=Nh>\x89h?Nh@h\x0fC\n\x07\xe1\x06\t\x10\x134\x00\x00\x00q\xbch\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xbd\x87q\xbeRq\xbf}q\xc0(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xc1Rq\xc2h\x1ch\xbdub\x86q\xc3Rq\xc4hJK\x04hKNhLX\x03\x00\x00\x009-8q\xc5u}q\xc6(h\x02J\xf3\xd0Z\x05h\x03J.X6\x00h\x04X\x08\x00\x00\x00completeq\xc7h\x06J\xfc.c\x03h\x07J\x00/c\x03h\x08J\xef\xd0Z\x05h\tJ\xf0\xd0Z\x05h\n\x89h\x0b\x89h\x0cJ\xfc.c\x03h\rJ\x00/c\x03h\x0eh\x0fC\n\x07\xe1\x06\t\x10\x123\x00\x00\x00q\xc8h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xc9\x87q\xcaRq\xcb}q\xcc(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xcdRq\xceh\x1ch\xc9ub\x86q\xcfRq\xd0h\x1fh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00q\xd1h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xd2\x87q\xd3Rq\xd4}q\xd5(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xd6Rq\xd7h\x1ch\xd2ub\x86q\xd8Rq\xd9h)h\x0fC\n\x07\xe1\x06\t\x10\x14\x0c\x00\x00\x00q\xdah\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xdb\x87q\xdcRq\xdd}q\xde(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xdfRq\xe0h\x1ch\xdbub\x86q\xe1Rq\xe2h3X\x01\x00\x00\x00Eq\xe3h5\x89h6K\x02h7Nh8Nh9Nh:Nh;Nh<Nh=Nh>\x89h?Nh@h\x0fC\n\x07\xe1\x06\t\x10\x14\x0c\x00\x00\x00q\xe4h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xe5\x87q\xe6Rq\xe7}q\xe8(h\x16J\xfc\xff\xff\xffh\x1
7K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xe9Rq\xeah\x1ch\xe5ub\x86q\xebRq\xechJK\x05hKX\x11\x00\x00\x0089837807,89837808q\xedhLX\x03\x00\x00\x001-0q\xeeu}q\xef(h\x02J\xf4\xd0Z\x05h\x03J.X6\x00h\x04X\x08\x00\x00\x00completeq\xf0h\x06J\x02/c\x03h\x07J\xfe.c\x03h\x08J\xf1\xd0Z\x05h\tJ\xf2\xd0Z\x05h\n\x89h\x0b\x89h\x0cJ\xfe.c\x03h\rJ\x02/c\x03h\x0eh\x0fC\n\x07\xe1\x06\t\x10\x134\x00\x00\x00q\xf1h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xf2\x87q\xf3Rq\xf4}q\xf5(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xf6Rq\xf7h\x1ch\xf2ub\x86q\xf8Rq\xf9h\x1fh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00q\xfah\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xfb\x87q\xfcRq\xfd}q\xfe(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xffRr\x00\x01\x00\x00h\x1ch\xfbub\x86r\x01\x01\x00\x00Rr\x02\x01\x00\x00h)h\x0fC\n\x07\xe1\x06\t\x10\x14\x02\x00\x00\x00r\x03\x01\x00\x00h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00r\x04\x01\x00\x00\x87r\x05\x01\x00\x00Rr\x06\x01\x00\x00}r\x07\x01\x00\x00(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87r\x08\x01\x00\x00Rr\t\x01\x00\x00h\x1cj\x04\x01\x00\x00ub\x86r\n\x01\x00\x00Rr\x0b\x01\x00\x00h3X\x01\x00\x00\x00Fr\x0c\x01\x00\x00h5\x89h6K\x02h7Nh8Nh9Nh:Nh;Nh<Nh=Nh>\x89h?Nh@h\x0fC\n\x07\xe1\x06\t\x10\x14\x02\x00\x00\x00r\r\x01\x00\x00h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00r\x0e\x01\x00\x00\x87r\x0f\x01\x00\x00Rr\x10\x01\x00\x00}r\x11\x01\x00\x00(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87r\x12\x01\x00\x00Rr\x13\x01\x00\x00h\x1cj\x0e\x01\x00\x00ub\x86r\x14\x01\x00\x00Rr\x15\x01\x00\x00hJK\x06hKX\x11\x00\x00\x0089837809,89837810r\x16\x01\x00\x00hLX\x03\x00\x00\x003-4r\x17\x01\x00\x00u}r\x18\x01\x00\x00(h\x02J\xf5\xd0Z\x05h\x03J.X6\x00h\x04X\x08\x00\x00\x00completer\x19\x01\x00\x00h\x06J\xfc.c\x03h\x07J\xfe.c\x03h\x08J\xf3\xd0Z\x05h\tJ\xf4\xd0Z\x05h\n\x89h\x0b\x89h\x0cJ\xfe.c\x03h\rJ\xfc.c\x03h\x0eh\x0fC\n\x07\xe1\x06\t\x10\x14\x0c\x00\x00\x00r\x1a\x01\x00\x00h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00r\x1b\x01\x00\x00\x87r\x1c\x01\x00\x00Rr\x1d\x01\x00\x00}r\x1e\x01\x00\x00(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87r\x1f\x01\x00\x00Rr 
\x01\x00\x00h\x1cj\x1b\x01\x00\x00ub\x86r!\x01\x00\x00Rr"\x01\x00\x00h\x1fh\x0fC\n\x07\xe1\x06\t\x0e,\x13\x00\x00\x00r#\x01\x00\x00h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00r$\x01\x00\x00\x87r%\x01\x00\x00Rr&\x01\x00\x00}r\'\x01\x00\x00(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87r(\x01\x00\x00Rr)\x01\x00\x00h\x1cj$\x01\x00\x00ub\x86r*\x01\x00\x00Rr+\x01\x00\x00h)h\x0fC\n\x07\xe1\x06\t\x10\x14\x1d\x00\x00\x00r,\x01\x00\x00h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00r-\x01\x00\x00\x87r.\x01\x00\x00Rr/\x01\x00\x00}r0\x01\x00\x00(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87r1\x01\x00\x00Rr2\x01\x00\x00h\x1cj-\x01\x00\x00ub\x86r3\x01\x00\x00Rr4\x01\x00\x00h3X\x01\x00\x00\x00Gr5\x01\x00\x00h5\x89h6K\x03h7Nh8Nh9Nh:Nh;Nh<Nh=Nh>\x89h?Nh@h\x0fC\n\x07\xe1\x06\t\x10\x14\x1d\x00\x00\x00r6\x01\x00\x00h\x11J\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00r7\x01\x00\x00\x87r8\x01\x00\x00Rr9\x01\x00\x00}r:\x01\x00\x00(h\x16J\xfc\xff\xff\xffh\x17K\x00h\x18h\x19J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87r;\x01\x00\x00Rr<\x01\x00\x00h\x1cj7\x01\x00\x00ub\x86r=\x01\x00\x00Rr>\x01\x00\x00hJK\x07hKX\x11\x00\x00\x0089837811,89837812r?\x01\x00\x00hLX\x03\x00\x00\x001-3r@\x01\x00\x00ue.')
TEST_PARTICIPANTS_INDEX = pickle.loads(b'\x80\x03]q\x00(}q\x01(X\x02\x00\x00\x00idq\x02J\xfc.c\x03X\r\x00\x00\x00tournament-idq\x03J.X6\x00X\x04\x00\x00\x00nameq\x04X\x01\x00\x00\x00Aq\x05X\x04\x00\x00\x00seedq\x06K\x01X\x06\x00\x00\x00activeq\x07\x88X\n\x00\x00\x00created-atq\x08cdatetime\ndatetime\nq\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00q\nciso8601.iso8601\nFixedOffset\nq\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x0c\x87q\rRq\x0e}q\x0f(X\x1a\x00\x00\x00_FixedOffset__offset_hoursq\x10J\xfc\xff\xff\xffX\x1c\x00\x00\x00_FixedOffset__offset_minutesq\x11K\x00X\x14\x00\x00\x00_FixedOffset__offsetq\x12cdatetime\ntimedelta\nq\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x14Rq\x15X\x12\x00\x00\x00_FixedOffset__nameq\x16h\x0cub\x86q\x17Rq\x18X\n\x00\x00\x00updated-atq\x19h\tC\n\x07\xe1\x06\t\x0e,\x0f\x00\x00\x00q\x1ah\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x1b\x87q\x1cRq\x1d}q\x1e(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x1fRq h\x16h\x1bub\x86q!Rq"X\x0c\x00\x00\x00invite-emailq#NX\n\x00\x00\x00final-rankq$K\x02X\x04\x00\x00\x00miscq%NX\x04\x00\x00\x00iconq&NX\x0f\x00\x00\x00on-waiting-listq\'\x89X\r\x00\x00\x00invitation-idq(NX\x08\x00\x00\x00group-idq)NX\r\x00\x00\x00checked-in-atq*NX\x12\x00\x00\x00challonge-usernameq+NX \x00\x00\x00challonge-email-address-verifiedq,NX\t\x00\x00\x00removableq-\x89X%\x00\x00\x00participatable-or-invitation-attachedq.\x89X\x0e\x00\x00\x00confirm-removeq/\x88X\x12\x00\x00\x00invitation-pendingq0\x89X*\x00\x00\x00display-name-with-invitation-email-addressq1h\x05X\n\x00\x00\x00email-hashq2NX\x08\x00\x00\x00usernameq3NX\x0c\x00\x00\x00display-nameq4h\x05X$\x00\x00\x00attached-participatable-portrait-urlq5NX\x0c\x00\x00\x00can-check-inq6\x89X\n\x00\x00\x00checked-inq7\x89X\r\x00\x00\x00reactivatableq8\x89X\r\x00\x00\x00check-in-openq9\x89X\x10\x00\x00\x00group-player-idsq:NX\x13\x00\x00\x00has-irrelevant-seedq;\x89u}q<(h\x02J\xfd.c\x03h\x03J.X6\x00h\x04X\x01\x00\x00\x00Bq=h\x06K\x02h\x07\x88h\x08h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00q>h\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q?\x87q@RqA}qB(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qCRqDh\x16h?ub\x86qERqFh\x19h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00qGh\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qH\x87qIRqJ}qK(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qLRqMh\x16hHub\x86qNRqOh#Nh$K\x05h%Nh&Nh\'\x89h(Nh)Nh*Nh+Nh,Nh-\x89h.\x89h/\x88h0\x89h1h=h2Nh3Nh4h=h5Nh6\x89h7\x89h8\x89h9\x89h:Nh;\x89u}qP(h\x02J\xfe.c\x03h\x03J.X6\x00h\x04X\x01\x00\x00\x00CqQh\x06K\x03h\x07\x88h\x08h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00qRh\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qS\x87qTRqU}qV(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qWRqXh\x16hSub\x86qYRqZh\x19h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00q[h\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\\\x87q]Rq^}q_(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q`Rqah\x16h\\ub\x86qbRqch#Nh$K\x01h%Nh&Nh\'\x89h(Nh)Nh*Nh+Nh,Nh-\x89h.\x89h/\x88h0\x89h1hQh2Nh3Nh4hQh5Nh6\x89h7\x89h8\x89h9\x89h:Nh;\x89u}qd(h\x02J\xff.c\x03h\x03J.X6\x00h\x04X\x01\x00\x00\x00Dqeh\x06K\x04h\x07\x88h\x08h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00qfh\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00qg\x87qhRqi}qj(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qkRqlh\x16hgub\x86qmRqnh\x19h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00qoh\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04
:00qp\x87qqRqr}qs(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87qtRquh\x16hpub\x86qvRqwh#Nh$K\x05h%Nh&Nh\'\x89h(Nh)Nh*Nh+Nh,Nh-\x89h.\x89h/\x88h0\x89h1heh2Nh3Nh4heh5Nh6\x89h7\x89h8\x89h9\x89h:Nh;\x89u}qx(h\x02J\x00/c\x03h\x03J.X6\x00h\x04X\x01\x00\x00\x00Eqyh\x06K\x05h\x07\x88h\x08h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00qzh\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q{\x87q|Rq}}q~(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x7fRq\x80h\x16h{ub\x86q\x81Rq\x82h\x19h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00q\x83h\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x84\x87q\x85Rq\x86}q\x87(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x88Rq\x89h\x16h\x84ub\x86q\x8aRq\x8bh#Nh$K\x03h%Nh&Nh\'\x89h(Nh)Nh*Nh+Nh,Nh-\x89h.\x89h/\x88h0\x89h1hyh2Nh3Nh4hyh5Nh6\x89h7\x89h8\x89h9\x89h:Nh;\x89u}q\x8c(h\x02J\x01/c\x03h\x03J.X6\x00h\x04X\x01\x00\x00\x00Fq\x8dh\x06K\x06h\x07\x88h\x08h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00q\x8eh\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x8f\x87q\x90Rq\x91}q\x92(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x93Rq\x94h\x16h\x8fub\x86q\x95Rq\x96h\x19h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00q\x97h\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\x98\x87q\x99Rq\x9a}q\x9b(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\x9cRq\x9dh\x16h\x98ub\x86q\x9eRq\x9fh#Nh$K\x05h%Nh&Nh\'\x89h(Nh)Nh*Nh+Nh,Nh-\x89h.\x89h/\x88h0\x89h1h\x8dh2Nh3Nh4h\x8dh5Nh6\x89h7\x89h8\x89h9\x89h:Nh;\x89u}q\xa0(h\x02J\x02/c\x03h\x03J.X6\x00h\x04X\x01\x00\x00\x00Gq\xa1h\x06K\x07h\x07\x88h\x08h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00q\xa2h\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xa3\x87q\xa4Rq\xa5}q\xa6(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xa7Rq\xa8h\x16h\xa3ub\x86q\xa9Rq\xaah\x19h\tC\n\x07\xe1\x06\t\x0e+/\x00\x00\x00q\xabh\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xac\x87q\xadRq\xae}q\xaf(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xb0Rq\xb1h\x16h\xacub\x86q\xb2Rq\xb3h#Nh$K\x03h%Nh&Nh\'\x89h(Nh)Nh*Nh+Nh,Nh-\x89h.\x89h/\x88h0\x89h1h\xa1h2Nh3Nh4h\xa1h5Nh6\x89h7\x89h8\x89h9\x89h:Nh;\x89u}q\xb4(h\x02J\x0e/c\x03h\x03J.X6\x00h\x04X\x01\x00\x00\x00Hq\xb5h\x06K\x08h\x07\x88h\x08h\tC\n\x07\xe1\x06\t\x0e+8\x00\x00\x00q\xb6h\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xb7\x87q\xb8Rq\xb9}q\xba(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xbbRq\xbch\x16h\xb7ub\x86q\xbdRq\xbeh\x19h\tC\n\x07\xe1\x06\t\x0e,\x0f\x00\x00\x00q\xbfh\x0bJ\xfc\xff\xff\xffK\x00X\x06\x00\x00\x00-04:00q\xc0\x87q\xc1Rq\xc2}q\xc3(h\x10J\xfc\xff\xff\xffh\x11K\x00h\x12h\x13J\xff\xff\xff\xffJ@\x19\x01\x00K\x00\x87q\xc4Rq\xc5h\x16h\xc0ub\x86q\xc6Rq\xc7h#Nh$K\x05h%Nh&Nh\'\x89h(Nh)Nh*Nh+Nh,Nh-\x89h.\x89h/\x88h0\x89h1h\xb5h2Nh3Nh4h\xb5h5Nh6\x89h7\x89h8\x89h9\x89h:Nh;\x89ue.')
class TestChallongeImport(unittest.TestCase):
def setUp(self):
self.dgcastle = dgcastle.DGCastle(testDb="TestChallonge")
os.environ['CHALLONGE_API'] = 'test,test'
def tearDown(self):
del(self.dgcastle)
def test_import(self):
t = None
with patch('challonge.tournaments') as mock_tournaments:
mock_tournaments.show.return_value = TEST_TOURNAMENT
with patch('challonge.participants') as mock_participants:
mock_participants.index.return_value = TEST_PARTICIPANTS_INDEX
with patch('challonge.matches') as mock_matches:
mock_matches.index.return_value = TEST_MATCH_INDEX
t = self.dgcastle.challonge_import(1)
self.assertEqual(len(t.participants), 8)
self.assertEqual(len(t.matches), 7)
def test_incompleteBracket(self):
with patch('challonge.tournaments') as mock_tournaments:
ret_val = copy.deepcopy(TEST_TOURNAMENT)
ret_val['state'] = 'not complete'
mock_tournaments.show.return_value = ret_val
self.assertRaises(IncompleteException, self.dgcastle.challonge_import, 1)
def test_incorrectResultsData(self):
with patch('challonge.tournaments') as mock_tournaments:
mock_tournaments.show.return_value = TEST_TOURNAMENT
with patch('challonge.participants') as mock_participants:
mock_participants.index.return_value = TEST_PARTICIPANTS_INDEX
with patch('challonge.matches') as mock_matches:
ret_val = copy.deepcopy(TEST_MATCH_INDEX)
ret_val[0]['scores-csv'] = '1-0,2-1'
mock_matches.index.return_value = ret_val
self.assertRaises(ValidationException, self.dgcastle.challonge_import, 1)
| 426.803571 | 10,690 | 0.770554 | 1,874 | 0.078407 | 0 | 0 | 0 | 0 | 0 | 0 | 21,886 | 0.915694 |
34e14659ac3348a14f3cb971dd1656c1b96e47ab
| 4,917 |
py
|
Python
|
MIDI Remote Scripts/pushbase/step_duplicator.py
|
aarkwright/ableton_devices
|
fe5df3bbd64ccbc136bba722ba1e131a02969798
|
[
"MIT"
] | null | null | null |
MIDI Remote Scripts/pushbase/step_duplicator.py
|
aarkwright/ableton_devices
|
fe5df3bbd64ccbc136bba722ba1e131a02969798
|
[
"MIT"
] | null | null | null |
MIDI Remote Scripts/pushbase/step_duplicator.py
|
aarkwright/ableton_devices
|
fe5df3bbd64ccbc136bba722ba1e131a02969798
|
[
"MIT"
] | null | null | null |
# uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\pushbase\step_duplicator.py
# Compiled at: 2018-11-30 15:48:12
from __future__ import absolute_import, print_function, unicode_literals
from functools import partial
from ableton.v2.base import liveobj_valid, nop
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import ButtonControl
from .consts import MessageBoxText
from .message_box_component import Messenger
ALL_NOTES = -1
def get_transposition_amount(source_step, destination_step):
transposition = destination_step[0] - source_step[0]
if ALL_NOTES == source_step[0]:
transposition = 0
elif destination_step[0] == ALL_NOTES:
transposition = source_step[0]
return transposition
class NullStepDuplicator(object):
@property
def is_duplicating(self):
return False
def set_clip(self, _):
pass
def set_loop(clip, loop_start, loop_end):
if loop_start >= clip.loop_end:
clip.loop_end = loop_end
if clip.loop_end == loop_end:
clip.loop_start = loop_start
clip.end_marker = loop_end
clip.start_marker = loop_start
else:
clip.loop_start = loop_start
if clip.loop_start == loop_start:
clip.loop_end = loop_end
clip.end_marker = loop_end
clip.start_marker = loop_start
class StepDuplicatorComponent(Component, Messenger):
button = ButtonControl()
def __init__(self, *a, **k):
super(StepDuplicatorComponent, self).__init__(*a, **k)
self._clip = None
self._source_step = None
self._notification_reference = partial(nop, None)
return
@property
def is_duplicating(self):
return self.button.is_pressed and liveobj_valid(self._clip)
def set_clip(self, clip):
self._cancel_duplicate()
self._clip = clip
def add_step_with_pitch(self, note, step_start, step_end, nudge_offset=0, is_page=False):
if self.is_enabled() and self.is_duplicating:
current_step = (note,
step_start,
step_end - step_start,
nudge_offset,
is_page)
if self._source_step is not None:
self._duplicate_to(current_step)
else:
self._duplicate_from(current_step)
return
def add_step(self, step_start, step_end, nudge_offset=0, is_page=False):
self.add_step_with_pitch(ALL_NOTES, step_start, step_end, nudge_offset, is_page)
def _duplicate_from(self, source_step):
message = MessageBoxText.CANNOT_COPY_EMPTY_PAGE if source_step[4] else MessageBoxText.CANNOT_COPY_EMPTY_STEP
from_pitch = source_step[0]
pitch_span = 1
if from_pitch == ALL_NOTES:
from_pitch = 0
pitch_span = 127
notes = self._clip.get_notes(source_step[1], from_pitch, source_step[2], pitch_span)
if len(notes) > 0:
message = MessageBoxText.COPIED_PAGE if source_step[4] else MessageBoxText.COPIED_STEP
self._source_step = source_step
self._notification_reference = self.show_notification(message)
def _duplicate_to(self, destination_step):
if self._source_step[4] == destination_step[4]:
message = MessageBoxText.CANNOT_PASTE_TO_SOURCE_PAGE if destination_step[4] else MessageBoxText.CANNOT_PASTE_TO_SOURCE_STEP
if destination_step != self._source_step:
message = MessageBoxText.PASTED_PAGE if destination_step[4] else MessageBoxText.PASTED_STEP
self._clip.duplicate_region(self._source_step[1], self._source_step[2], destination_step[1] + self._source_step[3], self._source_step[0], get_transposition_amount(self._source_step, destination_step))
else:
message = MessageBoxText.CANNOT_PASTE_FROM_STEP_TO_PAGE if destination_step[4] else MessageBoxText.CANNOT_PASTE_FROM_PAGE_TO_STEP
loop_start = destination_step[1]
loop_end = loop_start + self._source_step[2]
if destination_step[4] and not (loop_start >= self._clip.loop_start and loop_end <= self._clip.loop_end):
set_loop(self._clip, loop_start, loop_end)
self._notification_reference = self.show_notification(message)
self._source_step = None
return
def _cancel_duplicate(self):
self._source_step = None
if self._notification_reference() is not None:
self._notification_reference().hide()
return
@button.released
def button(self, _):
self._cancel_duplicate()
def update(self):
super(StepDuplicatorComponent, self).update()
self._cancel_duplicate()
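# Illustrative sketch (not part of the decompiled module): the transposition
# helper only inspects the pitch field (index 0) of each step tuple, with
# ALL_NOTES acting as a wildcard on the source side. For example:
#
#   get_transposition_amount((60, 0.0, 0.25, 0, False),
#                            (64, 1.0, 0.25, 0, False))         # -> 4
#   get_transposition_amount((ALL_NOTES, 0.0, 0.25, 0, False),
#                            (64, 1.0, 0.25, 0, False))         # -> 0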
| 39.653226 | 216 | 0.690462 | 3,473 | 0.706325 | 0 | 0 | 241 | 0.049014 | 0 | 0 | 309 | 0.062843 |
34e3c30f1eecc4a83cc074f6ae2e470a42d8d132
| 1,058 |
py
|
Python
|
cride/users/models/exchanges.py
|
albertoaldanar/betmatcherAPI
|
c0590025efd79f4e489f9c9433b17554ea6ba23f
|
[
"MIT"
] | null | null | null |
cride/users/models/exchanges.py
|
albertoaldanar/betmatcherAPI
|
c0590025efd79f4e489f9c9433b17554ea6ba23f
|
[
"MIT"
] | 7 |
2020-06-05T20:53:27.000Z
|
2022-03-11T23:47:12.000Z
|
cride/users/models/exchanges.py
|
albertoaldanar/betmatcherAPI
|
c0590025efd79f4e489f9c9433b17554ea6ba23f
|
[
"MIT"
] | null | null | null |
from django.db import models
#Utilities
from cride.utils.models import BetmatcherModel
class Exchange(BetmatcherModel):
user = models.ForeignKey(
"users.User",
on_delete = models.CASCADE,
related_name = "user"
)
prize = models.ForeignKey(
"users.Prize",
on_delete = models.CASCADE,
related_name = "prize"
)
adress = models.CharField(max_length = 60, blank = True, null = True)
phone = models.CharField(max_length = 25, blank = True, null = True)
email = models.CharField(max_length = 25, blank = True, null = True)
cp = models.CharField(max_length = 25, blank = True, null = True)
country = models.CharField(max_length = 25, blank = True, null = True)
city = models.CharField(max_length = 25, blank = True, null = True)
full_name = models.CharField(max_length = 55, blank = True, null = True)
state = models.CharField(max_length = 25, blank = True, null = True)
date = models.DateTimeField(
"event_date",
help_text = "Date of the event"
)
def __str__(self):
return self.user.username
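# Hedged usage sketch (not part of the original module; the `user` and
# `prize` instances and all field values below are made up for illustration):
#
#   from django.utils import timezone
#   Exchange.objects.create(
#       user=user, prize=prize, full_name="Jane Doe",
#       email="jane@example.com", country="MX", city="CDMX",
#       date=timezone.now())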
| 28.594595 | 74 | 0.6862 | 967 | 0.913989 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.074669 |
34e4d3ae291ecf089e466ddec64c7d9c23c88213
| 1,540 |
py
|
Python
|
Python/Examples/Macros/MoveAxis.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
Python/Examples/Macros/MoveAxis.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
Python/Examples/Macros/MoveAxis.py
|
halmusaibeli/RoboDK-API
|
e017aa26715bc8d0fcbbc05e57acc32f2d2d6174
|
[
"MIT"
] | null | null | null |
# This macro allows changing the position of an external axis by hand or within a program as a function call.
# Example of a function call (units are in mm or deg):
# MoveAxis(0)
# MoveAxis(100)
# https://robodk.com/doc/en/RoboDK-API.html
import sys # allows getting the passed argument parameters
from robodk.robodialogs import *
# Enter the name of the axis (leave empty to select the first mechanism/robot available)
MECHANISM_NAME = ''
# Enter the default value:
DEFAULT_VALUE = 0
# Set BLOCKING to True to make the program wait until the axis has stopped moving
BLOCKING = True
# --------------- PROGRAM START -------------------------
VALUE = DEFAULT_VALUE
if len(sys.argv) < 2:
    # Prompt the user to enter a new value if the macro is just double clicked
    print('This macro can be called as MoveAxis(value)')
print('Number of arguments: ' + str(len(sys.argv)))
#raise Exception('Invalid parameters provided: ' + str(sys.argv))
entry = mbox('Move one axis. Enter the new value in mm or deg\n\nNote: this can be called as a program.\nExample: MoveAxis(VALUE)', entry=str(DEFAULT_VALUE))
if not entry:
#raise Exception('Operation cancelled by user')
quit()
VALUE = float(entry)
else:
# Take the argument as new joint value
VALUE = float(sys.argv[1])
# Use the RoboDK API:
from robodk.robolink import * # API to communicate with RoboDK
RDK = Robolink()
# Get the robot item:
axis = RDK.Item(MECHANISM_NAME, ITEM_TYPE_ROBOT)
# Move the robot/mechanism
axis.MoveJ([VALUE], BLOCKING)
| 32.765957 | 161 | 0.698701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,038 | 0.674026 |
34e56db9261caf77c7f05ca57c7245a93b1deefe
| 11,035 |
py
|
Python
|
tests/test_setup.py
|
rozuur/ptvsd
|
046fd0f054b2eed91ec5df02e5f36151b71e36b1
|
[
"MIT"
] | null | null | null |
tests/test_setup.py
|
rozuur/ptvsd
|
046fd0f054b2eed91ec5df02e5f36151b71e36b1
|
[
"MIT"
] | null | null | null |
tests/test_setup.py
|
rozuur/ptvsd
|
046fd0f054b2eed91ec5df02e5f36151b71e36b1
|
[
"MIT"
] | null | null | null |
import os.path
import unittest
from setup import iter_vendored_files
class IterVendoredFilesTests(unittest.TestCase):
def test_all(self):
filenames = set(iter_vendored_files())
self.assertEqual(filenames, VENDORED)
VENDORED = {file.replace('/', os.path.sep) for file in [
'pydevd/pydev_run_in_console.py',
'pydevd/setup_cython.py',
'pydevd/pydev_app_engine_debug_startup.py',
'pydevd/pydevd_tracing.py',
'pydevd/pydev_pysrc.py',
'pydevd/pydevconsole.py',
'pydevd/pydevd.py',
'pydevd/pydev_coverage.py',
'pydevd/pydevd_file_utils.py',
'pydevd/pydevd_attach_to_process/attach_linux_x86.so',
'pydevd/pydevd_attach_to_process/attach_pydevd.py',
'pydevd/pydevd_attach_to_process/attach_amd64.dll',
'pydevd/pydevd_attach_to_process/_test_attach_to_process.py',
'pydevd/pydevd_attach_to_process/attach_linux_amd64.so',
'pydevd/pydevd_attach_to_process/attach_x86.dll',
'pydevd/pydevd_attach_to_process/_always_live_program.py',
'pydevd/pydevd_attach_to_process/attach_x86.dylib',
'pydevd/pydevd_attach_to_process/_check.py',
'pydevd/pydevd_attach_to_process/README.txt',
'pydevd/pydevd_attach_to_process/add_code_to_python_process.py',
'pydevd/pydevd_attach_to_process/attach_x86_64.dylib',
'pydevd/pydevd_attach_to_process/attach_script.py',
'pydevd/pydevd_attach_to_process/_test_attach_to_process_linux.py',
'pydevd/pydevd_attach_to_process/dll/attach.h',
'pydevd/pydevd_attach_to_process/dll/python.h',
'pydevd/pydevd_attach_to_process/dll/attach.cpp',
'pydevd/pydevd_attach_to_process/dll/stdafx.h',
'pydevd/pydevd_attach_to_process/dll/compile_dll.bat',
'pydevd/pydevd_attach_to_process/dll/stdafx.cpp',
'pydevd/pydevd_attach_to_process/dll/targetver.h',
'pydevd/pydevd_attach_to_process/winappdbg/module.py',
'pydevd/pydevd_attach_to_process/winappdbg/event.py',
'pydevd/pydevd_attach_to_process/winappdbg/process.py',
'pydevd/pydevd_attach_to_process/winappdbg/thread.py',
'pydevd/pydevd_attach_to_process/winappdbg/disasm.py',
'pydevd/pydevd_attach_to_process/winappdbg/textio.py',
'pydevd/pydevd_attach_to_process/winappdbg/sql.py',
'pydevd/pydevd_attach_to_process/winappdbg/util.py',
'pydevd/pydevd_attach_to_process/winappdbg/crash.py',
'pydevd/pydevd_attach_to_process/winappdbg/registry.py',
'pydevd/pydevd_attach_to_process/winappdbg/breakpoint.py',
'pydevd/pydevd_attach_to_process/winappdbg/search.py',
'pydevd/pydevd_attach_to_process/winappdbg/compat.py',
'pydevd/pydevd_attach_to_process/winappdbg/window.py',
'pydevd/pydevd_attach_to_process/winappdbg/interactive.py',
'pydevd/pydevd_attach_to_process/winappdbg/__init__.py',
'pydevd/pydevd_attach_to_process/winappdbg/system.py',
'pydevd/pydevd_attach_to_process/winappdbg/debug.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/shlwapi.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/kernel32.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/advapi32.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/__init__.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/psapi.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/defines.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/user32.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/dbghelp.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/version.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/peb_teb.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/context_amd64.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/shell32.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/ntdll.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/wtsapi32.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/context_i386.py',
'pydevd/pydevd_attach_to_process/winappdbg/win32/gdi32.py',
'pydevd/pydevd_attach_to_process/winappdbg/plugins/__init__.py',
'pydevd/pydevd_attach_to_process/winappdbg/plugins/do_symfix.py',
'pydevd/pydevd_attach_to_process/winappdbg/plugins/README',
'pydevd/pydevd_attach_to_process/winappdbg/plugins/do_exchain.py',
'pydevd/pydevd_attach_to_process/winappdbg/plugins/do_example.py',
'pydevd/pydevd_attach_to_process/winappdbg/plugins/do_exploitable.py',
'pydevd/pydevd_attach_to_process/linux/gdb_threads_settrace.py',
'pydevd/pydevd_attach_to_process/linux/compile_mac.sh',
'pydevd/pydevd_attach_to_process/linux/Makefile',
'pydevd/pydevd_attach_to_process/linux/lldb_prepare.py',
'pydevd/pydevd_attach_to_process/linux/compile_so.sh',
'pydevd/pydevd_attach_to_process/linux/python.h',
'pydevd/pydevd_attach_to_process/linux/attach_linux.c',
'pydevd/pydevd_attach_to_process/linux/lldb_threads_settrace.py',
'pydevd/_pydev_bundle/_pydev_imports_tipper.py',
'pydevd/_pydev_bundle/_pydev_getopt.py',
'pydevd/_pydev_bundle/pydev_umd.py',
'pydevd/_pydev_bundle/fix_getpass.py',
'pydevd/_pydev_bundle/pydev_is_thread_alive.py',
'pydevd/_pydev_bundle/pydev_ipython_console.py',
'pydevd/_pydev_bundle/_pydev_jy_imports_tipper.py',
'pydevd/_pydev_bundle/pydev_imports.py',
'pydevd/_pydev_bundle/pydev_override.py',
'pydevd/_pydev_bundle/pydev_monkey.py',
'pydevd/_pydev_bundle/pydev_localhost.py',
'pydevd/_pydev_bundle/pydev_log.py',
'pydevd/_pydev_bundle/pydev_ipython_console_011.py',
'pydevd/_pydev_bundle/_pydev_tipper_common.py',
'pydevd/_pydev_bundle/pydev_monkey_qt.py',
'pydevd/_pydev_bundle/_pydev_log.py',
'pydevd/_pydev_bundle/_pydev_filesystem_encoding.py',
'pydevd/_pydev_bundle/pydev_versioncheck.py',
'pydevd/_pydev_bundle/__init__.py',
'pydevd/_pydev_bundle/_pydev_completer.py',
'pydevd/_pydev_bundle/pydev_import_hook.py',
'pydevd/_pydev_bundle/pydev_console_utils.py',
'pydevd/_pydev_bundle/_pydev_calltip_util.py',
'pydevd/pydevd_plugins/jinja2_debug.py',
'pydevd/pydevd_plugins/django_debug.py',
'pydevd/pydevd_plugins/__init__.py',
'pydevd/pydevd_plugins/extensions/README.md',
'pydevd/pydevd_plugins/extensions/__init__.py',
'pydevd/pydevd_plugins/extensions/types/pydevd_plugin_numpy_types.py',
'pydevd/pydevd_plugins/extensions/types/__init__.py',
'pydevd/pydevd_plugins/extensions/types/pydevd_helpers.py',
'pydevd/pydevd_plugins/extensions/types/pydevd_plugins_django_form_str.py',
'pydevd/_pydev_runfiles/pydev_runfiles_coverage.py',
'pydevd/_pydev_runfiles/pydev_runfiles_nose.py',
'pydevd/_pydev_runfiles/pydev_runfiles_parallel.py',
'pydevd/_pydev_runfiles/pydev_runfiles_pytest2.py',
'pydevd/_pydev_runfiles/pydev_runfiles.py',
'pydevd/_pydev_runfiles/pydev_runfiles_parallel_client.py',
'pydevd/_pydev_runfiles/__init__.py',
'pydevd/_pydev_runfiles/pydev_runfiles_xml_rpc.py',
'pydevd/_pydev_runfiles/pydev_runfiles_unittest.py',
'pydevd/pydevd_concurrency_analyser/pydevd_concurrency_logger.py',
'pydevd/pydevd_concurrency_analyser/pydevd_thread_wrappers.py',
'pydevd/pydevd_concurrency_analyser/__init__.py',
'pydevd/_pydev_imps/_pydev_xmlrpclib.py',
'pydevd/_pydev_imps/_pydev_execfile.py',
'pydevd/_pydev_imps/_pydev_SimpleXMLRPCServer.py',
'pydevd/_pydev_imps/_pydev_saved_modules.py',
'pydevd/_pydev_imps/_pydev_sys_patch.py',
'pydevd/_pydev_imps/_pydev_inspect.py',
'pydevd/_pydev_imps/_pydev_SocketServer.py',
'pydevd/_pydev_imps/_pydev_BaseHTTPServer.py',
'pydevd/_pydev_imps/__init__.py',
'pydevd/_pydev_imps/_pydev_pkgutil_old.py',
'pydevd/_pydev_imps/_pydev_uuid_old.py',
'pydevd/_pydevd_frame_eval/pydevd_frame_eval_cython_wrapper.py',
'pydevd/_pydevd_frame_eval/pydevd_frame_evaluator.c',
'pydevd/_pydevd_frame_eval/pydevd_modify_bytecode.py',
'pydevd/_pydevd_frame_eval/pydevd_frame_evaluator.pyx',
'pydevd/_pydevd_frame_eval/__init__.py',
'pydevd/_pydevd_frame_eval/pydevd_frame_eval_main.py',
'pydevd/_pydevd_frame_eval/pydevd_frame_evaluator.pxd',
'pydevd/_pydevd_frame_eval/pydevd_frame_tracing.py',
'pydevd/pydev_ipython/inputhookpyglet.py',
'pydevd/pydev_ipython/inputhookgtk3.py',
'pydevd/pydev_ipython/inputhookqt5.py',
'pydevd/pydev_ipython/inputhookglut.py',
'pydevd/pydev_ipython/matplotlibtools.py',
'pydevd/pydev_ipython/inputhookqt4.py',
'pydevd/pydev_ipython/inputhookwx.py',
'pydevd/pydev_ipython/__init__.py',
'pydevd/pydev_ipython/qt_loaders.py',
'pydevd/pydev_ipython/inputhook.py',
'pydevd/pydev_ipython/README',
'pydevd/pydev_ipython/version.py',
'pydevd/pydev_ipython/qt_for_kernel.py',
'pydevd/pydev_ipython/inputhooktk.py',
'pydevd/pydev_ipython/qt.py',
'pydevd/pydev_ipython/inputhookgtk.py',
'pydevd/_pydevd_bundle/pydevd_vm_type.py',
'pydevd/_pydevd_bundle/pydevd_additional_thread_info_regular.py',
'pydevd/_pydevd_bundle/pydevd_reload.py',
'pydevd/_pydevd_bundle/pydevd_trace_dispatch_regular.py',
'pydevd/_pydevd_bundle/pydevd_cython.pyx',
'pydevd/_pydevd_bundle/pydevd_collect_try_except_info.py',
'pydevd/_pydevd_bundle/pydevd_extension_utils.py',
'pydevd/_pydevd_bundle/pydevd_stackless.py',
'pydevd/_pydevd_bundle/pydevd_constants.py',
'pydevd/_pydevd_bundle/pydevd_frame_utils.py',
'pydevd/_pydevd_bundle/pydevd_dont_trace_files.py',
'pydevd/_pydevd_bundle/pydevd_frame.py',
'pydevd/_pydevd_bundle/pydevd_xml.py',
'pydevd/_pydevd_bundle/pydevd_extension_api.py',
'pydevd/_pydevd_bundle/pydevd_comm.py',
'pydevd/_pydevd_bundle/pydevd_kill_all_pydevd_threads.py',
'pydevd/_pydevd_bundle/pydevd_traceproperty.py',
'pydevd/_pydevd_bundle/pydevd_command_line_handling.py',
'pydevd/_pydevd_bundle/pydevd_io.py',
'pydevd/_pydevd_bundle/pydevd_dont_trace.py',
'pydevd/_pydevd_bundle/pydevd_trace_dispatch.py',
'pydevd/_pydevd_bundle/pydevd_signature.py',
'pydevd/_pydevd_bundle/pydevd_import_class.py',
'pydevd/_pydevd_bundle/pydevd_custom_frames.py',
'pydevd/_pydevd_bundle/pydevd_additional_thread_info.py',
'pydevd/_pydevd_bundle/pydevd_exec.py',
'pydevd/_pydevd_bundle/pydevd_vars.py',
'pydevd/_pydevd_bundle/pydevd_exec2.py',
'pydevd/_pydevd_bundle/pydevd_cython_wrapper.py',
'pydevd/_pydevd_bundle/pydevd_plugin_utils.py',
'pydevd/_pydevd_bundle/pydevconsole_code_for_ironpython.py',
'pydevd/_pydevd_bundle/pydevd_process_net_command.py',
'pydevd/_pydevd_bundle/pydevd_resolver.py',
'pydevd/_pydevd_bundle/pydevd_utils.py',
'pydevd/_pydevd_bundle/pydevd_console.py',
'pydevd/_pydevd_bundle/pydevd_referrers.py',
'pydevd/_pydevd_bundle/pydevd_cython.c',
'pydevd/_pydevd_bundle/pydevd_breakpoints.py',
'pydevd/_pydevd_bundle/__init__.py',
'pydevd/_pydevd_bundle/pydevd_trace_api.py',
'pydevd/_pydevd_bundle/pydevd_save_locals.py',
'pydevd/pydev_sitecustomize/sitecustomize.py',
'pydevd/pydev_sitecustomize/__not_in_default_pythonpath.txt',
]}
| 50.852535 | 79 | 0.786226 | 167 | 0.015134 | 0 | 0 | 0 | 0 | 0 | 0 | 9,536 | 0.864159 |
34e5b5fd754168ef6338e900c37a7a2ea6696ba4
| 3,126 |
py
|
Python
|
pgcsv/db.py
|
pudo/pgcsv
|
9a6ae352da2ae3de5953b8b1f4c48dfcab403a3e
|
[
"MIT"
] | 66 |
2017-02-05T19:36:03.000Z
|
2022-01-25T21:41:18.000Z
|
pgcsv/db.py
|
pudo/pgcsv
|
9a6ae352da2ae3de5953b8b1f4c48dfcab403a3e
|
[
"MIT"
] | 4 |
2020-05-19T20:26:13.000Z
|
2021-06-25T15:27:47.000Z
|
pgcsv/db.py
|
pudo/pgcsv
|
9a6ae352da2ae3de5953b8b1f4c48dfcab403a3e
|
[
"MIT"
] | 6 |
2017-12-02T15:37:38.000Z
|
2021-07-21T15:19:02.000Z
|
from psycopg2 import connect
from psycopg2.sql import SQL, Identifier, Literal, Composed
from collections import OrderedDict
from itertools import count
from pgcsv.util import normalize_column
class Database(object):
def __init__(self, uri, table, headers):
self.conn = connect(uri)
self.table = normalize_column(table)
self._raw_headers = headers
@property
def headers(self):
if not hasattr(self, '_headers'):
self._headers = OrderedDict()
for name in self._raw_headers:
normalized = normalize_column(name)
if normalized is None or not len(normalized):
normalized = 'column'
column = normalized
for i in count(2):
if column not in self._headers:
break
column = '%s_%s' % (normalized, i)
self._headers[column] = name
return self._headers
def drop(self):
with self.conn.cursor() as cursor:
stmt = SQL('DROP TABLE IF EXISTS {};')
stmt = stmt.format(Identifier(self.table))
# print stmt.as_string(cursor)
cursor.execute(stmt)
self.conn.commit()
def sync(self):
with self.conn.cursor() as cursor:
stmt = SQL("CREATE TABLE IF NOT EXISTS {} ();")
stmt = stmt.format(Identifier(self.table))
# print stmt.as_string(cursor)
cursor.execute(stmt)
stmt = SQL("SELECT column_name FROM "
"information_schema.columns "
"WHERE table_name = %s;") # noqa
cursor.execute(stmt, (self.table,))
columns = [c[0] for c in cursor.fetchall()]
# columns = [c.decode(self.conn.encoding) for c in columns]
for column, label in self.headers.items():
if column not in columns:
stmt = SQL("ALTER TABLE {} ADD COLUMN {} TEXT;")
stmt = stmt.format(Identifier(self.table),
Identifier(column))
# print stmt.as_string(cursor)
cursor.execute(stmt)
stmt = SQL("COMMENT ON COLUMN {}.{} IS {};")
stmt = stmt.format(Identifier(self.table),
Identifier(column),
Literal(label))
cursor.execute(stmt)
self.conn.commit()
def load(self, fh, delimiter):
with self.conn.cursor() as cursor:
headers = list(self.headers.keys())
stmt = SQL("COPY {} ({}) FROM STDIN "
"WITH CSV HEADER DELIMITER AS {} NULL AS ''")
columns = Composed([Identifier(c) for c in headers])
columns = columns.join(', ')
stmt = stmt.format(Identifier(self.table),
columns,
Literal(delimiter))
print(stmt.as_string(cursor))
cursor.copy_expert(stmt, fh)
self.conn.commit()
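# Hedged usage sketch (not part of the original module). It assumes a
# reachable Postgres at the given URI and a CSV whose first row holds the
# headers; run as: python db.py data.csv my_table postgresql://...
if __name__ == '__main__':
    import csv
    import sys
    csv_path, table_name, db_uri = sys.argv[1:4]
    with open(csv_path) as fh:
        header_row = next(csv.reader(fh))
    db = Database(db_uri, table_name, header_row)
    db.sync()  # create the table and any missing columns
    with open(csv_path) as fh:
        db.load(fh, ',')  # COPY the full file, header row included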
| 39.075 | 71 | 0.519834 | 2,929 | 0.93698 | 0 | 0 | 597 | 0.190979 | 0 | 0 | 462 | 0.147793 |
34e6daa9b5c20ae4ca92bdc6ec1b8667b677185b
| 4,629 |
py
|
Python
|
rate_coeff.py
|
emilyng/Chemistry-Solver
|
f4f21dd37898d35d669f9d0223674e251a4c58dd
|
[
"MIT"
] | null | null | null |
rate_coeff.py
|
emilyng/Chemistry-Solver
|
f4f21dd37898d35d669f9d0223674e251a4c58dd
|
[
"MIT"
] | null | null | null |
rate_coeff.py
|
emilyng/Chemistry-Solver
|
f4f21dd37898d35d669f9d0223674e251a4c58dd
|
[
"MIT"
] | null | null | null |
##Rate Coefficients
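## The fits below appear to match the primordial-chemistry rate coefficients
## of Abel, Anninos, Zhang & Norman (1997); this attribution is an inference
## from the coefficient values, not stated in the source. T is in Kelvin,
## T_eV in eV.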
import numpy as np
def k1(T):
T_eV = T / 11605.
log_T_eV = np.log(T_eV)
rv = np.exp(-32.71396786375
+ 13.53655609057*log_T_eV
- 5.739328757388*log_T_eV**2
+ 1.563154982022*log_T_eV**3
- 0.2877056004391*log_T_eV**4
+ 0.03482559773736999*log_T_eV**5
- 0.00263197617559*log_T_eV**6
+ 0.0001119543953861*log_T_eV**7
- 2.039149852002e-6*log_T_eV**8)
return rv
def k2(T):
T_eV = T / 11605.
log_T_eV = np.log(T_eV)
rv = np.exp(-28.61303380689232
- 0.7241125657826851*log_T_eV
- 0.02026044731984691*log_T_eV**2
- 0.002380861877349834*log_T_eV**3
- 0.0003212605213188796*log_T_eV**4
- 0.00001421502914054107*log_T_eV**5
+ 4.989108920299513e-6*log_T_eV**6
+ 5.755614137575758e-7*log_T_eV**7
- 1.856767039775261e-8*log_T_eV**8
- 3.071135243196595e-9*log_T_eV**9)
return rv
def k3(T):
T_eV = T / 11605.
log_T_eV = np.log(T_eV)
rv = np.exp(-44.09864886561001
+ 23.91596563469*log_T_eV
- 10.75323019821*log_T_eV**2
+ 3.058038757198*log_T_eV**3
- 0.5685118909884001*log_T_eV**4
+ 0.06795391233790001*log_T_eV**5
- 0.005009056101857001*log_T_eV**6
+ 0.0002067236157507*log_T_eV**7
- 3.649161410833e-6*log_T_eV**8)
return rv
def k4(T):
T_eV = T / 11605.
rv = 3.92e-13/T_eV**0.6353
return rv
def k5(T):
T_eV = T / 11605.
log_T_eV = np.log(T_eV)
rv = np.exp(-68.71040990212001
+ 43.93347632635*log_T_eV
- 18.48066993568*log_T_eV**2
+ 4.701626486759002*log_T_eV**3
- 0.7692466334492*log_T_eV**4
+ 0.08113042097303*log_T_eV**5
- 0.005324020628287001*log_T_eV**6
+ 0.0001975705312221*log_T_eV**7
- 3.165581065665e-6*log_T_eV**8)
return rv
def k6(T):
    rv = 3.36e-10/np.sqrt(T)/(T/1.e3)**0.2/ (1.+(T/1.e6)**0.7)
return rv
def k7(T):
    rv = 3.0e-16 * (T/3.e2)**0.95 * np.exp(-T/9.32e3)
return rv
def k8(T):
rv = 1.35e-9*(T**9.8493e-2 + 3.2852e-1
* T**5.5610e-1 + 2.771e-7 * T**2.1826)/ (1. + 6.191e-3 * T**1.0461
+ 8.9712e-11 * T**3.0424
+ 3.2576e-14 * T**3.7741)
return rv
def k9(T):
rv = 2.10e-20 * (T/30.0)**(-0.15)
return rv
def k10(T):
rv = 6.0e-10
return rv
def k11(T):
T_eV = T / 11605.
log_T_eV = np.log(T_eV)
rv = np.exp(-24.24914687
+ 3.40082444*log_T_eV
- 3.89800396*log_T_eV**2
+ 2.04558782*log_T_eV**3
- 0.541618285*log_T_eV**4
+ 8.41077503*(10**-2)*log_T_eV**5
- 7.87902615*(10**-3)*log_T_eV**6
+ 4.13839842*(10**-4)*log_T_eV**7
- 9.36345888*(10**-6)*log_T_eV**8)
return rv
def k12(T):
rv = 5.6*(10**-11)*(T**0.5)*np.exp(-102124/T)
return rv
def k13(T):
T_eV = T / 11605.
    rv = 1.067*(10**-10)*(T_eV**2.012)*np.exp(-4.463/T_eV)/((1+0.2472*T_eV)**3.512)
return rv
def k14(T):
T_eV = T / 11605.
log_T_eV = np.log(T_eV)
rv = np.exp(-18.01849334
+ 2.3608522*log_T_eV
- 0.28274430*log_T_eV**2
                 + 1.62331664*(10**-2)*log_T_eV**3
                 - 3.36501203*(10**-2)*log_T_eV**4
+ 1.17832978*(10**-2)*log_T_eV**5
- 1.65619470*(10**-3)*log_T_eV**6
+ 1.06827520*(10**-4)*log_T_eV**7
- 2.63128581*(10**-6)*log_T_eV**8)
return rv
def k15(T):
T_eV = T / 11605.
log_T_eV = np.log(T_eV)
rv = np.exp(-20.37260896
+ 1.13944933*log_T_eV
- 0.14210135*log_T_eV**2
+ 8.4644554*(10**-3)*log_T_eV**3
- 1.4327641*(10**-3)*log_T_eV**4
+ 2.0122503*(10**-4)*log_T_eV**5
+ 8.6639632*(10**-5)*log_T_eV**6
- 2.5850097*(10**-5)*log_T_eV**7
+ 2.4555012*(10**-6)*log_T_eV**8
- 8.0683825*(10**-8)*log_T_eV**9)
return rv
def k16(T):
rv = 7*(10**-8)*((T/100)**-0.5)
return rv
def k17(T):
T_eV = T / 11605.
if T_eV < 1.719:
rv = 1.e-8*T**(-0.4)
else:
rv = 4.0e-4*T**(-1.4)*np.exp(-15100./T)
return rv
def k18(T):
if T < 617:
rv = 1.e-8
else:
rv = 1.32e-6 * T**(-0.76)
return rv
def k19(T):
rv = 5.e-7*np.sqrt(100./T)
return rv
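# Minimal usage sketch (not part of the original module). The reaction
# labels follow the usual k1..k19 numbering and are an assumption here.
if __name__ == '__main__':
    T = 1.0e4  # K
    print('k1 (H  + e -> H+ + 2e):   ', k1(T))
    print('k2 (H+ + e -> H + photon):', k2(T))
    print('k8 (H- + H -> H2 + e):    ', k8(T))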
| 27.885542 | 85 | 0.499244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.004105 |
34e6e2e24b84eeef879d6258960994f7f583e7ce
| 2,525 |
py
|
Python
|
control/thrust_vectoring.py
|
cuauv/software
|
5ad4d52d603f81a7f254f365d9b0fe636d03a260
|
[
"BSD-3-Clause"
] | 70 |
2015-11-16T18:04:01.000Z
|
2022-03-05T09:04:02.000Z
|
control/thrust_vectoring.py
|
cuauv/software
|
5ad4d52d603f81a7f254f365d9b0fe636d03a260
|
[
"BSD-3-Clause"
] | 1 |
2016-08-03T05:13:19.000Z
|
2016-08-03T06:19:39.000Z
|
control/thrust_vectoring.py
|
cuauv/software
|
5ad4d52d603f81a7f254f365d9b0fe636d03a260
|
[
"BSD-3-Clause"
] | 34 |
2015-12-15T17:29:23.000Z
|
2021-11-18T14:15:12.000Z
|
import math
import numpy as np
FULL_RANGE = 1024
class VectoringThrusterData(object):
def __init__(self, position, axis, start, angle, offset, _reversed = False):
vector_axis = np.array(axis)
vector_1 = np.array(start)
self.vector_axis = vector_axis / np.linalg.norm(vector_axis)
self.force_1 = vector_1 / np.linalg.norm(vector_1)
# Enforce orthogonality.
        assert abs(self.force_1.dot(self.vector_axis)) < 1e-8
# assert 0 < angle < 360
self.angle = math.radians(angle)
self.force_2 = np.cross(self.vector_axis, self.force_1)
self.torque_1 = np.cross(np.array(position), self.force_1)
self.torque_2 = np.cross(np.array(position), self.force_2)
self.torque_weight = 1
self.offset = offset
self._reversed = _reversed
# 1024 = 2pi rad
# self.rads_per_value = self.angle / (1024 / 2 / math.pi)
self.rads_per_value = 2 * math.pi / 1024
def angle_from_value(self, value):
return ((value - self.offset) % FULL_RANGE) * self.rads_per_value
def value_from_angle(self, angle):
return (int(round(angle / self.rads_per_value)) + self.offset) % FULL_RANGE
def get_angle(self, desired_output_s):
force = desired_output_s[:3]
torque = desired_output_s[3:]
        # Our thruster data (self.force_1, self.torque_1, etc.) is in sub
        # space. The angle will be the same if we convert the incoming forces
        # and torques, which are in world space, into sub space.
opt_angle = math.atan2(self.torque_2.dot(torque) * self.torque_weight + \
self.force_2.dot(force), \
self.torque_1.dot(torque) * self.torque_weight + \
self.force_1.dot(force))
# Currently vector motor offset by pi/2
if opt_angle < 0:
opt_angle += 2*math.pi
# Thrusters work both ways.
# TODO is it worth favoring positive thrust over negative thrust?
if opt_angle > math.pi:
opt_angle -= math.pi
# Find the angle that most closely matches the calculated.
# Note that if the angle of thrust vectoring is more than half of a
# circle i.e. more than pi, all the below checks will fail so this should
# work in all cases.
        # There is a legitimate potential here for oscillations when desires
# are equally between both the negative and positive thrust cones.
thresh = 0.5*(math.pi + self.angle)
if opt_angle >= thresh:
opt_angle = 0
elif self.angle < opt_angle < thresh:
opt_angle = self.angle
return opt_angle
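# Hedged usage sketch (not part of the original module; the geometry below
# is made up for illustration).
if __name__ == '__main__':
    # A stern thruster vectoring about the surge (x) axis, starting aligned
    # with sway (+y) and able to sweep half a circle.
    thruster = VectoringThrusterData(position=(-1.0, 0.0, 0.0),
                                     axis=(1.0, 0.0, 0.0),
                                     start=(0.0, 1.0, 0.0),
                                     angle=180, offset=0)
    # Desired wrench: pure +y force, no torque -> optimal angle is 0.
    desired = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0])
    print(thruster.get_angle(desired))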
| 34.589041 | 79 | 0.670099 | 2,473 | 0.979406 | 0 | 0 | 0 | 0 | 0 | 0 | 791 | 0.313267 |
34e7cf6e1775685d6271aef6702b1730ac2e95bc
| 1,956 |
py
|
Python
|
tests/unit/test_bad_cluster.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_bad_cluster.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/test_bad_cluster.py
|
ylipacbio/pbtranscript
|
6b4ef164f191ffd4201feb62b951d9eeac3315b6
|
[
"BSD-3-Clause"
] | 1 |
2021-02-26T10:08:09.000Z
|
2021-02-26T10:08:09.000Z
|
# XXX verification for bug 30828 - runs ice_pbdagcon on a spurious cluster
# and checks that it discards the resulting all-N consensus sequence
import subprocess
import tempfile
import unittest
import os.path as op
from pbcore.io import FastaReader
CLUSTER_FA = """\
>m54007_151222_230824/47383194/334_64_CCS
CATTGAAGACGTCCACCTCAACGCTATGAACGTTAGTTGAGACAATGTTAAAGCAAACGACAACGTCATTGTGATCTACATACACAGTGGATGGTTAGCGTAAACATGGTGGAACGTACTTTGACTGCGCTGCAAGAAATGGTTGGGTCGATCGTAATGCTAGTCGTTACATCGGAACAAGCCAAAACAAAATCATTCGCTGGATTTAGACCTACTGCACGACGACGTCGACACAAGACATTCTTGAAAGGTAATTGACGTGGACGTTTC
>m54007_151222_230824/28640158/287_60_CCS
CAAACGACAACGTCATTGTGATCTACATACACAGTGGATGGTTAGGCGTAAACATGGTGGGAACGTACTTTGACTGCGCTGCAAGAAATGGGTTGGGTCGATCGTAATGCTAGTCGTTACATCGGAACAAGCCAAAAAACAAACATCATTCGCTGGATTTAGACTACTACTGCACGACCGACGTCGACACAAGACATTCTCTGAAAGGTAATTGACGTGGACGTTTC
>m54007_151222_230824/49611437/382_58_CCS
ACTGAACTACGGGTCAGCTTCCCCATTTGAAGTCATGTAGTGGTTGTCTACTTTTTCATTGAGACGTCCACCTCAACGCTATGAACGTTAGTTGAGACAATGTTAAAGCAAACGACAACGTCATTGTGATCTACATACACAGTGGATGGTTAGCGTAAACATGGTGGAACGTACTTTGACTGCGCTGCAAGAAATGGTGTGGGTCGATCGTAATGCTAGTCGTTACATCGGAACAAGCCAAAACAAAATCATTCGCTGGATTTAGACCTACTGCACGACGACGTCGACACAAGACATTCTTGAAAGGTAATTGACGTGGACGTT"""
class TestBadCluster(unittest.TestCase):
def setUp(self):
self.cluster_fa = tempfile.NamedTemporaryFile(suffix=".fasta").name
with open(self.cluster_fa, "w") as fa_out:
fa_out.write(CLUSTER_FA)
def test_ice_pbdagcon_bad_cluster(self):
out_fa = tempfile.NamedTemporaryFile(suffix=".fasta").name
prefix = op.splitext(out_fa)[0]
args = [
"python", "-m", "pbtranscript.ice_pbdagcon",
self.cluster_fa,
prefix,
"c5006"
]
assert subprocess.call(args) == 0
with FastaReader(out_fa) as fa_out:
self.assertEqual(len([rec for rec in fa_out]), 0)
if __name__ == "__main__":
unittest.main()
| 44.454545 | 327 | 0.812883 | 680 | 0.347648 | 0 | 0 | 0 | 0 | 0 | 0 | 1,174 | 0.600204 |
34ea6bc99bdf93d5fbca0d7c5dabe8656d17800e
| 96 |
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/core/packages/constraints/union_constraint.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2 |
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/core/packages/constraints/union_constraint.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19 |
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/core/packages/constraints/union_constraint.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/a5/a1/10/06eab95524f667caa51362a09c577fd5f6d45980e5390034745c0a322f
| 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
34ebc2d0c5cd30f9146703ef59fd7175839c43d2
| 4,028 |
py
|
Python
|
test/unit3/test_docstring.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 2,671 |
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
test/unit3/test_docstring.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 972 |
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
test/unit3/test_docstring.py
|
timmartin/skulpt
|
2e3a3fbbaccc12baa29094a717ceec491a8a6750
|
[
"MIT"
] | 845 |
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
import unittest
def banana():
"Yellow"
return 42
def orange():
"Oran" "ge"
def blackbirds():
4 + 20 # Not a docstring
def do_nothing():
pass
def make_adder(n):
"Function adding N"
def add(x):
"Compute N + X"
return n + x
return add
class Strawberry:
"Delicious"
doc = __doc__
def weight(self):
"Heavy"
return 0.25
@classmethod
def is_red(cls):
"Very red"
return True
@staticmethod
def pick():
"Picked"
return None
class Tangerine:
def peel():
pass
class Pear:
"This will not be the __doc__"
__doc__ = "Conference"
class TestDocstrings(unittest.TestCase):
def test_builtin(self):
self.assertTrue(divmod.__doc__.startswith("Return the tuple (x//y, x%y)"))
def test_library_function(self):
# This test will need updating if/when random.seed gets a docstring.
# It also fails under cpython.
import random
self.assertEqual(random.seed.__doc__, None)
def test_function(self):
self.assertEqual(banana.__doc__, "Yellow")
self.assertEqual(orange.__doc__, "Orange")
self.assertEqual(make_adder.__doc__, "Function adding N")
def test_runtime_function(self):
self.assertEqual(make_adder(3).__doc__, "Compute N + X")
def test_non_string_expr(self):
self.assertEqual(blackbirds.__doc__, None)
def test_no_expr(self):
self.assertEqual(do_nothing.__doc__, None)
def test_class(self):
self.assertEqual(Strawberry.__doc__, "Delicious")
self.assertEqual(Strawberry.doc, "Delicious")
def test_class_no_docstring(self):
self.assertEqual(Tangerine.__doc__, None)
self.assertEqual(Tangerine.peel.__doc__, None)
self.assertEqual(Tangerine().peel.__doc__, None)
def test_class_explicit_docstring(self):
self.assertEqual(Pear.__doc__, "Conference")
def test_method(self):
self.assertEqual(Strawberry.weight.__doc__, "Heavy")
s = Strawberry()
self.assertEqual(s.weight.__doc__, "Heavy")
def test_classmethod(self):
self.assertEqual(Strawberry.is_red.__doc__, "Very red")
def test_staticmethod(self):
self.assertEqual(Strawberry.pick.__doc__, "Picked")
def test_module(self):
import bisect
self.assertEqual(bisect.__doc__, "Bisection algorithms.")
def test_local_module(self):
import copydocstring
self.assertEqual(copydocstring.__doc__, "Copy docstring")
self.assertEqual(copydocstring.doc, "Copy docstring")
def test_local_module_overwriting_docstring(self):
import overwritedocstring
self.assertEqual(overwritedocstring.__doc__, "Second docstring")
def test_lambda(self):
f = lambda x: 42
self.assertEqual(f.__doc__, None)
def test_setting_on_function(self):
def f():
"hello"
return 42
self.assertEqual(f.__doc__, "hello")
f.__doc__ = "world"
self.assertEqual(f.__doc__, "world")
# Setting to a non-string is odd but allowed:
f.__doc__ = 42
self.assertEqual(f.__doc__, 42)
        # Attempting to delete __doc__ instead sets it to None:
del f.__doc__
self.assertEqual(f.__doc__, None)
def test_setting_on_method(self):
class Banana:
def peel(self):
"Remove peel"
pass
self.assertEqual(Banana.peel.__doc__, "Remove peel")
# We can set the doc when accessed on the class:
Banana.peel.__doc__ = "Take out of peel"
self.assertEqual(Banana.peel.__doc__, "Take out of peel")
# But not when accessed via an instance:
def set_on_method_of_instance():
banana = Banana()
banana.peel.__doc__ = "this will not work"
self.assertRaises(AttributeError, set_on_method_of_instance)
if __name__ == '__main__':
unittest.main()
| 24.711656 | 82 | 0.638034 | 3,676 | 0.912612 | 0 | 0 | 138 | 0.03426 | 0 | 0 | 771 | 0.19141 |
34ebcfd140d8b8342551373bb548dcc3e38235a3
| 11,609 |
py
|
Python
|
mixer.py
|
ejhumphrey/mixer_bingo
|
d78174384e4476de70348d3e17a72d45ff04d960
|
[
"0BSD"
] | null | null | null |
mixer.py
|
ejhumphrey/mixer_bingo
|
d78174384e4476de70348d3e17a72d45ff04d960
|
[
"0BSD"
] | null | null | null |
mixer.py
|
ejhumphrey/mixer_bingo
|
d78174384e4476de70348d3e17a72d45ff04d960
|
[
"0BSD"
] | null | null | null |
from __future__ import print_function
import argparse
import json
import jsonschema
import logging
import numpy as np
import networkx as nx
import os
import pandas as pd
import random
import sys
logger = logging.getLogger(name=__file__)
def _load_schema():
schema_file = os.path.join(os.path.dirname(__file__),
'participant_schema.json')
return json.load(open(schema_file))
__SCHEMA__ = _load_schema()
def validate(participant_data):
"""Check that a number of records conforms to the expected format.
Parameters
----------
participant_data : array_like of dicts
Collection of user records to validate.
Returns
-------
is_valid : bool
True if the provided data validates.
"""
is_valid = True
try:
jsonschema.validate(participant_data, __SCHEMA__)
except jsonschema.ValidationError as failed:
logger.debug("Schema Validation Failed: {}".format(failed))
is_valid = False
return is_valid
def tokenize(records):
"""Create a token mapping from objects to integers.
Parameters
----------
records : array_like of iterables.
Collection of nested arrays.
Returns
-------
enum_map : dict
Enumeration map of objects (any hashable) to tokens (int).
"""
unique_items = set(i for row in records for i in row)
unique_items = sorted(list(unique_items))
return dict([(k, n) for n, k in enumerate(unique_items)])
def items_to_bitmap(records, enum_map=None):
"""Turn a collection of sparse items into a binary bitmap.
Parameters
----------
records : iterable of iterables, len=n
Items to represent as a matrix.
enum_map : dict, or None, len=k
Token mapping items to ints; if None, one will be generated and
returned.
Returns
-------
bitmap : np.ndarray, shape=(n, k)
Active items.
enum_map : dict
Mapping of items to integers, if one is not given.
"""
return_mapping = False
if enum_map is None:
enum_map = tokenize(records)
return_mapping = True
bitmap = np.zeros([len(records), len(enum_map)], dtype=bool)
for idx, row in enumerate(records):
for i in row:
bitmap[idx, enum_map[i]] = True
    return (bitmap, enum_map) if return_mapping else bitmap
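# Illustrative example (not in the original module):
#   bitmap, enum_map = items_to_bitmap([['mir', 'dsp'], ['dsp']])
#   bitmap.tolist() -> [[True, True], [True, False]]
#   enum_map -> {'dsp': 0, 'mir': 1}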
def categorical_sample(pdf):
"""Randomly select a categorical index of a given PDF.
    Parameters
    ----------
    pdf : np.ndarray
        Unnormalized category weights; normalized before sampling.
    Returns
    -------
    idx : int
        Index of the sampled category.
    """
pdf = pdf / pdf.sum()
return int(np.random.multinomial(1, pdf).nonzero()[0])
WEIGHTING_FUNCTIONS = {
'l0': lambda x: float(np.sum(x) > 0),
'l1': lambda x: float(np.sum(x)),
'mean': lambda x: float(np.mean(x)),
    'null': lambda x: 0.0,
'euclidean': lambda x: np.sqrt(x),
'norm_euclidean': lambda x: np.sqrt(x) / 3.0,
'quadratic': lambda x: x,
'norm_quadratic': lambda x: x / 9.0
}
def build_graph(records, forced_edges=None, null_edges=None,
interest_func='l0', seniority_func='l0',
combination_func=np.sum):
"""writeme
Parameters
----------
data: pd.DataFrame
Loaded participant records
forced_edges: np.ndarray, or None
One-hot assignment matrix; no row or column can sum to more than one.
null_edges: np.ndarray, or None
Matches to set to zero.
interest_func: str
'l1', 'l0'
seniority_func: str
'l1', 'l0'
combination_func: function
Numpy functions, e.g. prod, sum, max.
Returns
-------
graph : networkx.Graph
Connected graph to be factored.
"""
if not isinstance(records, pd.DataFrame):
records = pd.DataFrame(records)
interest_bitmap, interest_enum = items_to_bitmap(records.interests)
# Coerce null / forced edges for datatype compliance.
null_edges = ([] if null_edges is None
else [tuple(v) for v in null_edges])
forced_edges = ([] if forced_edges is None
else [tuple(v) for v in forced_edges])
graph = nx.Graph()
for i, row_i in records.iterrows():
for j, row_j in records.iterrows():
# Skip self, shared affiliations, or same grouping
skip_conditions = [i == j,
(i, j) in null_edges,
(j, i) in null_edges,
row_i.affiliation == row_j.affiliation]
if any(skip_conditions):
continue
# Interest weighting
interest_weight = WEIGHTING_FUNCTIONS[interest_func](
interest_bitmap[i] * interest_bitmap[j])
# Seniority weighting
seniority_weight = WEIGHTING_FUNCTIONS[seniority_func](
(row_i.seniority - row_j.seniority) ** 2.0)
if (i, j) in forced_edges or (j, i) in forced_edges:
weights = [2.0 ** 32]
else:
weights = [interest_weight, seniority_weight]
graph.add_weighted_edges_from([(i, j, combination_func(weights))])
return graph
def harmonic_mean(values):
"""writeme
Parameters
----------
x
Returns
-------
y
"""
return np.power(np.prod(values), 1.0 / len(values))
def select_matches(records, k_matches=5, forced_edges=None, null_edges=None,
interest_func='l0', seniority_func='l0',
combination_func=np.sum, seed=None):
"""Pick affinity matches, and back-fill randomly if under-populated.
    Parameters
    ----------
    records : pd.DataFrame or array_like of dicts
        Participant records.
    k_matches : int
        Number of matches to pick per participant.
    forced_edges, null_edges : array_like of index pairs, or None
        Matches to force or forbid, respectively.
    interest_func, seniority_func : str
        Keys into WEIGHTING_FUNCTIONS.
    combination_func : function
        Reducer over the component weights, e.g. np.sum.
    seed : int, or None
        Seed for the random back-fill stage.
    Returns
    -------
    matches : dict of sets
        Map from a record's index to the indices of its matches.
    """
null_edges = ([] if null_edges is None
else [tuple(v) for v in null_edges])
forced_edges = ([] if forced_edges is None
else [tuple(v) for v in forced_edges])
matches = {i: set() for i in range(len(records))}
for k in range(k_matches):
graph = build_graph(
records, null_edges=null_edges, forced_edges=forced_edges,
seniority_func='quadratic', interest_func='mean',
combination_func=np.mean)
forced_edges = None
links = nx.max_weight_matching(graph)
for row, col in links.items():
            null_edges += [(row, col)]
matches[row].add(col)
catch_count = 0
rng = np.random.RandomState(seed=seed)
for row in matches:
        possible_matches = set(range(len(records)))
        possible_matches = possible_matches.difference(matches[row] | {row})
while len(matches[row]) != k_matches:
            col = rng.choice(sorted(possible_matches))
matches[row].add(col)
null_edges += [(row, col)]
catch_count += 1
logger.debug("backfilled %d" % catch_count)
return matches
def select_topic(row_a, row_b):
"""writeme
Parameters
----------
x
Returns
-------
y
"""
topics_a = parse_interests(row_a[7])
topics_b = parse_interests(row_b[7])
topics = list(set(topics_a).intersection(set(topics_b)))
if topics:
return topics[categorical_sample(np.ones(len(topics)))]
TEXT_FMTS = [
("Find someone from %s.", 'affiliation'),
("Find someone currently located in %s.", 'country'),
("Find someone who is an expert on %s", 'topics'),
("Find someone in academia at the %s level", 'education')]
TEXT = [
"Find someone who works in industry",
"Introduce someone to someone else",
"Help someone solve a square",
"Find someone who plays an instrument.",
"Find someone who has attended ISMIR for more than 5 years",
"Find someone for which this is their first ISMIR"]
def generate_text(rows, target_idx, matches, num_outputs=24):
outputs = []
for match_idx in matches[target_idx]:
outputs.append("Talk to %s" % rows[match_idx][1])
outputs.extend(TEXT)
categories = {
'affiliation': get_affilations(rows),
'education': get_education(rows),
'topics': get_topics(rows),
'country': get_countries(rows)
}
while len(outputs) < num_outputs:
fmt, key = random.choice(TEXT_FMTS)
value = random.choice(categories[key])
outputs.append(fmt % value)
return outputs
def make_card(name, contents, outfile):
"""writeme
Parameters
----------
x
Returns
-------
y
"""
tex_lines = []
tex_lines.append(r'\documentclass[10pt, a4paper]{article}')
tex_lines.append(r'\usepackage{tikz}')
tex_lines.append(r'\usepackage{fullpage}')
tex_lines.append(r'\usetikzlibrary{positioning,matrix}')
tex_lines.append(r'\renewcommand*{\familydefault}{\sfdefault}')
tex_lines.append(r'\usepackage{array}')
tex_lines.append(r'\begin{document}')
tex_lines.append(r'\pagestyle{empty}')
tex_lines.append(r'\begin{center}')
tex_lines.append(r'\Huge ISMIR 2014 Mixer Bingo\\')
tex_lines.append(r"\bigskip \huge \emph{%s} \\" % name)
tex_lines.append(r'\normalsize')
tex_lines.append(r'')
tex_lines.append(r'\bigskip')
random.shuffle(contents)
c = contents[0:12] + [r'FREE'] + contents[12:24]
tex_lines.append(r'\begin{tikzpicture}')
tex_lines.append(r"""\tikzset{square matrix/.style={
matrix of nodes,
column sep=-\pgflinewidth, row sep=-\pgflinewidth,
nodes={draw,
text height=#1/2-2.5em,
text depth=#1/2+2.5em,
text width=#1,
align=center,
inner sep=0pt
},
},
square matrix/.default=3.2cm
}""")
tex_lines.append(r'\matrix [square matrix]')
tex_lines.append(r'(shi)')
tex_lines.append(r'{')
tex_lines.append(
r"%s & %s & %s & %s & %s\\" % (c[0], c[1], c[2], c[3], c[4]))
tex_lines.append(
r"%s & %s & %s & %s & %s\\" % (c[5], c[6], c[7], c[8], c[9]))
tex_lines.append(
r"%s & %s & %s & %s & %s\\" % (c[10], c[11], c[12], c[13], c[14]))
tex_lines.append(
r"%s & %s & %s & %s & %s\\" % (c[15], c[16], c[17], c[18], c[19]))
tex_lines.append(
r"%s & %s & %s & %s & %s\\" % (c[20], c[21], c[22], c[23], c[24]))
tex_lines.append(r'};')
tex_lines.append(r'\foreach \i in {1,2,3,4,5}')
tex_lines.append(
r'\draw[line width=2pt] (shi-1-\i.north east) -- (shi-5-\i.south east);')
tex_lines.append(
r'\foreach \i in {1,2,3,4,5}')
tex_lines.append(
r'\draw[line width=2pt] (shi-1-\i.north west) -- (shi-5-\i.south west);')
tex_lines.append(
r'\foreach \i in {1,2,3,4,5}')
tex_lines.append(
r'\draw[line width=2pt] (shi-\i-1.north west) -- (shi-\i-5.north east);')
tex_lines.append(
r'\foreach \i in {1,2,3,4,5}')
tex_lines.append(
r'\draw[line width=2pt] (shi-\i-1.south west) -- (shi-\i-5.south east);')
tex_lines.append(r'\end{tikzpicture}')
tex_lines.append('')
tex_lines.append(r'\pagebreak')
tex_lines.append('')
tex_lines.append(r'\end{center}')
tex_lines.append(r'\end{document}')
with open(outfile, 'w') as f:
for line in tex_lines:
f.write("%s\n" % line)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('a',
help='writeme')
parser.add_argument('--b', type=str,
default='apple',
help='writeme')
parser.add_argument('--verbose', action='store_true',
help='Print progress to the console.')
args = parser.parse_args()
sys.exit(0)
| 27.839329 | 81 | 0.593591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,341 | 0.373934 |
34f18dca2b35de2acae03f31c1e789e6be8e839f
| 756 |
py
|
Python
|
mwthesaurus/model.py
|
PederHA/mwthesaurus
|
c58d9bdf82fce906c8a2c908b803b2a9db4bc0a2
|
[
"MIT"
] | null | null | null |
mwthesaurus/model.py
|
PederHA/mwthesaurus
|
c58d9bdf82fce906c8a2c908b803b2a9db4bc0a2
|
[
"MIT"
] | null | null | null |
mwthesaurus/model.py
|
PederHA/mwthesaurus
|
c58d9bdf82fce906c8a2c908b803b2a9db4bc0a2
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List
from itertools import chain
@dataclass
class Word:
word: str
wordtype: str
shortdef: List[str] = field(default_factory=list)
synonyms: List[str] = field(default_factory=list)
antonyms: List[str] = field(default_factory=list)
stems: List[str] = field(default_factory=list)
@classmethod
    def from_response(cls, r: dict) -> "Word":
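        # 'r' is assumed to be a single entry from the Merriam-Webster
        # thesaurus API; the keys read below (meta.id, fl, shortdef,
        # meta.syns/ants/stems) reflect that assumed response shape.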
obj = cls.__new__(cls)
obj.word = r["meta"]["id"]
obj.wordtype = r["fl"]
obj.shortdef = r["shortdef"]
obj.synonyms = list(chain.from_iterable(r["meta"]["syns"]))
obj.antonyms = list(chain.from_iterable(r["meta"]["ants"]))
obj.stems = r["meta"]["stems"]
return obj
| 30.24 | 67 | 0.640212 | 649 | 0.858466 | 0 | 0 | 660 | 0.873016 | 0 | 0 | 61 | 0.080688 |
34f1d3eabe979cf0b88b9e39d396c23d11f9013e
| 9,487 |
py
|
Python
|
bouncingball_cortix.py
|
seamuss1/BouncingBall
|
6c4ff0838fa0366798efd8922c2632a8bfa5f15b
|
[
"MIT"
] | 1 |
2019-08-11T23:55:05.000Z
|
2019-08-11T23:55:05.000Z
|
bouncingball_cortix.py
|
seamuss1/BouncingBall
|
6c4ff0838fa0366798efd8922c2632a8bfa5f15b
|
[
"MIT"
] | null | null | null |
bouncingball_cortix.py
|
seamuss1/BouncingBall
|
6c4ff0838fa0366798efd8922c2632a8bfa5f15b
|
[
"MIT"
] | null | null | null |
import os, time, datetime, threading, random
import numpy as np
import matplotlib.pyplot as plt
import sys
from cortix.src.module import Module
from cortix.src.port import Port
from cortix.src.cortix_main import Cortix
import time
from bb_plot import Plot
import shapely.geometry as geo
import shapely.ops
from shapely import affinity
class BouncingBall(Module):
def __init__(self,shape=None, runtime=10):
super().__init__()
self.shape = shape
self.runtime = runtime
self.bndry = []
coords = list(self.shape.exterior.coords)
        #Parse the boundary (LinearRing) to create a list of line obstacles
for c,f in enumerate(coords):
try:
cr = geo.LineString([coords[c],coords[c+1]])
except IndexError:
cr = geo.LineString([coords[c],coords[-1]])
break
self.bndry.append(cr)
bn = self.shape.bounds
self.r=1.0
for i in range(100): #Attempt to spawn ball within boundary
self.p0 = [random.uniform(bn[0],bn[2]),random.uniform(bn[1],bn[3])]
self.pnt = geo.point.Point(self.p0[0],self.p0[1])
self.circle = self.pnt.buffer(self.r)
if self.shape.contains(self.circle):
break
self.v0 = [random.uniform(-50,50),random.uniform(-30,30)]
self.cor = 0.95
self.a = (0,-9.81)
self.m = 1
self.KE = 0.5*self.m*((self.v0[0]**2+self.v0[1]**2)**0.5)**2
self.timestamp=str(datetime.datetime.now())
#Customize container class that is sent to other modules
self.messenger = Messenger()
self.messenger.circle = self.circle
self.messenger.timestamp = self.timestamp
self.messenger.m,self.messenger.r = 1,1
self.messenger.v = self.v0
self.messenger.p = self.p0
def run(self):
t = 0.01
self.elapsed, oe = 0,0
its = round(self.runtime/t)
portdic = dict()
for i in self.ports: #Send initial properties
if 'plot' not in str(i):
self.send(self.messenger,i)
for i in self.ports:
if 'plot' not in str(i):
portdic[str(i)] = self.recv(i)
for i in range(its):
self.elapsed += t
if oe != int(self.elapsed):
print('Time Elapsed: ', int(self.elapsed),'seconds\nVelocity: ', str(round(self.v0[0],2))+'i +'+str(round(self.v0[1],2))+'j')
oe = int(self.elapsed)
#Gravity calculations for timestep
self.p0[1] = 0.5*self.a[1]*t**2+self.v0[1]*t+self.p0[1]
self.p0[0] = 0.5*self.a[0]*t**2+self.v0[0]*t+self.p0[0]
self.v0[1] = self.a[1]*t + self.v0[1]
self.v0[0] = self.a[0]*t + self.v0[0]
#Update position and velocity variables
self.pnt = geo.point.Point(self.p0[0],self.p0[1])
self.circle = self.pnt.buffer(self.r)
self.messenger.v = self.v0
for shape in self.bndry: #Detects collision with boundary
if self.circle.crosses(shape) or self.circle.touches(shape) or self.circle.intersects(shape):
self.wall_collision(shape)
for name in portdic: #Detects collision with other objects
messenger = portdic[name]
shape = portdic[name].circle
ts = portdic[name].timestamp
for line in portdic[name].collision: #Undetected Collisions received as a message
if self.timestamp == line:
self.ball_collision(messenger)
if self.circle.crosses(shape) or self.circle.touches(shape) or self.circle.intersects(shape):
self.ball_shift(shape)
#Reacts to intersection between this object and another
if self.circle.crosses(shape) or self.circle.touches(shape) or self.circle.intersects(shape):
self.ball_collision(messenger)
self.ball_shift(shape)
self.messenger.collision.append(ts)
self.messenger.circle = self.circle
self.messenger.p = self.p0
for i in self.ports: #Send and receive messages for each timestep
self.send(self.messenger,i)
for i in self.ports:
if 'plot' in str(i): #Not receiving messages from plotting
continue
messenger = self.recv(i)
portdic[str(i)] = messenger
self.messenger.collision = [] #Reset list of collisions
for i in self.ports: #Send 'done' string to plot module as end condition
if 'plot' in str(i):
self.send('done',i)
print('Time Elapsed: ', self.elapsed,'seconds\nVelocity: ', str(round(self.v0[0],2))+'i +'+str(round(self.v0[1],2))+'j')
print('done')
return
def wall_collision(self,shape):
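        # Push the ball back outside the wall along the contact normal, then
        # reflect the velocity: split v into components along and across the
        # wall, flip the normal component, and damp it by the coefficient of
        # restitution (self.cor).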
p1,p2 = shapely.ops.nearest_points(self.pnt,shape)
angle3 = np.arctan2(p2.y - p1.y, p2.x - p1.x)
d = shape.distance(self.pnt)
self.p0 = [self.p0[0]-(self.r-d)*np.cos(angle3), self.p0[1]-(self.r-d)*np.sin(angle3)]
self.circle = self.pnt.buffer(self.r)
angle2 = np.arctan2(self.v0[1], self.v0[0])
v = (self.v0[0]**2+self.v0[1]**2)**0.5
theta = angle2-angle3
vbi, vbj = v*np.sin(theta), v*np.cos(theta)
vbj = -vbj *self.cor
v = (vbi**2+vbj**2)**0.5
angle4 = np.arctan2(vbj, vbi)
angle1 = angle4 - angle3
self.v0 = [np.sin(angle1)*v, np.cos(angle1)*v]
def ball_shift(self,shape):
p1,p2 = shapely.ops.nearest_points(self.pnt,shape)
angle = np.arctan2(p2.y - p1.y, p2.x - p1.x)
d = shape.distance(self.pnt)
self.p0 = [self.p0[0]-(self.r*1.01-d)*np.cos(angle),self.p0[1]-(self.r*1.01-d)*np.sin(angle)]
self.pnt = geo.point.Point(self.p0[0],self.p0[1])
self.circle = self.pnt.buffer(self.r)
def ball_collision(self,messenger):
shape = messenger.circle
v2,m = messenger.v,messenger.m
v3 = (v2[0]**2+v2[1]**2)**0.5
phi = np.arctan2(v2[1],v2[0])
p1,p2 = shapely.ops.nearest_points(self.pnt,shape)
angle = np.arctan2(p2.y - p1.y, p2.x - p1.x)
angle2 = np.arctan2(self.v0[1], self.v0[0])
v = (self.v0[0]**2+self.v0[1]**2)**0.5
#Equation source: https://en.wikipedia.org/wiki/Elastic_collision
vpx=((v*np.cos(angle2-angle)*(self.m-m)+2*m*v3*np.cos(phi-angle))/(self.m+m))*np.cos(angle)+v*np.sin(angle2-angle)*np.cos(angle+np.pi/2)
vpy=((v*np.cos(angle2-angle)*(self.m-m)+2*m*v3*np.cos(phi-angle))/(self.m+m))*np.sin(angle)+v*np.sin(angle2-angle)*np.sin(angle+np.pi/2)
vp = (vpx**2+vpy**2)**0.5
self.v0 = [vpx,vpy]
print('Ball collision')
class Messenger:
    def __init__(self, circle=None, collision=None, timestamp='0'):
        self.circle = circle
        # Avoid a mutable default argument: a shared default list would leak
        # collision records between Messenger instances.
        self.collision = collision if collision is not None else []
self.timestamp = timestamp
self.m = 1
self.r = 1
self.v = []
self.p = []
#Example driver script
if __name__ == '__main__':
cortix = Cortix(use_mpi=False)
mod_list = []
    shapes = ['triangle', 'square', 'diamond']
while True:
print('Choose a shape: 1) Triangle, 2) Square, or 3) Diamond\n')
shape = input('>>>')
shape = shape.lower()
if shape == 'triangle' or shape =='1':
shape = geo.Polygon([(0, 0), (0, 60), (30, 30)])
break
if shape == 'square' or shape =='2':
shape = geo.box(-30,0,30,50)
break
        if shape == 'diamond' or shape == '3':
shape = geo.box(-30,0,30,50)
shape = affinity.rotate(shape,45)
break
print('Input not recognized, try again')
while True:
print('Choose the number of Bouncing Balls\n')
balls = input('>>>')
try:
balls = int(balls)
if balls > 1000:
print('Wow good luck')
elif balls > 0:
break
else:
print('Choose a better number')
        except ValueError:
print('Entry invalid')
while True:
print('How many seconds is the simulation?\n')
secs = input('>>>')
try:
secs = int(secs)
if secs > 50000:
print('Wow good luck')
elif secs > 0:
break
else:
print('Choose a better number')
        except ValueError:
print('Entry invalid')
plot = Plot(shape=shape, length=balls)
cortix.add_module(plot)
for i in range(balls):
time.sleep(0.01)
app = BouncingBall(shape,runtime=secs)
mod_list.append(app)
cortix.add_module(app)
for c,i in enumerate(mod_list):
i.connect('plot-send{}'.format(c),plot.get_port('plot-receive{}'.format(c)))
for j in mod_list:
if i == j:
continue
name = '{}{}'.format(i.timestamp,j.timestamp)
name2 = '{}{}'.format(j.timestamp,i.timestamp)
j.connect(name, i.get_port(name2))
cortix.draw_network('network_graph.png')
cortix.run()
print('bye')
| 39.529167 | 144 | 0.54211 | 6,915 | 0.728892 | 0 | 0 | 0 | 0 | 0 | 0 | 1,246 | 0.131338 |
34f2d98b52c7839e3432965351129274c2ecd039
| 23,904 |
py
|
Python
|
pgimp/GimpFileCollectionTest.py
|
netogallo/pgimp
|
bb86254983e1673d702e1fa2ed207166fd15ec65
|
[
"MIT"
] | 5 |
2018-10-29T10:09:37.000Z
|
2020-12-28T04:47:32.000Z
|
pgimp/GimpFileCollectionTest.py
|
netogallo/pgimp
|
bb86254983e1673d702e1fa2ed207166fd15ec65
|
[
"MIT"
] | 1 |
2020-10-21T18:35:44.000Z
|
2021-06-17T06:27:26.000Z
|
pgimp/GimpFileCollectionTest.py
|
netogallo/pgimp
|
bb86254983e1673d702e1fa2ed207166fd15ec65
|
[
"MIT"
] | 4 |
2019-09-20T05:14:39.000Z
|
2021-04-05T01:55:47.000Z
|
# Copyright 2018 Mathias Burger <[email protected]>
#
# SPDX-License-Identifier: MIT
import os
import shutil
import textwrap
from tempfile import TemporaryDirectory
import numpy as np
import pytest
from pgimp.GimpFile import GimpFile, GimpFileType
from pgimp.GimpFileCollection import GimpFileCollection, NonExistingPathComponentException, \
GimpMissingRequiredParameterException, MaskForegroundColor
from pgimp.util import file
from pgimp.util.TempFile import TempFile
from pgimp.util.string import escape_single_quotes
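# The GIMP scripts below are run by GimpFileCollection, which substitutes the
# '__file__' / '__files__' placeholders before execution; results come back
# through the return_bool/return_json parameter helpers.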
def test_create_from_pathname_with_file():
prefix = file.relative_to(__file__, 'test-resources/files/')
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/first'))
assert len(collection.get_files()) == 1
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/first.xcf'))
assert len(collection.get_files()) == 1
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/first.png'))
assert len(collection.get_files()) == 0
assert '' == collection.get_prefix()
def test_create_from_pathname_with_directory():
prefix = file.relative_to(__file__, 'test-resources/files/')
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files'))
assert len(collection.get_files()) == 2
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/'))
assert len(collection.get_files()) == 2
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/*'))
assert len(collection.get_files()) == 2
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/*.xcf'))
assert len(collection.get_files()) == 2
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/*.png'))
assert len(collection.get_files()) == 0
assert '' == collection.get_prefix()
def test_create_from_pathname_with_recursive_match():
prefix = file.relative_to(__file__, 'test-resources/files/')
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
assert len(collection.get_files()) == 4
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**/'))
assert len(collection.get_files()) == 4
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**/*'))
assert len(collection.get_files()) == 4
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**/*.xcf'))
assert len(collection.get_files()) == 4
assert prefix == collection.get_prefix()
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**/*.png'))
assert len(collection.get_files()) == 0
assert '' == collection.get_prefix()
def test_ordering():
prefix = file.relative_to(__file__, 'test-resources/files/')
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
collection = collection.replace_prefix(prefix)
assert [
'first.xcf',
'second.xcf',
'a/third.xcf',
'a/b/fourth.xcf',
] == collection.get_files()
def test_replace_path_components():
prefix = file.relative_to(__file__, 'test-resources/files/')
suffix = '.xcf'
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
collection = collection.replace_path_components(prefix, '#', suffix, '%')
assert [
'#first%.xcf',
'#second%.xcf',
'#a/third%.xcf',
'#a/b/fourth%.xcf',
] == collection.get_files()
def test_replace_path_components_with_non_existing_component():
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
with pytest.raises(NonExistingPathComponentException):
collection.replace_path_components('wrong_prefix', '#')
def test_replace_path_components_without_replacements():
collection = GimpFileCollection.create_from_pathname(file.relative_to(__file__, 'test-resources/files/**'))
files_before = collection.get_files()
collection = collection.replace_path_components()
files_after = collection.get_files()
assert files_before == files_after
def test_find_files_containing_layer_by_predictate():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
files = collection.find_files_containing_layer_by_predictate(
lambda layers: 'White' in map(lambda layer: layer.name, layers)
)
assert len(files) == 1
assert with_white == files[0]
files = collection.find_files_containing_layer_by_predictate(
lambda layers: 'Not existing' in map(lambda layer: layer.name, layers)
)
assert len(files) == 0
def test_find_files_containing_layer_by_name():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
files = collection.find_files_containing_layer_by_name('White', timeout_in_seconds=10)
assert len(files) == 1
assert with_white == files[0]
files = collection.find_files_containing_layer_by_name('Not existing', timeout_in_seconds=10)
assert len(files) == 0
def test_find_files_by_script_with_script_that_takes_single_file():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
from pgimp.gimp.file import open_xcf
from pgimp.gimp.parameter import return_bool
image = open_xcf('__file__')
for layer in image.layers:
if layer.name == '{0:s}':
return_bool(True)
return_bool(False)
"""
)
files = collection.find_files_by_script(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
files = collection.find_files_by_script(script.format(escape_single_quotes('Not existing')), timeout_in_seconds=3)
assert len(files) == 0
def test_find_files_by_script_with_script_that_takes_multiple_files():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
import gimp
from pgimp.gimp.file import XcfFile
from pgimp.gimp.parameter import return_json, get_json
files = get_json('__files__')
matches = []
for file in files:
with XcfFile(file) as image:
for layer in image.layers:
if layer.name == '{0:s}':
matches.append(file)
return_json(matches)
"""
)
files = collection.find_files_by_script(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
files = collection.find_files_by_script(script.format(escape_single_quotes('Not existing')), timeout_in_seconds=3)
assert len(files) == 0
def test_find_files_by_script_without_required_parameters():
collection = GimpFileCollection([])
script = textwrap.dedent(
"""
print(1)
"""
)
with pytest.raises(GimpMissingRequiredParameterException):
collection.find_files_by_script(script, timeout_in_seconds=3)
def test_execute_script_and_return_json_with_script_that_takes_single_file():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
from pgimp.gimp.file import open_xcf
from pgimp.gimp.parameter import return_json
image = open_xcf('__file__')
for layer in image.layers:
if layer.name == '{0:s}':
return_json(True)
return_json(False)
"""
)
files = collection.execute_script_and_return_json(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert {
with_white: True,
without_white: False,
} == files
def test_execute_script_and_return_json_with_script_that_takes_multiple_files_using_open():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
import gimp
from pgimp.gimp.file import open_xcf
from pgimp.gimp.parameter import return_json, get_json
files = get_json('__files__')
matches = []
for file in files:
image = open_xcf(file)
for layer in image.layers:
if layer.name == '{0:s}':
matches.append(file)
gimp.pdb.gimp_image_delete(image)
return_json(matches)
"""
)
files = collection.execute_script_and_return_json(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
def test_execute_script_and_return_json_with_script_that_takes_multiple_files_using_xcf_file():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white)\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
import gimp
from pgimp.gimp.file import XcfFile
from pgimp.gimp.parameter import return_json, get_json
files = get_json('__files__')
matches = []
for file in files:
with XcfFile(file) as image:
for layer in image.layers:
if layer.name == '{0:s}':
matches.append(file)
return_json(matches)
"""
)
files = collection.execute_script_and_return_json(script.format(escape_single_quotes('White')), timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
def test_execute_script_and_return_json_with_script_that_takes_multiple_files_using_for_each():
with TempFile('.xcf') as with_white, TempFile('.xcf') as without_white:
GimpFile(with_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8) * 255)
GimpFile(without_white) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('Black', np.zeros(shape=(1, 1), dtype=np.uint8))
collection = GimpFileCollection([with_white, without_white])
script = textwrap.dedent(
"""
from pgimp.gimp.file import for_each_file
from pgimp.gimp.parameter import return_json, get_json
matches = []
def layer_matches(image, file):
for layer in image.layers:
if layer.name == '{0:s}':
matches.append(file)
for_each_file(layer_matches)
return_json(matches)
"""
)
files = collection.execute_script_and_return_json(script.format(escape_single_quotes('White')),
timeout_in_seconds=3)
assert len(files) == 1
assert with_white == files[0]
def test_copy_layer_from():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf'))\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))\
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
src_2 = GimpFile(os.path.join(srcdir, 'file2.xcf'))\
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('White', np.ones(shape=(1, 1), dtype=np.uint8)*255)
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8)) \
.add_layer_from_numpy('White', np.zeros(shape=(1, 1), dtype=np.uint8)*255)
dst_2 = GimpFile(os.path.join(dstdir, 'file2.xcf')) \
.create('Background', np.zeros(shape=(1, 1), dtype=np.uint8))
src_collection = GimpFileCollection([src_1.get_file(), src_2.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file(), dst_2.get_file()])
dst_collection.copy_layer_from(src_collection, 'White', layer_position=1, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('White') == 255)
assert ['Background', 'White'] == dst_1.layer_names()
assert 'White' in dst_2.layer_names()
assert np.all(dst_2.layer_to_numpy('White') == 255)
assert ['Background', 'White'] == dst_2.layer_names()
def test_merge_mask_layer_from_with_grayscale_and_foreground_color_white():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf'))\
.create('Mask', np.array([[255, 0]], dtype=np.uint8))
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create('Mask', np.array([[0, 255]], dtype=np.uint8))
dst_2 = GimpFile(os.path.join(dstdir, 'file2.xcf')) \
.create('Mask', np.array([[0, 255]], dtype=np.uint8))
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file(), dst_2.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.WHITE, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[255], [255]])
assert ['Mask'] == dst_1.layer_names()
assert 'Mask' in dst_2.layer_names()
assert np.all(dst_2.layer_to_numpy('Mask') == [[0], [255]])
assert ['Mask'] == dst_2.layer_names()
def test_merge_mask_layer_from_with_grayscale_and_foreground_color_black():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf'))\
.create('Mask', np.array([[255, 0]], dtype=np.uint8))
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create('Mask', np.array([[0, 255]], dtype=np.uint8))
dst_2 = GimpFile(os.path.join(dstdir, 'file2.xcf')) \
.create('Mask', np.array([[0, 255]], dtype=np.uint8))
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file(), dst_2.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.BLACK, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[0], [0]])
assert ['Mask'] == dst_1.layer_names()
assert 'Mask' in dst_2.layer_names()
assert np.all(dst_2.layer_to_numpy('Mask') == [[0], [255]])
assert ['Mask'] == dst_2.layer_names()
def test_merge_mask_layer_from_with_color():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf'))\
.create('Mask', np.array([[[255, 255, 255], [0, 0, 0]]], dtype=np.uint8))
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create('Mask', np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))
dst_2 = GimpFile(os.path.join(dstdir, 'file2.xcf')) \
.create('Mask', np.array([[[0, 0, 0], [255, 255, 255]]], dtype=np.uint8))
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file(), dst_2.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.WHITE, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[255, 255, 255], [255, 255, 255]])
assert ['Mask'] == dst_1.layer_names()
assert 'Mask' in dst_2.layer_names()
assert np.all(dst_2.layer_to_numpy('Mask') == [[0, 0, 0], [255, 255, 255]])
assert ['Mask'] == dst_2.layer_names()
def test_merge_mask_layer_from_with_mask_not_available_in_files_in_both_collections_and_foreground_color_white():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf')) \
.create_empty(2, 1, GimpFileType.GRAY)
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create_empty(2, 1, GimpFileType.GRAY)
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.WHITE, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[0], [0]])
assert ['Mask'] == dst_1.layer_names()
def test_merge_mask_layer_from_with_mask_not_available_in_files_in_both_collections_and_foreground_color_black():
with TemporaryDirectory('_src') as srcdir, TemporaryDirectory('_dst') as dstdir:
src_1 = GimpFile(os.path.join(srcdir, 'file1.xcf')) \
.create_empty(2, 1, GimpFileType.GRAY)
dst_1 = GimpFile(os.path.join(dstdir, 'file1.xcf')) \
.create_empty(2, 1, GimpFileType.GRAY)
src_collection = GimpFileCollection([src_1.get_file()])
dst_collection = GimpFileCollection([dst_1.get_file()])
dst_collection.merge_mask_layer_from(src_collection, 'Mask', MaskForegroundColor.BLACK, timeout_in_seconds=10)
assert np.all(dst_1.layer_to_numpy('Mask') == [[255], [255]])
assert ['Mask'] == dst_1.layer_names()
def test_clear_selection():
file_with_selection_original = file.relative_to(__file__, 'test-resources/selection.xcf')
with TempFile('.xcf') as file_with_selection:
shutil.copyfile(file_with_selection_original, file_with_selection)
collection = GimpFileCollection([file_with_selection])
selections_before = _has_selections(collection)
assert selections_before[file_with_selection]
collection.clear_selection(timeout_in_seconds=10)
selections_after = _has_selections(collection)
assert not selections_after[file_with_selection]
def _has_selections(collection):
result = collection.execute_script_and_return_json(
textwrap.dedent(
"""
import gimp
from pgimp.gimp.parameter import get_json, return_json
from pgimp.gimp.file import XcfFile
files = get_json('__files__')
selections = {}
for file in files:
with XcfFile(file, save=True) as image:
selections[file] = not gimp.pdb.gimp_selection_is_empty(image)
return_json(selections)
"""
),
timeout_in_seconds=10
)
return result
def test_remove_layers_by_name():
data = np.array([[0, 255]], dtype=np.uint8)
with TemporaryDirectory('_files') as dir:
file1 = GimpFile(os.path.join(dir, 'file1.xcf')) \
.create('Background', data) \
.add_layer_from_numpy('Layer 1', data) \
.add_layer_from_numpy('Layer 2', data) \
.add_layer_from_numpy('Layer 3', data)
file2 = GimpFile(os.path.join(dir, 'file2.xcf')) \
.create('Background', data) \
.add_layer_from_numpy('Layer 1', data) \
.add_layer_from_numpy('Layer 2', data)
collection = GimpFileCollection([file1.get_file(), file2.get_file()])
collection.remove_layers_by_name(['Layer 1', 'Layer 3'], timeout_in_seconds=10)
assert file1.layer_names() == ['Layer 2', 'Background']
assert file2.layer_names() == ['Layer 2', 'Background']
| 42.084507 | 125 | 0.650602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,124 | 0.214357 |
34f2f00e1352060e6764c5a58765eca101cee86d
| 2,542 |
py
|
Python
|
lstm.py
|
ryubidragonfire/text-emo
|
a03a9aa0d2e055277fc63a70822816853e5a35c0
|
[
"MIT"
] | null | null | null |
lstm.py
|
ryubidragonfire/text-emo
|
a03a9aa0d2e055277fc63a70822816853e5a35c0
|
[
"MIT"
] | null | null | null |
lstm.py
|
ryubidragonfire/text-emo
|
a03a9aa0d2e055277fc63a70822816853e5a35c0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 29 13:07:10 2016
@author: chyam
purpose: A vanilla lstm model for text classification.
"""
from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import LSTM
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cross_validation import train_test_split
import pandas as pd
from datetime import datetime
import preputils as pu
def main():
### Load data
filename = "C:/git/german-emo/data/clean-data-21092016.tsv"
df = pd.read_csv(filename, delimiter='\t'); print(df.shape)
    ### Prepare label
    y, le = pu.prep_label(df)
### Prepare features (word-based) -> Split data into training and test sets
tfidf_vectorizer = TfidfVectorizer(analyzer='word', stop_words=None, ngram_range=(1,1), max_df=0.9, min_df=1)
X_tfidf_word_11gram = tfidf_vectorizer.fit_transform(df['Text'].values.astype('U')); print(X_tfidf_word_11gram.shape); #11468x26778
X_train, X_test, y_train, y_test = train_test_split(X_tfidf_word_11gram, y, test_size=0.3, train_size=0.7, random_state=88); del X_tfidf_word_11gram
X_train_array = X_train.toarray(); del X_train
X_test_array = X_test.toarray(); del X_test
### LSTM
nb_classes = len(le.classes_)
lstm(X_train_array, y_train, X_test_array, y_test, timesteps=1, batch_size=50, nb_epoch=2, nb_classes=nb_classes)
### Clean up
del X_train_array, X_test_array, y_train, y_test
return
def lstm(X_train, y_train, X_test, y_test, timesteps, batch_size, nb_epoch, nb_classes):
""" Building a lstm model."""
print('Starting LSTM ...')
print(str(datetime.now()))
feature_len = X_train.shape; print(feature_len[1])
model = Sequential()
#model.add(Embedding(max_features, 256, input_length=maxlen))
model.add(LSTM(input_dim=feature_len[1], output_dim=128, activation='sigmoid', inner_activation='hard_sigmoid'))
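    # Keras 1.x-style arguments (input_dim/output_dim, inner_activation);
    # input_dim must equal the tf-idf feature count computed by the caller.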
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('sigmoid'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch)
score = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score[0])
print('Test accuracy:', score[1])
print('LSTM finished ...')
print(str(datetime.now()))
return
if __name__ == '__main__':
main()
| 35.802817 | 152 | 0.712825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.233281 |
34f37412bfc46db2a069f8207380b8ab7bc124d7
| 207 |
py
|
Python
|
day_1/fibonacci.py
|
Ishaan-99-cyber/ml-workshop-wac-1
|
186d4b6544c7e55cea052312934e455a51d1698a
|
[
"MIT"
] | 6 |
2020-12-24T07:10:58.000Z
|
2021-04-11T09:19:18.000Z
|
day_1/fibonacci.py
|
Ishaan-99-cyber/ml-workshop-wac-1
|
186d4b6544c7e55cea052312934e455a51d1698a
|
[
"MIT"
] | null | null | null |
day_1/fibonacci.py
|
Ishaan-99-cyber/ml-workshop-wac-1
|
186d4b6544c7e55cea052312934e455a51d1698a
|
[
"MIT"
] | 6 |
2020-12-24T09:42:25.000Z
|
2021-01-26T01:34:38.000Z
|
# 1 1 2 3 5 8 13 21 ....
# f(n) = f(n - 1) + f(n - 2)
# f(0) = f(1) = 1
def fibonacci(n: int):
if n == 0 or n == 1:
return 1
return fibonacci(n - 1) + fibonacci(n - 2)
print(fibonacci(5))
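# The plain recursion above recomputes subproblems exponentially. As a
# minimal illustrative variant (not part of the original workshop file),
# functools.lru_cache memoizes the same recurrence so each n is computed once:
from functools import lru_cache
@lru_cache(maxsize=None)
def fibonacci_memo(n: int):
    if n == 0 or n == 1:
        return 1
    return fibonacci_memo(n - 1) + fibonacci_memo(n - 2)
print(fibonacci_memo(5))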
| 17.25 | 46 | 0.468599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.333333 |
34f37aac957cd2dcc7bd8c099147d678a15c4634
| 160 |
py
|
Python
|
pythran/tests/user_defined_import/simple_case_main.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,647 |
2015-01-13T01:45:38.000Z
|
2022-03-28T01:23:41.000Z
|
pythran/tests/user_defined_import/simple_case_main.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,116 |
2015-01-01T09:52:05.000Z
|
2022-03-18T21:06:40.000Z
|
pythran/tests/user_defined_import/simple_case_main.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 180 |
2015-02-12T02:47:28.000Z
|
2022-03-14T10:28:18.000Z
|
#pythran export entry()
#runas entry()
import simple_case_import
def forward(a):
return simple_case_import.imported(a)
def entry():
return forward(1)
| 16 | 41 | 0.7375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.23125 |
34f48be78d73f96e6ef88433990d92e5dd1a350b
| 7,584 |
py
|
Python
|
python/models/model_factory.py
|
rwightman/pytorch-cdiscount
|
95901bd77888f7480f282e4b1541c0fc1e021bf9
|
[
"Apache-2.0"
] | 1 |
2022-03-09T09:40:43.000Z
|
2022-03-09T09:40:43.000Z
|
python/models/model_factory.py
|
rwightman/pytorch-cdiscount
|
95901bd77888f7480f282e4b1541c0fc1e021bf9
|
[
"Apache-2.0"
] | null | null | null |
python/models/model_factory.py
|
rwightman/pytorch-cdiscount
|
95901bd77888f7480f282e4b1541c0fc1e021bf9
|
[
"Apache-2.0"
] | null | null | null |
import torchvision.models
from .resnext101_32x4d import resnext101_32x4d
from .inception_v4 import inception_v4
from .inception_resnet_v2 import inception_resnet_v2
from .wrn50_2 import wrn50_2
from .my_densenet import densenet161, densenet121, densenet169, densenet201
from .my_resnet import resnet18, resnet34, resnet50, resnet101, resnet152
from .fbresnet200 import fbresnet200
from .dpn import dpn68, dpn68b, dpn92, dpn98, dpn131, dpn107
#from .transformed_model import TransformedModel
from .load_checkpoint import load_checkpoint
def normalizer_from_model(model_name):
if 'inception' in model_name:
normalizer = 'le'
elif 'dpn' in model_name:
        normalizer = 'dualpathnet'
else:
normalizer = 'torchvision'
return normalizer
model_config_dict = {
'resnet18': {
'model_name': 'resnet18', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'resnet18-5c106cde.pth', 'drop_first_class': False},
'resnet34': {
'model_name': 'resnet34', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'resnet34-333f7ec4.pth', 'drop_first_class': False},
'resnet50': {
'model_name': 'resnet50', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'resnet50-19c8e357.pth', 'drop_first_class': False},
'resnet101': {
'model_name': 'resnet101', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'resnet101-5d3b4d8f.pth', 'drop_first_class': False},
'resnet152': {
'model_name': 'resnet152', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'resnet152-b121ed2d.pth', 'drop_first_class': False},
'densenet121': {
'model_name': 'densenet121', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'densenet121-241335ed.pth', 'drop_first_class': False},
'densenet169': {
'model_name': 'densenet169', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'densenet169-6f0f7f60.pth', 'drop_first_class': False},
'densenet201': {
'model_name': 'densenet201', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'densenet201-4c113574.pth', 'drop_first_class': False},
'densenet161': {
'model_name': 'densenet161', 'num_classes': 1000, 'input_size': 224, 'normalizer': 'torchvision',
'checkpoint_file': 'densenet161-17b70270.pth', 'drop_first_class': False},
'dpn107': {
'model_name': 'dpn107', 'num_classes': 1000, 'input_size': 299, 'normalizer': 'dualpathnet',
'checkpoint_file': 'dpn107_extra-fc014e8ec.pth', 'drop_first_class': False},
'dpn92_extra': {
'model_name': 'dpn92', 'num_classes': 1000, 'input_size': 299, 'normalizer': 'dualpathnet',
'checkpoint_file': 'dpn92_extra-1f58102b.pth', 'drop_first_class': False},
'dpn92': {
'model_name': 'dpn92', 'num_classes': 1000, 'input_size': 299, 'normalizer': 'dualpathnet',
'checkpoint_file': 'dpn92-7d0f7156.pth', 'drop_first_class': False},
'dpn68': {
'model_name': 'dpn68', 'num_classes': 1000, 'input_size': 299, 'normalizer': 'dualpathnet',
'checkpoint_file': 'dpn68-abcc47ae.pth', 'drop_first_class': False},
'dpn68b': {
'model_name': 'dpn68b', 'num_classes': 1000, 'input_size': 299, 'normalizer': 'dualpathnet',
'checkpoint_file': 'dpn68_extra.pth', 'drop_first_class': False},
'dpn68b_extra': {
'model_name': 'dpn68b', 'num_classes': 1000, 'input_size': 299, 'normalizer': 'dualpathnet',
'checkpoint_file': 'dpn68_extra.pth', 'drop_first_class': False},
'inception_resnet_v2': {
'model_name': 'inception_resnet_v2', 'num_classes': 1001, 'input_size': 299, 'normalizer': 'le',
'checkpoint_file': 'inceptionresnetv2-d579a627.pth', 'drop_first_class': True},
}
def config_from_string(string, output_fn='log_softmax'):
config = model_config_dict[string]
config['output_fn'] = output_fn
return config
def create_model(
model_name='resnet50',
pretrained=False,
num_classes=1000,
checkpoint_path='',
**kwargs):
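    # Build the named architecture with the requested class count; optionally
    # load pretrained weights, or a local checkpoint when pretrained is False.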
if 'test_time_pool' in kwargs:
test_time_pool = kwargs.pop('test_time_pool')
else:
test_time_pool = 0
if model_name == 'dpn68':
model = dpn68(
num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
elif model_name == 'dpn68b':
model = dpn68b(
num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
elif model_name == 'dpn92':
model = dpn92(
num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
elif model_name == 'dpn98':
model = dpn98(
num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
elif model_name == 'dpn131':
model = dpn131(
num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
elif model_name == 'dpn107':
model = dpn107(
num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
elif model_name == 'resnet18':
model = resnet18(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'resnet34':
model = resnet34(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'resnet50':
model = resnet50(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'resnet101':
model = resnet101(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'resnet152':
model = resnet152(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'densenet121':
model = densenet121(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'densenet161':
model = densenet161(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'densenet169':
model = densenet169(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'densenet201':
model = densenet201(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'inception_resnet_v2':
model = inception_resnet_v2(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'inception_v4':
model = inception_v4(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'resnext101_32x4d':
model = resnext101_32x4d(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'wrn50':
model = wrn50_2(num_classes=num_classes, pretrained=pretrained, **kwargs)
elif model_name == 'fbresnet200':
model = fbresnet200(num_classes=num_classes, pretrained=pretrained, **kwargs)
else:
assert False and "Invalid model"
if checkpoint_path and not pretrained:
load_checkpoint(model, checkpoint_path)
return model
def create_model_from_cfg(mc, checkpoint_path=''):
if 'kwargs' not in mc:
mc['kwargs'] = {}
model = create_model(
model_name=mc['model_name'],
num_classes=mc['num_classes'],
checkpoint_path=checkpoint_path if checkpoint_path else mc['checkpoint_file'],
**mc['kwargs']
)
return model
| 47.10559 | 105 | 0.681566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,735 | 0.360628 |
34f4d60dbd3b87a88dce9e6ef7fc8bd1f475fd71
| 585 |
py
|
Python
|
add_+x.py
|
racytech/rpctests
|
886d97b9e16fd030586d0fca6945d8f7a277ae27
|
[
"Apache-2.0"
] | null | null | null |
add_+x.py
|
racytech/rpctests
|
886d97b9e16fd030586d0fca6945d8f7a277ae27
|
[
"Apache-2.0"
] | null | null | null |
add_+x.py
|
racytech/rpctests
|
886d97b9e16fd030586d0fca6945d8f7a277ae27
|
[
"Apache-2.0"
] | 1 |
2021-09-03T17:14:55.000Z
|
2021-09-03T17:14:55.000Z
|
#!/usr/bin/env python3
"""
Add +x to every .sh file
"""
# import argparse
import os
import subprocess
def go_recursive(search_dir: str):
objects = os.listdir(search_dir)
for obj in objects:
if obj == ".git":
continue
obj_path = f"{search_dir}/{obj}"
if os.path.isdir(obj_path):
go_recursive(obj_path)
if os.path.isfile(obj_path) and obj.endswith(".sh"):
try:
subprocess.run(["chmod", "+x", obj_path])
            except OSError as e:
                print(f"chmod failed for {obj_path}: {e}")
go_recursive(".")
| 20.172414 | 60 | 0.560684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 140 | 0.239316 |
34f5a27bc2eb816c9dabb375d6159c8eb8e17312
| 25,985 |
py
|
Python
|
texas.py
|
isaact23/texas
|
1ac70b00f0acf2f196aca87476d7bac97418afba
|
[
"MIT"
] | null | null | null |
texas.py
|
isaact23/texas
|
1ac70b00f0acf2f196aca87476d7bac97418afba
|
[
"MIT"
] | null | null | null |
texas.py
|
isaact23/texas
|
1ac70b00f0acf2f196aca87476d7bac97418afba
|
[
"MIT"
] | null | null | null |
# TEXAS HOLD'EM (Program by Isaac Thompson)
import random, itertools, copy, sys
import os
from playsound import playsound
import pyttsx3
# Set to true to enable betting.
do_bets = False
RANKS = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']
SUITS = ['C', 'D', 'H', 'S']
SORT_RANKS = {'2': 0, '3': 1, '4': 2, '5': 3, '6': 4, '7': 5, '8': 6, '9': 7, 'T': 8, 'J': 9, 'Q': 10, 'K': 11, 'A': 12}
SORT_SUITS = {'C': 0, 'D': 1, 'H': 2, 'S': 3}
RANK_NAMES = {'2': 'Two', '3': 'Three', '4': 'Four', '5': 'Five', '6': 'Six', '7': 'Seven', '8': 'Eight', '9': 'Nine', 'T': 'Ten',
'J': 'Jack', 'Q': 'Queen', 'K': 'King', 'A': 'Ace'}
SUIT_NAMES = {'C': 'Clubs', 'D': 'Diamonds', 'H': 'Hearts', 'S': 'Spades'}
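# Cards are two-character strings: rank then suit, e.g. 'TH' = Ten of Hearts.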
DEAL_IN = ["Deal me in.",
"What are you waiting for? Give me two cards.",
"You're the dealer. Go ahead and deal.",
"Give me some cards please."]
FLOP = ["Time for the flop.",
"Put down the first three cards"]
PLAYER_SPEECH_1 = ["Not bad.",
"That's more than half.",
"The odds are in your favor.",
"You have an acknowledgable chance, my friend.",
"Just you wait. This will all change."]
PLAYER_SPEECH_2 = ["That's pretty good.",
"How sad.",
"Don't worry, the odds will change shortly.",
"You hear that? It's the winds of change.",
"I have to say I am not happy with you."]
PLAYER_SPEECH_3 = ["I might as well fold.",
"This is rather unfortunate.",
"Dang.",
"No. This can't be happening. No!",
"Welp. This is happening."]
PLAYER_WIN = ["You won this time around.",
"You win. What a shame.",
"You won. For the first time. For the last time.",
"Welp, I've been destroyed.",
"Good game.",
"Let's play again so I can righteously win."]
CPU_SPEECH_1 = ["Looks good for me.",
"That's a good thing.",
"Hopefully it stays that way.",
"Flip a coin and it'll land on my side.",
"Heh."]
CPU_SPEECH_2 = ["Prepare to lose.",
"The odds are in my favor.",
"Ha ha ha ha.",
"I will be beating you shortly.",
"I will trump you!"]
CPU_SPEECH_3 = ["You sir are doomed.",
"You might as well fold",
"Just give up. As far as you know I've got pocket aces",
"Prepare yourself mentally to be obliterated",
"This is the end for you!",
"Ha! You can't win!",
"You humans will never beat me!"]
CPU_WIN = ["You lose. Would you like to play again?",
"You have been righteously destroyed.",
"Good golly. Looks like humans are being phased out.",
"Rest in peace.",
"I win. Let's play again so I can win again.",
"Victory goes to me. What a surprise.",
"Get wrecked.",
"You've been destroyed by a computer. How do you feel?",
"Wow, what a loser. You should have been luckier."]
NEURAL_SPEECH = ["Well, this is going to be a boring round.",
"The outlook is not great for either of us",
"Let's both fold on three. One, two, three. Just kidding, I never fold.",
"I cannot express my infinite exhilaration through my sarcastic robot voice.",
"Yawn."]
DRAW = ["We tied. What are the odds?",
"Tie game. How embarassing for both of us."]
# Set up audio engine
audio_engine = pyttsx3.init()
audio_engine.setProperty('rate', 210)
# Synthesize text as speech
def say(text):
print(text)
try:
audio_engine.say(text)
audio_engine.runAndWait()
except:
pass
# Convert a card identity to a name (like 3H to Three of Hearts)
def name_card(card):
return RANK_NAMES[card[0]] + " of " + SUIT_NAMES[card[1]]
# Report the calculated game odds to the player.
# Includes statements of astronomical wit.
def tell_odds(prediction):
player_percent = str(round(prediction['player_win'] * 1000)/10)
player_str = "You are " + player_percent + " percent likely to win. "
computer_percent = str(round(prediction['computer_win'] * 1000)/10)
computer_str = "I am " + computer_percent + " percent likely to win. "
#draw = str(round(chances['draw'] * 1000)/10)
if prediction['draw'] > 0.5:
say("This is probably going to be a tie round.")
elif prediction['player_win'] > 0.9:
say(player_str + random.choice(PLAYER_SPEECH_3))
elif prediction['player_win'] > 0.7:
say(player_str + random.choice(PLAYER_SPEECH_2))
elif prediction['player_win'] >= 0.5:
say(player_str + random.choice(PLAYER_SPEECH_1))
elif prediction['computer_win'] > 0.9:
say(computer_str + random.choice(CPU_SPEECH_3))
elif prediction['computer_win'] > 0.7:
say(computer_str + random.choice(CPU_SPEECH_2))
elif prediction['computer_win'] > 0.5:
say(computer_str + random.choice(CPU_SPEECH_1))
else:
say(random.choice(NEUTRAL_SPEECH))
# A class that runs the game.
class PredictionAlgorithm():
def __init__(this):
this.turn = 1
this.next_round() # Main setup function - executed every round
this.player_money = 500
this.computer_money = 500
def next_round(this): # Reset and switch turns
if this.turn == 0:
this.turn = 1
else:
this.turn = 0
this.community_cards = []
this.computer_hand = []
this.player_hand = []
this.player_bet = 0
this.computer_bet = 0
this.deck = []
for suit in SUITS:
for rank in RANKS:
this.deck.append(rank + suit)
random.shuffle(this.deck)
this.maximum_percent_loss = round((random.random() + 0.15) * 10) / 10 # Prevents overbetting. The higher, the more aggressive.
this.maximum_percent_loss = 0.5
def draw_card(this): # Allow the player to specify a card, then remove it from the deck.
card = None
while card == None:
card = input("Draw a card: ")
card = card.upper()
if len(card) != 2:
print("Not a valid card. Must be two characters long: one for the rank, second for the suit.")
card = None
elif (not card[0] in RANKS) or (not card[1] in SUITS):
print("Not a valid card. Use the format: 2H for Two of Hearts, TC for Ten of Clubs, etc.")
card = None
elif not card in this.deck:
print("That card is no longer in the deck. Choose a different card.")
card = None
this.deck.remove(card)
print("Drew the " + RANK_NAMES[card[0]] + " of " + SUIT_NAMES[card[1]])
return card
def play(this): # Go through a round until a winner is determined.
if do_bets:
say("You have " + str(this.player_money) + " tokens. I have " + str(this.computer_money) + " tokens.")
#say("I am playing with an aggressiveness factor of " + str(this.maximum_percent_loss))
if this.computer_money == 0:
say(random.choice(PLAYER_WIN))
sys.exit()
elif this.computer_money == 1:
say("You play the big blind this round.")
this.player_bet = 2
this.computer_bet = 1
elif (this.turn == 1 or this.player_money == 1) and this.player_money != 0:
say("You play the small blind this round.")
this.player_bet = 1
this.computer_bet = 2
elif this.turn == 0 and this.player_money > 1:
say("You play the big blind this round.")
this.player_bet = 2
this.computer_bet = 1
else:
say(random.choice(CPU_WIN))
sys.exit()
say(random.choice(DEAL_IN))
#CardDetector.GetComputerHand()
for i in range(2):
this.computer_hand.append(this.draw_card())
result = this.bets()
if result == 2:
return
if do_bets:
this.state_bets()
say(random.choice(FLOP))
for i in range(3):
this.community_cards.append(this.draw_card())
if do_bets:
if this.computer_bet != this.computer_money and this.player_bet != this.player_money:
result = this.bets()
if result == 2:
return
this.state_bets()
tell_odds(this.find_winning_chances(0.35))
say("Deal another card.")
this.community_cards.append(this.draw_card())
if do_bets:
if this.computer_bet != this.computer_money and this.player_bet != this.player_money:
result = this.bets()
if result == 2:
return
this.state_bets()
tell_odds(this.find_winning_chances(0.4))
say("Deal the final card.")
this.community_cards.append(this.draw_card())
if do_bets:
if this.computer_bet != this.computer_money and this.player_bet != this.player_money:
result = this.bets()
if result == 2:
return
this.state_bets()
tell_odds(this.find_winning_chances(0.45))
say("Alright, show me your hand.")
for i in range(2):
this.player_hand.append(this.draw_card())
player_best_hand = this.find_best_hand(this.community_cards + this.player_hand)
computer_best_hand = this.find_best_hand(this.community_cards + this.computer_hand)
say("Your best hand was " + player_best_hand.name)
say("My best hand was " + computer_best_hand.name)
winner = this.winning_hand(player_best_hand, computer_best_hand)
if winner == 0:
say(random.choice(PLAYER_WIN))
this.player_wins()
elif winner == 1:
say(random.choice(CPU_WIN))
this.computer_wins()
elif winner == 2:
say(random.choice(DRAW))
def computer_wins(this):
this.computer_money += this.player_bet
this.player_money -= this.player_bet
this.next_round()
def player_wins(this):
this.player_money += this.computer_bet
this.computer_money -= this.computer_bet
this.next_round()
def state_bets(this):
print("You have bet " + str(this.player_bet))
print("Computer has bet " + str(this.computer_bet))
# Run through a betting cycle with the player.
def bets(this):
if do_bets:
computer_played = False
player_played = False
skip_player = False
if this.turn == 1:
skip_player = True
chances = None
while True: # Betting cycle
this.state_bets()
if skip_player:
skip_player = False
else: # Player bet
bet = [""] # Obtain the command
commands = ["bet", "raise", "call", "check", "fold"]
while not bet[0] in commands:
bet = input("Bet, Raise, Call, Check, or Fold: ").split()
if len(bet) == 0:
bet = [""]
else:
bet[0] = bet[0].lower()
player_played = True
if bet[0] == "bet" or bet[0] == "raise": # Parse the command
try:
bet[1] = int(bet[1])
amount = bet[1]
if bet[1] + this.computer_bet > this.player_money:
print("You only have " + str(this.player_money))
raise Exception() # Break out of try loop and ask for a new bet
except: # Get a valid bet
amount = -1
while amount < 0:
amount = input("How much: ")
try:
amount = int(amount)
if amount + this.computer_bet > this.player_money:
amount = -1
print("You only have " + str(this.player_money))
except:
amount = -1
this.player_bet += amount
elif bet[0] == "call" or bet[0] == "check":
if this.player_bet < this.computer_bet: # Raise the bet to match the computer bet
this.player_bet = this.computer_bet
if this.player_bet > this.player_money: # Limit calling to the player's money
this.player_bet = this.player_money
if player_played and computer_played:
return
elif bet[0] == "fold":
this.computer_wins()
return 2
this.state_bets()
# Computer bet
            if chances is None:
chances = this.find_winning_chances({0: 0.22, 3: 0.3, 4: 0.3, 5: 1}[len(this.community_cards)])
if this.player_bet > this.computer_bet:
                call_bet = this.player_bet  # Match the player without exceeding the computer balance.
else:
call_bet = this.computer_bet
if call_bet > this.computer_money:
call_bet = this.computer_money
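            # Expected-value betting: for each affordable raise, score the
            # resulting stack under win/loss/draw weighted by the estimated
            # probabilities; folding simply keeps the uncommitted stack.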
expected_outcomes = {'fold': this.computer_money - this.computer_bet}
if this.computer_money - this.computer_bet + 1 > 0: # If we can raise, calculate which raise value is the best.
for raise_value in range(0, this.computer_money - this.player_bet + 1):
expected_outcomes[raise_value] = {}
expected_outcomes[raise_value]['win'] = this.computer_money + this.player_bet + raise_value
expected_outcomes[raise_value]['loss'] = this.computer_money - this.player_bet - raise_value
expected_outcomes[raise_value]['draw'] = this.computer_money
expected_outcomes[raise_value]['expected'] = expected_outcomes[raise_value]['win'] * chances['computer_win'] + \
expected_outcomes[raise_value]['loss'] * chances['player_win'] + \
expected_outcomes[raise_value]['draw'] * chances['draw']
computer_played = True
best_choice = 'fold'
best_expected_value = expected_outcomes['fold']
for choice in expected_outcomes:
if not choice == 'fold':
if expected_outcomes[choice]['expected'] > best_expected_value:
#if expected_outcomes[choice]['loss'] >= this.computer_money * (1 - this.maximum_percent_loss):
best_choice = choice
best_expected_value = expected_outcomes[choice]['expected']
if best_choice == 'fold':
say("I fold.")
this.player_wins()
return 2
elif best_choice == 0: # Call/Check
if this.computer_bet == this.player_bet:
say("I check.")
else:
say("I call.")
this.computer_bet = call_bet
if player_played and computer_played:
return
else: # Call and raise
say("I call and raise " + str(best_choice))
this.computer_bet = this.player_bet + best_choice
def find_winning_chances(this, accuracy=1): # Accuracy of 1 calculates perfectly, but lower values are faster.
# Increments for each possible final outcome, then calculated as percentages in the end.
player_wins = 0
computer_wins = 0
draws = 0
false_deck = copy.deepcopy(this.deck)
while len(false_deck) > len(this.deck) * accuracy:
false_deck.pop(random.randrange(0, len(false_deck)))
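        # accuracy < 1 randomly thins the remaining deck before enumerating
        # outcomes, trading exactness for speed; accuracy=1 keeps every card.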
community_combos = list(itertools.combinations(false_deck, 5 - len(this.community_cards)))
for community_combo in community_combos:
community_combo = list(community_combo)
for card in community_combo:
false_deck.remove(card) # Temporarily remove the community cards from the deck
full_community_combo = community_combo + this.community_cards # Add the cards we already know so we have a set of five
player_combos = list(itertools.combinations(false_deck, 2))
for player_combo in player_combos: # Based on reduced deck, determine all possibilities for the player's hand and beating the computer
player_combo = list(player_combo)
best_player_hand = this.find_best_hand(player_combo + full_community_combo)
best_computer_hand = this.find_best_hand(this.computer_hand + full_community_combo)
winner = this.winning_hand(best_player_hand, best_computer_hand)
if winner == 0:
player_wins += 1
elif winner == 1:
computer_wins += 1
else:
draws += 1
false_deck += community_combo # Add the cards back to the deck
total_scenarios = player_wins + computer_wins + draws
return {'player_win': player_wins / total_scenarios,
'computer_win': computer_wins / total_scenarios,
'draw': draws / total_scenarios}
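    # Example (sketch): the returned fractions always sum to 1.0, e.g.
    # {'player_win': 0.45, 'computer_win': 0.50, 'draw': 0.05}.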
def find_best_hand(this, cards): # Find the best hand out of the community cards and two personal cards.
combinations = list(itertools.combinations(cards, 5))
best_hand = None
for combo in combinations:
hand = Hand()
hand.cards = list(combo)
hand.evaluate()
if best_hand == None:
best_hand = hand
else:
if this.winning_hand(hand, best_hand) == 0:
best_hand = hand
return best_hand
def winning_hand(this, hand0, hand1):
if hand0.hand_type < hand1.hand_type:
return 0
elif hand0.hand_type > hand1.hand_type:
return 1
else:
assert len(hand0.high_cards) == len(hand1.high_cards), 'These two hands have a different number of kickers: ' \
+ str(hand0.cards) + ' ' + str(hand1.cards)
i = 0
while i < len(hand0.high_cards):
if SORT_RANKS[hand0.high_cards[i][0]] > SORT_RANKS[hand1.high_cards[i][0]]:
return 0
elif SORT_RANKS[hand0.high_cards[i][0]] < SORT_RANKS[hand1.high_cards[i][0]]:
return 1
i += 1
return 2 # Draw
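    # Example (sketch): with equal hand types the kickers decide, so a pair of
    # kings with an ace kicker beats a pair of kings with a queen kicker;
    # identical kickers return 2 (a split pot).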
class Hand(): # 5 card hand
# Initialize an empty hand.
def __init__(this, cards=[]):
this.name = None
this.hand_type = -1
this.high_cards = {}
this.cards = cards
# Add a card to the hand.
def append(this, card):
this.cards.append(card)
# Determine the value of our hand.
# Identify royal flush, straights, two-pair combos, etc.
def evaluate(this):
this.cards = sorted(this.cards, key=lambda card: (SORT_RANKS[card[0]], card[1])) # Sort the cards by rank first, then by suit
# Flush
flush = True
for i, card in enumerate(this.cards):
if i == 4:
break
elif this.cards[i][1] != this.cards[i + 1][1]:
flush = False
break
# Royal flush
if flush:
if this.cards[0][0] == 'T' and this.cards[1][0] == 'J' and this.cards[2][0] == 'Q' and this.cards[3][0] == 'K' and this.cards[4][0] == 'A':
this.hand_type = 0
this.name = 'Royal Flush'
return
        # Straight (search both ways from the starting card to detect a straight in either direction)
straight = False
r = 0
l = 0
while True:
difference = abs(SORT_RANKS[this.cards[r + 1][0]] - SORT_RANKS[this.cards[r][0]])
if difference == 1 or difference == 12:
r += 1
if r == 4:
break
else:
break
while True:
difference = abs(SORT_RANKS[this.cards[-l - 1][0]] - SORT_RANKS[this.cards[-l][0]])
if difference == 1 or difference == 12:
l += 1
if l == 4:
break
else:
break
if r + l == 4:
straight = True
# Straight flush
if straight and flush:
this.hand_type = 1
if this.cards[r][0] == '4' or this.cards[r][0] == '3' or this.cards[r][0] == '2':
this.name = 'Straight Flush (Ace-High)'
this.high_cards[0] = this.cards[4]
elif this.cards[4][0] == 'A' and this.cards[3][0] == '5':
this.name = 'Straight Flush (Steel Wheel)'
this.high_cards[0] = this.cards[r]
else:
this.name = 'Straight Flush (' + RANK_NAMES[this.cards[4][0]] + '-High)'
this.high_cards[0] = this.cards[4]
return
# Group cards for later disambiguation
groups = [[this.cards[0]]]
for i, card in enumerate(this.cards):
if i > 0:
new_card = True
for group in groups:
if group[0][0] == card[0]:
group.append(card)
new_card = False
break
if new_card:
groups.append([card])
groups = sorted(groups, key=lambda group: -len(group)) # Biggest groups first
# 4 of a kind
if len(groups[0]) == 4:
this.hand_type = 2
this.high_cards[0] = groups[0][0]
this.high_cards[1] = groups[1][0]
this.name = 'Four of a Kind'
return
# Full House
if len(groups[0]) == 3 and len(groups[1]) == 2:
this.hand_type = 3
this.high_cards[0] = groups[0][0]
this.high_cards[1] = groups[1][0]
if groups[0][0][0] == 'K' and groups[0][1][0] == 'K' and groups[0][2][0] == 'K' and groups[1][0][0] == 'A' and groups[1][1][0] == 'A':
this.name = 'Full House (Nativity)'
else:
this.name = 'Full House'
return
# Flush
if flush:
this.hand_type = 4
this.high_cards[0] = groups[0][0]
this.name = 'Flush'
return
# Straight - code adapted from earlier (Wheel, Sucker Straight)
if straight:
this.hand_type = 5
if this.cards[r][0] == '4' or this.cards[r][0] == '3' or this.cards[r][0] == '2':
this.name = 'Straight (Ace-High)'
this.high_cards[0] = this.cards[4]
elif this.cards[4][0] == 'A' and this.cards[3][0] == '5':
this.name = 'Sucker Straight'
this.high_cards[0] = this.cards[r]
else:
this.name = 'Straight (' + RANK_NAMES[this.cards[4][0]] + '-High)'
this.high_cards[0] = this.cards[4]
return
# Three of a Kind
if len(groups[0]) == 3:
this.hand_type = 6
this.name = 'Three of a Kind'
this.high_cards[0] = groups[0][0] # Three of a kind
this.high_cards[1] = groups[2][0] # High kicker
this.high_cards[2] = groups[1][0] # Low kicker
return
# Two Pairs
if len(groups[0]) == 2 and len(groups[1]) == 2:
this.hand_type = 7
this.name = 'Two Pairs'
this.high_cards[0] = groups[1][0] # Highest pair
this.high_cards[1] = groups[0][0] # Lowest pair
this.high_cards[2] = groups[2][0] # Kicker
return
# Pair
if len(groups[0]) == 2:
this.hand_type = 8
this.name = 'Pair'
this.high_cards[0] = groups[0][0] # Pair
this.high_cards[1] = groups[3][0] # High kicker
this.high_cards[2] = groups[2][0] # Mid kicker
this.high_cards[3] = groups[1][0] # Low kicker
return
# Junk
this.hand_type = 9
this.name = 'Junk'
assert len(groups) == 5, "Error! We have been dealt a broken hand."
this.high_cards[0] = groups[4][0] # Highest card
this.high_cards[1] = groups[3][0]
this.high_cards[2] = groups[2][0]
this.high_cards[3] = groups[1][0]
this.high_cards[4] = groups[0][0] # Lowest card
return
if __name__ == "__main__":
ALG = PredictionAlgorithm()
say("Let's play Texas Hold'Em.")
while True:
ALG.play()
| 40.792779 | 151 | 0.516452 | 20,571 | 0.791649 | 0 | 0 | 0 | 0 | 0 | 0 | 6,112 | 0.235213 |
34f5d659040a322d337330d8a9d7b5449d63b66f
| 443 |
py
|
Python
|
no. of occurences of substring.py
|
devAmoghS/Python-Programs
|
5b8a67a2a41e0e4a844ae052b59fc22fdcdbdbf9
|
[
"MIT"
] | 1 |
2019-09-18T14:06:50.000Z
|
2019-09-18T14:06:50.000Z
|
no. of occurences of substring.py
|
devAmoghS/Python-Programs
|
5b8a67a2a41e0e4a844ae052b59fc22fdcdbdbf9
|
[
"MIT"
] | null | null | null |
no. of occurences of substring.py
|
devAmoghS/Python-Programs
|
5b8a67a2a41e0e4a844ae052b59fc22fdcdbdbf9
|
[
"MIT"
] | null | null | null |
"""
s="preeni"
ss="e"
"""
s=input("enter the string:")
ss=input("enter the substring:")
j=0
for i in range(len(s)):
m=s.find(ss)
#the first occurence of ss
if(j==0):
print("m=%d"%m)
if(m== -1 and j==0):
print("no such substring is available")
break
if(m== -1):
break
else :
j=j+1
s=s[m+1:]
# print(s)
print("no. of occurences is %s"%j)
| 17.038462 | 48 | 0.465011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.383747 |
34f78347f261274809e3a7533c6bc409939bf9b0
| 1,039 |
py
|
Python
|
leetcode/code/maximumProduct.py
|
exchris/Pythonlearn
|
174f38a86cf1c85d6fc099005aab3568e7549cd0
|
[
"MIT"
] | null | null | null |
leetcode/code/maximumProduct.py
|
exchris/Pythonlearn
|
174f38a86cf1c85d6fc099005aab3568e7549cd0
|
[
"MIT"
] | 1 |
2018-11-27T09:58:54.000Z
|
2018-11-27T09:58:54.000Z
|
leetcode/code/maximumProduct.py
|
exchris/pythonlearn
|
174f38a86cf1c85d6fc099005aab3568e7549cd0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding:utf-8 -*-
# 628. Maximum Product of Three Numbers
class Solution:
    def maximumProduct(self, nums):
        if len(nums) == 3:
            return nums[0] * nums[1] * nums[2]
        elif len(nums) < 3:
            return None
        else:
            z_num, f_num = [], []
            for i in nums:
                if i < 0:
                    f_num.append(i)  # list of negatives
                else:
                    z_num.append(i)  # list of non-negatives
            z_num.sort(reverse=True)
            f_num.sort()
            if len(f_num) < 2:
                # Fewer than two negatives: take the three largest non-negatives
                return z_num[0] * z_num[1] * z_num[2]
            elif len(z_num) == 0:
                # All negatives: the three closest to zero give the maximum
                return f_num[-1] * f_num[-2] * f_num[-3]
            else:
                # Either the three largest non-negatives, or the two most
                # negative values times the largest non-negative
                sum1 = float("-inf")
                if len(z_num) >= 3:
                    sum1 = z_num[0] * z_num[1] * z_num[2]
                sum2 = f_num[0] * f_num[1] * z_num[0]
                return max(sum1, sum2)
s = Solution()
num = s.maximumProduct([-4, -3, -2, -1, 60])
print(num)
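# A shorter equivalent (sketch, not part of the original solution): after one
# sort, the answer is either the product of the three largest values or the
# product of the two smallest (most negative) values and the largest.
def maximum_product_sorted(nums):
    nums = sorted(nums)
    return max(nums[-1] * nums[-2] * nums[-3], nums[0] * nums[1] * nums[-1])
print(maximum_product_sorted([-4, -3, -2, -1, 60]))  # 720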
| 25.341463 | 53 | 0.405197 | 923 | 0.861811 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.09057 |
34f87e0983bc87b776a41bf8fa6bd6191f64154d
| 437 |
py
|
Python
|
exemplo_47_inspect.py
|
alef123vinicius/Estudo_python
|
30b121d611f94eb5df9fbb41ef7279546143221b
|
[
"Apache-2.0"
] | null | null | null |
exemplo_47_inspect.py
|
alef123vinicius/Estudo_python
|
30b121d611f94eb5df9fbb41ef7279546143221b
|
[
"Apache-2.0"
] | null | null | null |
exemplo_47_inspect.py
|
alef123vinicius/Estudo_python
|
30b121d611f94eb5df9fbb41ef7279546143221b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 15:35:53 2021
@author: alef
"""
import os.path
# inspect: a friendly introspection module
import inspect
print('Object: ', inspect.getmodule(os.path))
print('Class?', inspect.isclass(str))
# List all the functions that exist in os.path
print('Members: ')
for name, struct in inspect.getmembers(os.path):
    if inspect.isfunction(struct):
        print(name)
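# A small extension (sketch): inspect.signature also exposes each function's
# parameters, and getmembers accepts a predicate directly.
for name, struct in inspect.getmembers(os.path, inspect.isfunction):
    print(name, inspect.signature(struct))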
| 17.48 | 48 | 0.681922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.493213 |
34f90454724956c5a7e90a92e40de7bf13365c40
| 20,610 |
py
|
Python
|
src/hr_system/hr_system.py
|
pablomarcel/HR-System
|
25edf82d0f4f37ededfb6c6b713a5d7c455ff67e
|
[
"MIT"
] | null | null | null |
src/hr_system/hr_system.py
|
pablomarcel/HR-System
|
25edf82d0f4f37ededfb6c6b713a5d7c455ff67e
|
[
"MIT"
] | null | null | null |
src/hr_system/hr_system.py
|
pablomarcel/HR-System
|
25edf82d0f4f37ededfb6c6b713a5d7c455ff67e
|
[
"MIT"
] | null | null | null |
import sys
import pyfiglet
import pandas as pd
import numpy as np
from tabulate import tabulate
import dateutil
import datetime
import re
result = pyfiglet.figlet_format("h r s y s t e m", font="slant")
strStatus = ""
class UserSelection:
"""Handles User Selection Logic
    The class is used to implement a case-switch construct in Python.
"""
def switch(self, strChoice):
"""Builds a function name based off user choice and triggers the actions"""
default = "Incorrect Selection"
return getattr(self, "case_" + str(strChoice), lambda: default)()
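    # Example (sketch): switch("5") dispatches to case_5; an unknown choice
    # falls back to the lambda and returns "Incorrect Selection".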
def case_1(self):
"""User selected print a list of all employees"""
IO.print_all_employees(IO.get_employee_db())
pass
def case_2(self):
"""User selected Print a list of employees currently employed"""
IO.print_all_employees_employed(IO.get_employee_db())
pass
def case_3(self):
"""User selected Print a list of employees who have left in the past month"""
IO.print_employees_departures(IO.get_employee_db())
pass
def case_4(self):
"""User selected Display a reminder to schedule annual review"""
IO.print_review_reminders(IO.get_employee_db())
pass
def case_5(self):
"""User selected Capture employee information"""
(
employeeID,
firstName,
lastName,
fullName,
address,
ssn,
dateOfBirth,
jobTitle,
startDate,
endDate,
) = IO.capture_employee_data(IO.get_employee_db())
df = Processor.append_row(
IO.get_employee_db(),
employeeID,
firstName,
lastName,
fullName,
address,
ssn,
dateOfBirth,
jobTitle,
startDate,
endDate,
)
Processor.append_to_csv(df)
pass
def case_6(self):
"""User selected Delete record"""
fullName = IO.input_name_to_delete()
Processor.delete_record(IO.get_employee_db(), fullName)
pass
def case_7(self):
"""User selected Exit"""
print("Goodbye ")
sys.exit()
class Processor:
"""Performs Processing tasks"""
@staticmethod
def delete_record(dframe, name):
"""Generates a new DataFrame Filtering Out the record corresponding to the name to delete
:param dframe: (Pandas DataFrame) DataFrame containing employee information
:param name: (String) String representing the name to delete
:return: nothing
"""
df = dframe[(dframe.FullName != name)]
newdf = df.copy()
Processor.update_csv(newdf)
@staticmethod
def update_csv(dframe):
"""Writes the filtered DataFrame to a csv file.
This method is used when the user decides to delete record
:param dframe: (Pandas DataFrame) DataFrame containing employee information
:return: nothing
"""
dframe.to_csv("EmployeeData.csv", index=False)
@staticmethod
def generate_employee_id(dframe):
"""Generates unique employee id for the next employee to be added
:param dframe: (Pandas DataFrame) DataFrame containing employee information
:return next_id: (Integer) Next ID to be used for an employee record
"""
max_id = dframe["EmployeeID"].max()
next_id = max_id + 1
return next_id
@staticmethod
def append_row(
df, id, first, last, full, address, ssn, dob, job, startDate, endDate
):
"""Generates a row of data to be appended to a pandas DataFrame
        :param df: (Pandas DataFrame) DataFrame containing employee information
:param id: (Integer) Next ID to be used for an employee record
:param first: (String) First Name to be used for an employee record
:param last: (String) Last Name to be used for an employee record
:param full: (String) Full Name to be used for an employee record
:param address: (String) Address to be used for an employee record
:param ssn: (String) Social Security Number to be used for an employee record
:param dob: (String) Date of Birth to be used for an employee record
:param job: (String) Job Title to be used for an employee record
:param startDate: (String) Start Date to be used for an employee record
:param endDate: (String) End Date to be used for an employee record
:return df: (Pandas DataFrame) a new Pandas DataFrame to be written to a csv
"""
new_row = {
"EmployeeID": id,
"FirstName": first,
"LastName": last,
"FullName": full,
"Address": address,
"ssn": ssn,
"DateOfBirth": dob,
"JobTitle": job,
"StartDate": startDate,
"EndDate": endDate,
}
# append row to the dataframe
df = df.append(new_row, ignore_index=True)
return df
@staticmethod
def append_to_csv(df):
"""Writes a new DataFarme to the csv file.
This method is used when the user decides to add a new record to the csv
:param df: (Pandas DataFrame) DataFrame containing employee information
:return: nothing
"""
df.to_csv("EmployeeData.csv", index=False)
@staticmethod
def isValidSSN(str):
"""Validates the social security format
:param str: (String) string that represents the social security number
:return: (Boolean)
"""
# This code is contributed by avanitrachhadiya2155
# Regex to check valid
# SSN (Social Security Number).
regex = "^(?!666|000|9\\d{2})\\d{3}-(?!00)\\d{2}-(?!0{4})\\d{4}$"
# Compile the ReGex
p = re.compile(regex)
# If the string is empty
# return false
if str == None:
return False
# Return if the string
# matched the ReGex
if re.search(p, str):
return True
else:
return False
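    # Example (sketch): Processor.isValidSSN("123-45-6789") -> True, while
    # Processor.isValidSSN("000-12-3456") -> False (area number 000 is rejected).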
class IO:
"""Performs Input and Output tasks"""
@staticmethod
def get_menu(argument):
"""Uses dictionaries to display options to the user
        :param argument: (Integer) number of the menu entry to return
:return: (String) the value of the switcher dictionary
"""
def one():
return "1) Print a list of all employees"
def two():
return "2) Print a list of employees currently employed"
def three():
return "3) Print a list of employees who have left in the past month"
def four():
return "4) Display reminder to schedule annual review"
def five():
return "5) Capture employee information"
def six():
return "6) Delete record"
def seven():
return "7) Exit"
switcher = {
1: one(),
2: two(),
3: three(),
4: four(),
5: five(),
6: six(),
7: seven(),
}
return switcher.get(argument, "Invalid Selection")
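    # Example (sketch): IO.get_menu(4) returns
    # "4) Display reminder to schedule annual review"; anything outside 1-7
    # returns "Invalid Selection".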
@staticmethod
def input_menu_choice():
"""Gets the menu choice from a user
:param: None
:return: string
"""
while True:
try:
choice = str(
input("Which option would you like to perform? [1 to 7] - ")
).strip()
if choice not in ["1", "2", "3", "4", "5", "6", "7"]:
raise ValueError("Choice not an option, enter 1, 2, 3, 4, 5, 6, 7")
except ValueError as e:
print(e)
else:
break
print() # Add an extra line for looks
return choice
@staticmethod
def input_press_to_continue(optional_message=""):
"""Pause program and show a message before continuing
:param optional_message: An optional message you want to display
:return: nothing
"""
print(optional_message)
input("Press the [Enter] key to continue.")
@staticmethod
def print_all_employees(dframe):
"""Displays all employees
:param dframe: (Pandas DataFrame) a Pandas DataFrame containing all employee info.
:return: nothing
"""
IO.print_header()
print("List of all employees: ")
IO.print_footer()
df = dframe.copy()
df["StartDate"] = pd.to_datetime(df["StartDate"])
print(tabulate(df, headers="keys", tablefmt="psql", showindex=False))
@staticmethod
def get_employee_db():
"""Reads the csv and puts it in a pandas dataframe
:param: None
:return df: (Data Frame) a pandas dataframe
"""
df = pd.read_csv("EmployeeData.csv")
return df
@staticmethod
def print_all_employees_employed(dframe):
"""Displays the employees currently employed at the company
:param dframe: (Pandas DataFrame) DataFrame containing employee information
:return: nothing
"""
        # Filter out the employees who have left, i.e. those with a real
        # 'EndDate'. Employees currently employed have EndDate == "NONE".
newdf = dframe[(dframe.EndDate == "NONE")]
df = newdf.copy()
df["StartDate"] = pd.to_datetime(df["StartDate"])
IO.print_header()
print("List of all employees currently employed: ")
IO.print_footer()
print(tabulate(df, headers="keys", tablefmt="psql", showindex=False))
@staticmethod
def print_employees_departures(dframe):
"""Displays a list of employees that have left the company in the past 30 days
:param dframe: (Pandas DataFrame) A DataFrame that contains employee information
:return: nothing
"""
        # Keep only the employees who have left, i.e. those whose EndDate is a
        # real date rather than the literal "NONE".
df = dframe[(dframe.EndDate != "NONE")]
newdf = df.copy()
newdf["EndDate"] = pd.to_datetime(newdf["EndDate"])
date = datetime.datetime.today().replace(microsecond=0)
df_filter = newdf[newdf.EndDate > date - pd.to_timedelta("30day")]
IO.print_header()
print("List of all employees who have left the company in the past 30 days: ")
IO.print_footer()
print(tabulate(df_filter, headers="keys", tablefmt="psql", showindex=False))
@staticmethod
def print_review_reminders(dframe):
"""Displays a list of employees that have left the company in the past 30 days
:param dframe: (Pandas DataFrame) A DataFrame that contains employee information
:return: nothing
"""
df = dframe[(dframe.EndDate == "NONE")]
newdf = df.copy()
date = datetime.datetime.today().replace(microsecond=0)
newdf["StartDate"] = pd.to_datetime(newdf["StartDate"])
newdf['Month'] = pd.DatetimeIndex(newdf['StartDate']).month
newdf['Day'] = pd.DatetimeIndex(newdf['StartDate']).day
newdf['CalendarYear'] = date.year
newdf['DateForReview'] = pd.to_datetime((newdf.CalendarYear * 10000 + newdf.Month * 100 + newdf.Day).apply(str),
format='%Y%m%d')
df_filter = newdf[(newdf.DateForReview - pd.to_timedelta("90days") < date) & (newdf.DateForReview >= date)]
df_df = df_filter.drop(['Month', 'Day', 'CalendarYear', 'ssn'], axis=1)
#IO.print_header()
print()
        print('FRIENDLY REMINDER! Annual Reviews are coming up for the following employees: ')
#IO.print_footer()
print(tabulate(df_df, headers="keys", tablefmt="psql", showindex=False))
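    # Example (sketch): with today = 2021-05-01, an employee whose start-date
    # anniversary falls on 2021-06-15 satisfies both filter conditions above
    # (2021-06-15 minus 90 days < today <= 2021-06-15) and appears in the list.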
@staticmethod
def input_name_to_delete():
"""Captures the name of the employee to delete
:param: None
:return strName: (String) String containing the full name of the person
"""
while True:
try:
strName = str(input("Enter Full Name: ")).strip()
if strName.isnumeric():
raise ValueError("Name is Numeric. Enter a valid name: ")
elif strName == "":
raise ValueError("Name is empty. Enter a valid Name: ")
except ValueError as e:
print(e)
else:
break
return strName
@staticmethod
def capture_employee_data(dframe):
"""Captures employee data for new record
:param dframe: (Pandas DataFrame) a DataFrame with employee info
:return employeeID: (Integer) Unique Employee ID
:return firstName: (String) First Name
:return lastName: (String) Last Name
:return fullName: (String) Full Name
:return address: (String) Address
:return ssn: (String) Social Security Number
:return dateOfBirth: (String) Date of Birth
:return jobTitle: (String) Job Title
:return startDate: (String) Start Date
:return endDate: (String) End Date
"""
employeeID = Processor.generate_employee_id(dframe)
firstName = IO.capture_first_name()
lastName = IO.capture_last_name()
fullName = firstName + " " + lastName
address = IO.capture_address()
ssn = IO.capture_ssn()
dateOfBirth = IO.capture_date_of_birth()
jobTitle = IO.capture_job_title()
startDate = IO.capture_start_date()
endDate = IO.capture_end_date()
return (
employeeID,
firstName,
lastName,
fullName,
address,
ssn,
dateOfBirth,
jobTitle,
startDate,
endDate,
)
@staticmethod
def capture_first_name():
"""Captures First Name
:param: None
:return: Nothing
"""
while True:
try:
strText = str(input("Enter First Name: ")).strip()
if strText.isnumeric():
raise ValueError(
"First Name is Numeric. Enter a valid First Name: "
)
elif strText == "":
raise ValueError("First Name is empty. Enter a valid First Name: ")
except ValueError as e:
print(e)
else:
break
return strText
@staticmethod
def capture_last_name():
"""Captures Last Name
:param: None
:return: Nothing
"""
while True:
try:
strText = str(input("Enter Last Name: ")).strip()
if strText.isnumeric():
raise ValueError("Last Name is Numeric. Enter a valid Last name: ")
elif strText == "":
raise ValueError("Last Name is empty. Enter a valid Last Name: ")
except ValueError as e:
print(e)
else:
break
return strText
@staticmethod
def capture_address():
"""Captures Address
:param: None
:return: Nothing
"""
while True:
try:
strText = str(input("Enter Address: ")).strip()
if strText.isnumeric():
raise ValueError("Address is Numeric. Enter a valid address: ")
elif strText == "":
raise ValueError("Address is empty. Enter a valid address: ")
except ValueError as e:
print(e)
else:
break
return strText
@staticmethod
def capture_ssn():
"""Captures Social Security Number
:param: None
:return: Nothing
"""
while True:
try:
strText = str(input("Enter ssn (000-00-0000): ")).strip()
if Processor.isValidSSN(strText) == False:
raise ValueError(
"ssn is not in the proper format. Enter a valid ssn: "
)
elif strText == "":
raise ValueError("ssn is empty. Enter a valid ssn: ")
except ValueError as e:
print(e)
else:
break
return strText
@staticmethod
def capture_date_of_birth():
"""Captures Date of Birth
:param: None
:return: Nothing
"""
formt = "%m/%d/%Y"
while True:
try:
strText = str(
input("Enter Date of Birth, MM/DD/YYYY (%m/%d/%Y): ")
).strip()
res = bool(datetime.datetime.strptime(strText, formt))
except ValueError as e:
print(e)
else:
break
return strText
@staticmethod
def capture_job_title():
"""Captures Job Title
:param: None
:return: Nothing
"""
while True:
try:
strText = str(input("Enter Job Title: ")).strip()
if strText.isnumeric():
raise ValueError("Job Title is Numeric. Enter a valid Job Title: ")
elif strText == "":
raise ValueError("Job Title is empty. Enter a valid Job Title: ")
except ValueError as e:
print(e)
else:
break
return strText
@staticmethod
def capture_start_date():
"""Captures Start Date
:param: None
:return: Nothing
"""
formt = "%m/%d/%Y"
while True:
try:
strText = str(
input("Enter Start Date, MM/DD/YYYY (%m/%d/%Y): ")
).strip()
res = bool(datetime.datetime.strptime(strText, formt))
except ValueError as e:
print(e)
else:
break
return strText
@staticmethod
def capture_end_date():
"""Captures End Date
:param: None
:return: Nothing
"""
formt = "%m/%d/%Y"
while True:
strText = (
str(input("Enter End Date, MM/DD/YYYY (%m/%d/%Y): ")).strip().upper()
)
if strText != "NONE":
try:
res = bool(datetime.datetime.strptime(strText, formt))
except ValueError as e:
print(e)
else:
break
else:
break
return strText
@staticmethod
def activate_reminders(dframe):
"""Activate the reminders
:param: None
:return: Nothing
"""
IO.print_review_reminders(dframe)
@staticmethod
def print_header():
"""Prints the header of the report
:param: None
:return: nothing
"""
print(
"+--------------+-------------+------------+--------------"
"+-----------+-------------+---------------+------------"
"+---------------------+-----------+"
)
@staticmethod
def print_footer():
"""Prints the footer of the report
:param: None
:return: nothing
"""
print(
"+--------------+-------------+------------+--------------"
"+-----------+-------------+---------------+------------"
"+---------------------+-----------+"
)
# Main Body of Script ------------------------------------------------------ #
if __name__ == "__main__":
while True:
# reminder for annual review can be a separate class
print(result)
print("Menu of Options")
print(IO.get_menu(1))
print(IO.get_menu(2))
print(IO.get_menu(3))
print(IO.get_menu(4))
print(IO.get_menu(5))
print(IO.get_menu(6))
print(IO.get_menu(7))
IO.activate_reminders(IO.get_employee_db())
# menu printed
strChoice = IO.input_menu_choice() # Get menu option
s = UserSelection()
s.switch(
strChoice
) # Calls the UserSelection class to handle the tasks in the menu
IO.input_press_to_continue(strStatus)
continue # to show the menu
| 29.783237 | 120 | 0.539835 | 19,558 | 0.948957 | 0 | 0 | 17,248 | 0.836875 | 0 | 0 | 8,960 | 0.43474 |
34fa480bc9c2232054eb51335128e85dbc56e507
| 169 |
py
|
Python
|
mskit/metric/__init__.py
|
gureann/MSKit
|
8b360d38288100476740ad808e11b6c1b454dc2c
|
[
"MIT"
] | null | null | null |
mskit/metric/__init__.py
|
gureann/MSKit
|
8b360d38288100476740ad808e11b6c1b454dc2c
|
[
"MIT"
] | null | null | null |
mskit/metric/__init__.py
|
gureann/MSKit
|
8b360d38288100476740ad808e11b6c1b454dc2c
|
[
"MIT"
] | null | null | null |
from . import similarity
from .distance import frechet_dist
from .similarity import pcc, sa
from . import robust
from .robust import iqr, cv, fwhm, count_missing_values
| 28.166667 | 55 | 0.804734 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
34fae8de500f0fefa1f47e343071d4f10241d272
| 229 |
py
|
Python
|
__main__.py
|
Luke-zhang-04/powercord-plugin-emojify
|
8713f048fb6da160cfdc7d2e7e7aa925eeaaf79e
|
[
"MIT"
] | null | null | null |
__main__.py
|
Luke-zhang-04/powercord-plugin-emojify
|
8713f048fb6da160cfdc7d2e7e7aa925eeaaf79e
|
[
"MIT"
] | null | null | null |
__main__.py
|
Luke-zhang-04/powercord-plugin-emojify
|
8713f048fb6da160cfdc7d2e7e7aa925eeaaf79e
|
[
"MIT"
] | null | null | null |
import scraper.scraper as scraper
import scraper.parser as parser
import sys
from dotenv import load_dotenv
load_dotenv()
if __name__ == "__main__":
if "--noScrape" not in sys.argv:
scraper.main()
parser.main()
| 19.083333 | 36 | 0.716157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.09607 |
34fdc3e2fc15cdbce1665add1d016bcc03924a78
| 1,928 |
py
|
Python
|
monitor/redisplay_monitor.py
|
perone/redisplay
|
32a8902fafec1af6ecfb12b57f8412dc7018940d
|
[
"MIT"
] | 20 |
2015-01-14T22:52:27.000Z
|
2018-02-20T20:34:22.000Z
|
monitor/redisplay_monitor.py
|
perone/redisplay
|
32a8902fafec1af6ecfb12b57f8412dc7018940d
|
[
"MIT"
] | null | null | null |
monitor/redisplay_monitor.py
|
perone/redisplay
|
32a8902fafec1af6ecfb12b57f8412dc7018940d
|
[
"MIT"
] | null | null | null |
from __future__ import division
import time
import json
from collections import deque
import serial
import redis
import numpy as np
def build_update_basic(info):
basic_info = {
"cmd": "update_basic",
"clients" : "%d" % info["connected_clients"],
"memory" : info["used_memory_human"],
"ops/s": "%d" % info["instantaneous_ops_per_sec"],
}
return basic_info
def build_update_advanced(info):
advanced_info = {
"cmd": "update_advanced",
"rej_conn" : "%d" % info["rejected_connections"],
"key_hits" : "%d" % info["keyspace_hits"],
"key_miss": "%d" % info["keyspace_misses"],
}
return advanced_info
def build_update_ops_sec(ops_sec_deque):
    arr = np.array(ops_sec_deque, dtype=float)  # float dtype so the in-place scaling below works
arr_max = arr.max()
if arr_max > 0:
arr *= 40/arr_max
ops_sec = {
"cmd" : "update_ops_sec",
"graph_values" : arr.tolist(),
"max_value" : "%d" % arr_max,
}
return ops_sec
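# Example (sketch): a deque of [0, 10, 20] scales to graph_values
# [0.0, 20.0, 40.0] with max_value "20" (bars are normalised to 40 units).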
def run_main():
    print("Stat Cube Redis Monitor")
    stat_cube = serial.Serial("/dev/ttyUSB0", 9600)
    redis_conn = redis.StrictRedis(host='localhost', port=6379, db=0)
    ops_sec_deque = deque([0] * 40)
    print("Monitoring Redis...")
    while True:
        info = redis_conn.info()
        _ = ops_sec_deque.popleft()
        ops_sec_deque.append(info["instantaneous_ops_per_sec"])
        basic_info = json.dumps(build_update_basic(info))
        stat_cube.write((basic_info + '\n').encode())  # pyserial expects bytes on Python 3
        stat_cube.flush()
        time.sleep(0.1)
        advanced_info = json.dumps(build_update_advanced(info))
        stat_cube.write((advanced_info + '\n').encode())
        stat_cube.flush()
        time.sleep(0.1)
        ops_sec_info = json.dumps(build_update_ops_sec(ops_sec_deque))
        stat_cube.write((ops_sec_info + '\n').encode())
        stat_cube.flush()
        time.sleep(0.1)
        time.sleep(1)
if __name__ == '__main__':
run_main()
| 25.706667 | 70 | 0.618776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 404 | 0.209544 |
34fe561195c6b90ab544a5eb2c6644d2c927879f
| 433 |
py
|
Python
|
maniacal-moths/newsly/aggregator/models.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | null | null | null |
maniacal-moths/newsly/aggregator/models.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | null | null | null |
maniacal-moths/newsly/aggregator/models.py
|
Kushagra-0801/summer-code-jam-2020
|
aae9a678b0b30f20ab3cc6cf2b0606ee1f762ca0
|
[
"MIT"
] | 1 |
2020-08-04T05:44:34.000Z
|
2020-08-04T05:44:34.000Z
|
from django.db import models
from django.urls import reverse
from django.utils import timezone
class Article(models.Model):
    title = models.CharField(max_length=100)
    content = models.TextField()
    date_posted = models.DateTimeField(default=timezone.now)
    author = models.CharField(max_length=100)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('Article-detail', kwargs={'pk': self.pk})
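# Example (sketch, assuming the app's URLconf registers an 'Article-detail'
# route): article.get_absolute_url() resolves that route with the article's
# primary key, e.g. '/article/3/'.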
| 30.928571 | 64 | 0.704388 | 369 | 0.852194 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.046189 |
34ff02f32b21e2a010eed7ca24f81bd53b637b63
| 73 |
py
|
Python
|
syft_tensorflow/serde/__init__.py
|
shubham3121/PySyft-TensorFlow
|
a8a6e47f206e324469dbeb995dc7117c09438ba0
|
[
"Apache-2.0"
] | 39 |
2019-10-02T13:48:03.000Z
|
2022-01-22T21:18:43.000Z
|
syft_tensorflow/serde/__init__.py
|
shubham3121/PySyft-TensorFlow
|
a8a6e47f206e324469dbeb995dc7117c09438ba0
|
[
"Apache-2.0"
] | 19 |
2019-10-10T22:04:47.000Z
|
2020-12-15T18:00:34.000Z
|
syft_tensorflow/serde/__init__.py
|
shubham3121/PySyft-TensorFlow
|
a8a6e47f206e324469dbeb995dc7117c09438ba0
|
[
"Apache-2.0"
] | 12 |
2019-10-24T15:11:27.000Z
|
2022-03-25T09:03:52.000Z
|
from syft_tensorflow.serde.serde import MAP_TF_SIMPLIFIERS_AND_DETAILERS
| 36.5 | 72 | 0.917808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
550139e4cbbe8d6698266d6e274ba52d8ab1332c
| 549 |
py
|
Python
|
challenges/03_analysis/A_re.py
|
deniederhut/workshop_pyintensive
|
f8f494081c6daabeae0724aa058c2b80fe42878b
|
[
"BSD-2-Clause"
] | 1 |
2016-10-04T00:04:56.000Z
|
2016-10-04T00:04:56.000Z
|
challenges/03_analysis/A_re.py
|
deniederhut/workshop_pyintensive
|
f8f494081c6daabeae0724aa058c2b80fe42878b
|
[
"BSD-2-Clause"
] | 8 |
2015-12-26T05:49:39.000Z
|
2016-05-26T00:10:57.000Z
|
challenges/03_analysis/A_re.py
|
deniederhut/workshop_pyintensive
|
f8f494081c6daabeae0724aa058c2b80fe42878b
|
[
"BSD-2-Clause"
] | null | null | null |
#!/bin/env python
# In this challenge, we are going to use regular expressions to manipulate
# text data
import re
# 1. Compile a regular expression that matches URLs
P_URL = re.compile(r'http://dlab\.berkeley\.edu', flags=re.I)
# 2. Using that pattern, write a function that pulls all of the URLs out of a document and returns them as a list
def get_urls(document):
    assert isinstance(document, str), "document must be a string"
    urls = []
    for match in P_URL.finditer(document):
        urls.append(match.group())
    return urls
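# Quick demo (sketch; note the compiled pattern above matches the D-Lab URL
# specifically rather than URLs in general):
print(get_urls("Visit http://dlab.berkeley.edu for workshops"))
# -> ['http://dlab.berkeley.edu']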
| 26.142857 | 113 | 0.71949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.537341 |
5501abe3bf73d0c0ea6df544abcad09c5f1dc8eb
| 8,706 |
py
|
Python
|
src/pipeline/featureranking.py
|
heindorf/wsdmcup17-wdvd-classification
|
7c75447370b0645276e1f918ed1215a3e8a6c62e
|
[
"MIT"
] | 2 |
2018-03-21T13:21:43.000Z
|
2018-06-13T21:58:51.000Z
|
src/pipeline/featureranking.py
|
wsdm-cup-2017/wsdmcup17-wdvd-classification
|
7c75447370b0645276e1f918ed1215a3e8a6c62e
|
[
"MIT"
] | null | null | null |
src/pipeline/featureranking.py
|
wsdm-cup-2017/wsdmcup17-wdvd-classification
|
7c75447370b0645276e1f918ed1215a3e8a6c62e
|
[
"MIT"
] | 2 |
2018-03-21T14:07:32.000Z
|
2020-02-24T10:40:52.000Z
|
# -----------------------------------------------------------------------------
# WSDM Cup 2017 Classification and Evaluation
#
# Copyright (c) 2017 Stefan Heindorf, Martin Potthast, Gregor Engels, Benno Stein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import itertools
import logging
import pandas as pd
from sklearn import ensemble
from sklearn.externals.joblib import Parallel, delayed
import config
from src import evaluationutils
_logger = logging.getLogger()
########################################################################
# Feature Ranking
########################################################################
def rank_features(training, validation):
_logger.info("Ranking features...")
metrics = _compute_metrics_for_single_features(training, validation)
group_metrics = _compute_metrics_for_feature_groups(training, validation)
metrics = pd.concat([metrics, group_metrics], axis=0)
_output_sorted_by_group(
validation.get_time_label(), validation.get_system_name(),
metrics, validation.get_group_names(), validation.get_subgroup_names())
_logger.info("Ranking features... done.")
def _compute_metrics_for_single_features(training, validation):
"""Return a Pandas data frame with metrics for every single feature."""
arguments = []
for feature in validation.get_features():
# each feature name is a tuple itself and
# here we take the last element of this tuple
training2 = training.select_feature(feature[-1])
validation2 = validation.select_feature(feature[-1])
argument = (training2, validation2, feature, )
arguments.append(argument)
result_list = Parallel(n_jobs=config.FEATURE_RANKING_N_JOBS,
backend='multiprocessing')(
delayed(_compute_feature_metrics_star)(x) for x in arguments)
result = pd.concat(result_list, axis=0)
return result
def _compute_metrics_for_feature_groups(training, validation):
arguments = []
for subgroup in validation.get_subgroups():
# each feature name is a tuple itself and here we take the last
# element of this tuple
training2 = training.select_subgroup(subgroup[-1])
validation2 = validation.select_subgroup(subgroup[-1])
argument = (training2, validation2, subgroup + ('ALL', ), )
arguments.append(argument)
for group in validation.get_groups():
training2 = training.select_group(group)
validation2 = validation.select_group(group)
argument = (training2, validation2, (group, 'ALL', 'ALL'),)
arguments.append(argument)
result_list = Parallel(n_jobs=config.FEATURE_RANKING_N_JOBS,
backend='multiprocessing')(
delayed(_compute_feature_metrics_star)(x) for x in arguments)
result = pd.concat(result_list, axis=0)
return result
# This method is called by multiple processes
def _compute_feature_metrics_star(args):
return _compute_feature_metrics(*args)
# This method is called by multiple processes
def _compute_feature_metrics(training, validation, label):
_logger.debug("Computing metrics for %s..." % str(label))
index = pd.MultiIndex.from_tuples(
[label], names=['Group', 'Subgroup', 'Feature'])
_logger.debug("Using random forest...")
clf = ensemble.RandomForestClassifier(random_state=1, verbose=0, n_jobs=-1)
evaluationutils.fit(clf, training, index)
y_pred, y_score = evaluationutils.predict(clf, validation, index)
validation_result = evaluationutils.compute_metrics(
index, validation.get_metrics_meta(), validation.get_Y(), y_score, y_pred)
# computing the feature metrics on the training set is useful for
# identifying overfitting
training_y_pred, training_y_score = evaluationutils.predict(clf, training, index)
training_result = evaluationutils.compute_metrics_for_mask(
index, evaluationutils.get_content_mask(training.get_metrics_meta(), 'ALL'), 'ALL',
training.get_Y(), training_y_score, training_y_pred)
training_result.columns = list(itertools.product(
['TRAINING'], training_result.columns.values))
result = pd.concat([validation_result, training_result], axis=1)
return result
def _output_sorted_by_auc_pr(time_label, system_name, metrics):
"""Output the metrics sorted by area under precision-recall curve."""
_logger.debug("output_sorted_by_auc_pr...")
metrics.sort_values([('ALL', 'PR')], ascending=False, inplace=True)
metrics.to_csv(config.OUTPUT_PREFIX + "_" + time_label + "_" +
system_name + "_feature_ranking.csv")
latex = metrics.loc[:, evaluationutils.COLUMNS]
# latex.reset_index(drop=True, inplace=True)
latex.to_latex(config.OUTPUT_PREFIX + "_" + time_label + "_" +
system_name + "_feature_ranking.tex", float_format='{:.3f}'.format)
n_features = min(9, len(metrics) - 1)
selection = metrics.iloc[0:n_features] \
.loc[:, [('ALL', 'Feature'), ('ALL', 'PR')]]
_logger.info("Top 10 for all content\n" +
(selection.to_string(float_format='{:.4f}'.format)))
_logger.debug("output_sorted_by_auc_pr... done.")
def _output_sorted_by_group(
time_label, system_name, metrics, group_names, subgroup_names):
"""Output the metrics sorted by group and by PR-AUC within a group."""
_logger.debug('_output_sorted_by_group...')
sort_columns = ['_Group', '_Subgroup', '_Order', '_Feature']
ascending_columns = [True, True, False, True]
metrics['_Group'] = metrics.index.get_level_values('Group')
metrics['_Subgroup'] = metrics.index.get_level_values('Subgroup')
metrics['_Feature'] = metrics.index.get_level_values('Feature')
subgroup_names = ['ALL'] + subgroup_names
# Define the order of groups and subgroups
metrics['_Group'] = metrics['_Group'].astype('category').cat.set_categories(
group_names, ordered=True)
metrics['_Subgroup'] = metrics['_Subgroup'].astype('category').cat.set_categories(
subgroup_names, ordered=True)
# Sort the features by AUC_PR and make sure the subgroup is always shown
# before the single features
metrics['_Order'] = metrics[('ALL', 'PR')]
# without this line, the following line causes a PerformanceWarning
metrics.sort_index(inplace=True)
metrics.loc[(metrics['_Feature'] == 'ALL'), '_Order'] = 1.0
metrics.sort_values(by=sort_columns,
ascending=ascending_columns, inplace=True)
metrics = metrics.drop(sort_columns, axis=1)
metrics.to_csv(config.OUTPUT_PREFIX + "_" + time_label + "_" +
system_name + "_feature_groups.csv")
latex_names = metrics.apply(_compute_latex_name, axis=1)
metrics.set_index(latex_names, inplace=True)
metrics = evaluationutils.remove_columns(metrics, evaluationutils.CURVES)
metrics = evaluationutils.remove_columns(metrics, evaluationutils.STATISTICS)
evaluationutils.print_metrics_to_latex(
metrics, config.OUTPUT_PREFIX + "_" + time_label + "_" +
system_name + "_feature_groups.tex")
_logger.debug('_output_sorted_by_group... done.')
def _compute_latex_name(row):
group = row.name[0]
subgroup = row.name[1]
feature = row.name[2]
# Is group?
if subgroup == 'ALL' and feature == 'ALL':
result = "\\quad %s" % group
# Is subgroup?
elif feature == 'ALL':
result = "\\quad\quad %s" % subgroup
# Is feature?
else:
result = "\\quad\\quad\\quad %s" % feature
return result
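# Example (sketch): a row indexed ('contextual', 'user', 'ALL') renders as
# "\quad\quad user", ('contextual', 'ALL', 'ALL') as "\quad contextual", and a
# leaf feature row gets three levels of \quad indentation.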
| 39.93578 | 91 | 0.68045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,102 | 0.356306 |
550309304b59f46612eb7c9d7614edf6b323939f
| 25,470 |
py
|
Python
|
libjmp.py
|
RenolY2/obj2bjmp
|
de5ea2acf4493bec4c1b918b38099685fd9b864e
|
[
"MIT"
] | null | null | null |
libjmp.py
|
RenolY2/obj2bjmp
|
de5ea2acf4493bec4c1b918b38099685fd9b864e
|
[
"MIT"
] | null | null | null |
libjmp.py
|
RenolY2/obj2bjmp
|
de5ea2acf4493bec4c1b918b38099685fd9b864e
|
[
"MIT"
] | null | null | null |
from struct import unpack, pack
from math import ceil, inf, acos, degrees
from vectors import Vector3, Triangle, Vector2, Matrix3x3
from re import match
UPVECTOR = Vector3(0.0, 1.0, 0.0)
FWVECTOR = Vector3(1.0, 0.0, 0.0)
SIDEVECTOR = Vector3(0.0, 0.0, 1.0)
def round_vector(vector, digits):
vector.x = round(vector.x, digits)
vector.y = round(vector.y, digits)
vector.z = round(vector.z, digits)
def read_vertex(v_data):
split = v_data.split("/")
if len(split) == 3:
vnormal = int(split[2])
else:
vnormal = None
v = int(split[0])
return v#, vnormal
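# Example (sketch): read_vertex("12/5/7") -> 12; the normal index is parsed
# into vnormal but not returned.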
def read_uint32(f):
val = f.read(0x4)
return unpack(">I", val)[0]
def read_float_tripple(f):
val = f.read(0xC)
return unpack(">fff", val)
def read_vector3(f):
xyz = unpack(">fff", f.read(0xC))
return Vector3(*xyz)
def read_float(f):
val = f.read(0x4)
return unpack(">f", val)[0]
def read_uint16(f):
return unpack(">H", f.read(2))[0]
def write_uint32(f, val):
f.write(pack(">I", val))
def write_uint16(f, val):
f.write(pack(">H", val))
def write_vector3(f, vector):
f.write(pack(">fff", vector.x, vector.y, vector.z))
def write_float(f, val):
f.write(pack(">f", val))
def read_obj(objfile):
vertices = []
faces = []
face_normals = []
normals = []
floor_type = None
smallest_x = smallest_z = biggest_x = biggest_z = None
for line in objfile:
line = line.strip()
args = line.split(" ")
if len(args) == 0 or line.startswith("#"):
continue
cmd = args[0]
if cmd == "v":
# print(args)
for i in range(args.count("")):
args.remove("")
x, y, z = map(float, args[1:4])
vertices.append((x, y, z))
if smallest_x is None:
# Initialize values
smallest_x = biggest_x = x
smallest_z = biggest_z = z
else:
if x < smallest_x:
smallest_x = x
elif x > biggest_x:
biggest_x = x
if z < smallest_z:
smallest_z = z
elif z > biggest_z:
biggest_z = z
elif cmd == "f":
# if it uses more than 3 vertices to describe a face then we panic!
# no triangulation yet.
if len(args) == 5:
# raise RuntimeError("Model needs to be triangulated! Only faces with 3 vertices are supported.")
v1, v2, v3, v4 = map(read_vertex, args[1:5])
# faces.append(((v1[0] - 1, v1[1]), (v3[0] - 1, v3[1]), (v2[0] - 1, v2[1])))
# faces.append(((v3[0] - 1, v3[1]), (v1[0] - 1, v1[1]), (v4[0] - 1, v4[1])))
faces.append((v1, v2, v3, floor_type))
faces.append((v3, v4, v1, floor_type))
elif len(args) == 4:
v1, v2, v3 = map(read_vertex, args[1:4])
# faces.append(((v1[0]-1, v1[1]), (v3[0]-1, v3[1]), (v2[0]-1, v2[1])))
faces.append((v1, v2, v3, floor_type))
else:
raise RuntimeError("Model needs to be triangulated! Only faces with 3 or 4 vertices are supported.")
# if len(args) != 4:
# raise RuntimeError("Model needs to be triangulated! Only faces with 3 vertices are supported.")
# v1, v2, v3 = map(read_vertex, args[1:4])
# faces.append((v1, v2, v3, floor_type))
elif cmd == "vn":
nx, ny, nz = map(float, args[1:4])
normals.append((nx, ny, nz))
elif cmd == "usemtl":
assert len(args) >= 2
matname = " ".join(args[1:])
floor_type_match = match("^(.*?)(0x[0-9a-fA-F]{4})(.*?)$", matname)
if floor_type_match is not None:
floor_type = int(floor_type_match.group(2), 16)
else:
floor_type = None
# print("Found material:", matname, "Using floor type:", hex(floor_type))
# objects.append((current_object, vertices, faces))
return vertices, faces, normals, (smallest_x, smallest_z, biggest_x, biggest_z)
class BoundaryBox(object):
def __init__(self):
self.start = None
self.end = None
self.mid = None
@classmethod
def from_vector(cls, start, end):
bbox = cls()
bbox.start = start.copy()
bbox.end = end.copy()
bbox.mid = (bbox.start + bbox.end) / 2.0
return bbox
@classmethod
def from_file(cls, f):
bbox = cls()
bbox.start = Vector3(*read_float_tripple(f))
bbox.end = Vector3(*read_float_tripple(f))
return bbox
def write(self, f):
write_vector3(f, self.start)
write_vector3(f, self.end)
def size(self):
diff = self.end - self.start
diff.x = abs(diff.x)
diff.y = abs(diff.y)
diff.z = abs(diff.z)
return diff
def scale(self, x, y, z):
mid = (self.start + self.end) / 2.0
p1 = self.start - mid
p2 = self.end - mid
p1.x *= x
p1.y *= y
p1.z *= z
p2.x *= x
p2.y *= y
p2.z *= z
self.start = p1 + mid
self.end = p2 + mid
def contains(self, triangle):
p1, p2, p3 = triangle.origin, triangle.p2, triangle.p3
start, end = self.start, self.end
min_x = min(p1.x, p2.x, p3.x)# - self.mid.x
max_x = max(p1.x, p2.x, p3.x)# - self.mid.x
min_z = min(p1.z, p2.z, p3.z)# - self.mid.z
max_z = max(p1.z, p2.z, p3.z)# - self.mid.z
if max_x < start.x or min_x > end.x:
return False
if max_z < start.z or min_z > end.z:
return False
return True
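    # Note (sketch): contains() tests overlap in X and Z only, so a triangle
    # far above or below the box in Y is still reported as contained; the Y
    # extents are tightened per group later in BJMP.from_obj.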
class BJMPTriangle(object):
def __init__(self):
self._p1_index = None
self._p2_index = None
self._p3_index = None
self.triangle = None
self.data = None
self.normal = None
self.d = 0
self.binormal = None
self.tangent = None
self.p1 = None
self.edge_normal1 = None
self.edge_normal1_d = 0
self.p2 = None
self.edge_normal2 = None
self.edge_normal2_d = 0
self.p3 = None
self.edge_normal3 = None
self.edge_normal3_d = 0
self.coll_data = 0x100
def is_wall(self, normal):
return degrees(acos(normal.cos_angle(Vector3(0.0, 1.0, 0.0)))) > 45
@classmethod
def from_triangle(cls, triangle, coll_data=None):
tri = cls()
tri.triangle = triangle
triangle.normal *= -1
round_vector(triangle.normal, 6)
tri.coll_data = coll_data
tri.normal = triangle.normal
if not tri.is_wall(tri.normal):
tri.binormal = triangle.normal.cross(FWVECTOR) #*-1
flip = True
if tri.binormal.norm() == 0:
#tri.binormal = triangle.normal.cross(UPVECTOR) *-1
#flip = True
tri.binormal = UPVECTOR.copy()
tri.binormal.normalize()
tri.tangent = Vector3(0.0, 0.0, 0.0)
tri.tangent = triangle.normal.cross(tri.binormal)#*-1
tri.tangent.normalize()
if coll_data is None:
tri.coll_data = 0x0100
else:
tri.binormal = triangle.normal.cross(UPVECTOR) #*-1
flip = True
if tri.binormal.norm() == 0:
#tri.binormal = triangle.normal.cross(UPVECTOR) *-1
#flip = True
tri.binormal = FWVECTOR.copy()
tri.binormal.normalize()
tri.tangent = Vector3(0.0, 0.0, 0.0)
tri.tangent = triangle.normal.cross(tri.binormal)#*-1
tri.tangent.normalize()
tri.binormal *= -1
tri.tangent *= -1
if coll_data is None:
tri.coll_data = 0x810
#if flip:
# tmp = tri.tangent
# tri.tangent = tri.binormal*-1
# tri.binormal = tmp#*-1
tri.d = tri.normal.dot(triangle.origin)
p1, p2, p3 = triangle.origin, triangle.p2, triangle.p3
#tri.p1 = Vector3(-p1.z, 0, -p1.x)
tri.edge_normal1 = (p2-p1).cross(tri.normal)
tri.edge_normal1.normalize()
#tri.p2 = Vector3(-p2.z, 0, -p2.x)
tri.edge_normal2 = (p3-p2).cross(tri.normal)
tri.edge_normal2.normalize()
tri.edge_normal2_d = tri.edge_normal2.dot(p2)
#tri.p3 = Vector3(-p3.z, 0, -p3.x)
tri.edge_normal3 = (p1-p3).cross(tri.normal)
tri.edge_normal3.normalize()
tri.edge_normal3_d = tri.edge_normal3.dot(p3)
tri.edge_normal1_d = tri.edge_normal1.dot(p1)
nbt = Matrix3x3(
tri.binormal.x, tri.binormal.y, tri.binormal.z,
tri.normal.x, tri.normal.y, tri.normal.z,
tri.tangent.x, tri.tangent.y, tri.tangent.z
)
p1 = p1 - tri.normal*tri.d
p2 = p2 - tri.normal*tri.d
p3 = p3 - tri.normal*tri.d
p1 = nbt.multiply_vec3(p1)
p2 = nbt.multiply_vec3(p2)
p3 = nbt.multiply_vec3(p3)
tri.p1 = p1
tri.p2 = p2
tri.p3 = p3
"""nbt = Matrix3x3(tri.normal.x, tri.normal.y, tri.normal.z,
tri.tangent.x, tri.tangent.y, tri.tangent.z,
tri.binormal.x, tri.binormal.y, tri.binormal.z)
nbt.transpose()
tri.p1 = Vector3(*nbt.multiply_vec3(p1.x, p1.y, p1.z))
tri.p2 = Vector3(*nbt.multiply_vec3(p2.x, p2.y, p2.z))
tri.p3 = Vector3(*nbt.multiply_vec3(p3.x, p3.y, p3.z))"""
return tri
@classmethod
def from_file(cls, f, vertices):
tri = cls()
start = f.tell()
v1, v2, v3 = read_uint16(f), read_uint16(f), read_uint16(f)
tri.triangle = Triangle(vertices[v1], vertices[v2], vertices[v3])
tri.normal = Vector3(*read_float_tripple(f))
tri.d = read_float(f)
tri.binormal = Vector3(*read_float_tripple(f))
tri.tangent = Vector3(*read_float_tripple(f))
tri.p1 = Vector3(read_float(f), 0, read_float(f))
tri.edge_normal1 = Vector3(*read_float_tripple(f))
tri.edge_normal1_d = read_float(f)
tri.p2 = Vector3(read_float(f), 0, read_float(f))
tri.edge_normal2 = Vector3(*read_float_tripple(f))
tri.edge_normal2_d = read_float(f)
tri.p3 = Vector3(read_float(f), 0, read_float(f))
tri.edge_normal3 = Vector3(*read_float_tripple(f))
tri.edge_normal3_d = read_float(f)
tri.coll_data = read_uint16(f)
assert f.tell() - start == 0x78
return tri
def fill_vertices(self, vertices: list):
try:
v1_index = vertices.index(self.triangle.origin)
except ValueError:
v1_index = len(vertices)
vertices.append(self.triangle.origin)
try:
v2_index = vertices.index(self.triangle.p2)
except ValueError:
v2_index = len(vertices)
vertices.append(self.triangle.p2)
try:
v3_index = vertices.index(self.triangle.p3)
except ValueError:
v3_index = len(vertices)
vertices.append(self.triangle.p3)
self._p1_index = v1_index
self._p2_index = v2_index
self._p3_index = v3_index
def write(self, f):
write_uint16(f, self._p1_index)
write_uint16(f, self._p2_index)
write_uint16(f, self._p3_index)
write_vector3(f, self.normal)
write_float(f, self.d)
write_vector3(f, self.binormal)
write_vector3(f, self.tangent)
write_float(f, self.p1.x)
write_float(f, self.p1.z)
write_vector3(f, self.edge_normal1)
write_float(f, self.edge_normal1_d)
write_float(f, self.p2.x)
write_float(f, self.p2.z)
write_vector3(f, self.edge_normal2)
write_float(f, self.edge_normal2_d)
write_float(f, self.p3.x)
write_float(f, self.p3.z)
write_vector3(f, self.edge_normal3)
write_float(f, self.edge_normal3_d)
write_uint16(f, self.coll_data)
class Group(object):
def __init__(self):
self._tri_count = 0
self._offset = 0
self.bbox = None
self.tri_indices = []
@classmethod
def from_file(cls, f):
group = cls()
val = read_uint32(f)
group._tri_count = (val >> 24) & 0xFF
group._offset = val & 0xFFFFFF
group.bbox = BoundaryBox.from_file(f)
return group
def read_indices(self, indices):
for i in range(self._tri_count):
self.tri_indices.append(indices[i+self._offset])
def add_indices(self, indices):
self._offset = len(indices)
self._tri_count = len(self.tri_indices)
for index in self.tri_indices:
indices.append(index)
def write(self, f):
assert self._tri_count <= 0xFF
assert self._offset <= 0xFFFFFF
write_uint32(f, self._tri_count << 24 | self._offset)
self.bbox.write(f)
class CollisionGroups(object):
def __init__(self):
self.bbox = None
self.grid_x = 0
self.grid_y = 0
self.grid_z = 0
self.cell_dimensions = None
self.cell_inverse = None
self.groups = []
#self.indices = []
@classmethod
def from_model(cls, model):
pass
@classmethod
def from_file(cls, f):
colgroups = cls()
colgroups.bbox = BoundaryBox.from_file(f)
colgroups.grid_x = read_uint32(f)
colgroups.grid_y = read_uint32(f)
colgroups.grid_z = read_uint32(f)
colgroups.cell_dimensions = read_vector3(f)
colgroups.cell_inverse = read_vector3(f)
group_count = read_uint32(f)
colgroups.groups = []
for i in range(group_count):
colgroups.groups.append(Group.from_file(f))
indices = []
index_count = read_uint32(f)
for i in range(index_count):
indices.append(read_uint16(f))
for group in colgroups.groups:
group.read_indices(indices)
return colgroups
def write(self, f):
self.bbox.write(f)
write_uint32(f, self.grid_x)
write_uint32(f, self.grid_y)
write_uint32(f, self.grid_z)
write_vector3(f, self.cell_dimensions)
write_vector3(f, self.cell_inverse)
write_uint32(f, len(self.groups))
indices = []
for group in self.groups:
group.add_indices(indices)
group.write(f)
indices = []
for group in self.groups:
indices.extend(group.tri_indices)
write_uint32(f, len(indices))
for index in indices:
write_uint16(f, index)
class BJMP(object):
def __init__(self):
self.bbox_inner = None
self.bbox_outer = None
self.triangles = []
self.collision_groups = CollisionGroups()
@classmethod
def from_obj(cls, f):
vertices = []
uvs = []
faces = []
bjmp = cls()
collision_type = None
smallest_x = smallest_y = smallest_z = biggest_x = biggest_y = biggest_z = None
for line in f:
line = line.strip()
args = line.split(" ")
if len(args) == 0 or line.startswith("#"):
continue
cmd = args[0]
if cmd == "v":
# print(args)
for i in range(args.count("")):
args.remove("")
x, y, z = map(float, args[1:4])
vertices.append(Vector3(x, y, z))
if smallest_x is None:
# Initialize values
smallest_x = biggest_x = x
smallest_y = biggest_y = y
smallest_z = biggest_z = z
else:
if x < smallest_x:
smallest_x = x
elif x > biggest_x:
biggest_x = x
if y < smallest_y:
smallest_y = y
elif y > biggest_y:
biggest_y = y
if z < smallest_z:
smallest_z = z
elif z > biggest_z:
biggest_z = z
elif cmd == "f":
# if it uses more than 3 vertices to describe a face then we panic!
# no triangulation yet.
if len(args) == 5:
# raise RuntimeError("Model needs to be triangulated! Only faces with 3 vertices are supported.")
v1, v2, v3, v4 = map(read_vertex, args[1:5])
# faces.append(((v1[0] - 1, v1[1]), (v3[0] - 1, v3[1]), (v2[0] - 1, v2[1])))
# faces.append(((v3[0] - 1, v3[1]), (v1[0] - 1, v1[1]), (v4[0] - 1, v4[1])))
tri1 = Triangle(vertices[v1 - 1], vertices[v3 - 1], vertices[v2 - 1])
tri2 = Triangle(vertices[v3 - 1], vertices[v1 - 1], vertices[v4 - 1])
if tri1.normal.norm() != 0:
bjmp_tri1 = BJMPTriangle.from_triangle(tri1, collision_type)
bjmp.triangles.append(bjmp_tri1)
if tri2.normal.norm() != 0:
bjmp_tri2 = BJMPTriangle.from_triangle(tri2, collision_type)
bjmp.triangles.append(bjmp_tri2)
elif len(args) == 4:
v1, v3, v2 = map(read_vertex, args[1:4])
# faces.append(((v1[0]-1, v1[1]), (v3[0]-1, v3[1]), (v2[0]-1, v2[1])))
tri1 = Triangle(vertices[v1 - 1], vertices[v2 - 1], vertices[v3 - 1])
if tri1.normal.norm() != 0:
bjmp_tri1 = BJMPTriangle.from_triangle(tri1, collision_type)
bjmp.triangles.append(bjmp_tri1)
else:
raise RuntimeError(
"Model needs to be triangulated! Only faces with 3 or 4 vertices are supported.")
# if len(args) != 4:
# raise RuntimeError("Model needs to be triangulated! Only faces with 3 vertices are supported.")
# v1, v2, v3 = map(read_vertex, args[1:4])
# faces.append((v1, v2, v3, floor_type))
elif cmd == "usemtl":
assert len(args) >= 2
matname = " ".join(args[1:])
floor_type_match = match("^(.*?)(0x[0-9a-fA-F]{4})(.*?)$", matname)
if floor_type_match is not None:
collision_type = int(floor_type_match.group(2), 16)
else:
collision_type = None
# print("Found material:", matname, "Using floor type:", hex(floor_type))"""
bjmp.bbox_inner = BoundaryBox.from_vector(
Vector3(smallest_x, smallest_y, smallest_z),
Vector3(biggest_x, biggest_y, biggest_z)
)
bjmp.bbox_outer = BoundaryBox.from_vector(
bjmp.bbox_inner.start,
bjmp.bbox_inner.end
)
cell_x = 150.0
cell_z = 150.0
bjmp.collision_groups.bbox = bjmp.bbox_inner
bjmp.collision_groups.cell_dimensions = Vector3(cell_x, biggest_y - smallest_y, cell_z)
bjmp.collision_groups.cell_inverse = Vector3( 1.0/bjmp.collision_groups.cell_dimensions.x,
1.0/bjmp.collision_groups.cell_dimensions.y,
1.0/bjmp.collision_groups.cell_dimensions.z)
x_max = int(ceil((biggest_x - smallest_x) / cell_x))
z_max = int(ceil((biggest_z - smallest_z) / cell_z))
start_x = bjmp.bbox_inner.start.x
start_z = bjmp.bbox_inner.start.z
bjmp.collision_groups.grid_x = x_max
bjmp.collision_groups.grid_y = 1
bjmp.collision_groups.grid_z = z_max
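        # Example (sketch): a 450 x 300 XZ footprint with 150-unit cells
        # yields a 3 x 1 x 2 grid (x_max = 3, z_max = 2).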
for ix in range(x_max):
print(ix, "/", x_max)
for iz in range(z_max):
bbox_x = start_x + ix*cell_x
bbox_z = start_z + iz*cell_z
bbox = BoundaryBox.from_vector(
Vector3(bbox_x, smallest_y, bbox_z),
Vector3(bbox_x+cell_x, biggest_y, bbox_z+cell_z)
)
group = Group()
group.bbox = bbox
min_y = inf
max_y = -inf
for i, triangle in enumerate(bjmp.triangles):
if bbox.contains(triangle.triangle):
tri = triangle.triangle
if tri.origin.y < min_y:
min_y = tri.origin.y
if tri.p2.y < min_y:
min_y = tri.p2.y
if tri.p3.y < min_y:
min_y = tri.p3.y
if tri.origin.y > max_y:
max_y = tri.origin.y
if tri.p2.y > max_y:
max_y = tri.p2.y
if tri.p3.y > max_y:
max_y = tri.p3.y
group.tri_indices.append(i)
if min_y < bbox.start.y:
bbox.start.y = min_y
if max_y > bbox.start.y:
bbox.end.y = max_y
bbox.start.y -= 5.0
bbox.end.y += 5.0
bjmp.collision_groups.groups.append(group)
return bjmp
@classmethod
def from_file(cls, f):
bjmp = cls()
magic = read_uint32(f)
if magic == 0x013304E6:
#self.simple = False
bjmp.bbox_inner = BoundaryBox.from_file(f)
bjmp.bbox_outer = BoundaryBox.from_file(f)
#elif magic == 0x01330237:
# self.simple = True
# self.bbox = BoundaryBox()
else:
raise RuntimeError("Unknown/Unsupported magic: {:x}".format(magic))
vertex_count = read_uint16(f)
vertices = []
for i in range(vertex_count):
vertices.append(read_vector3(f))
bjmp.triangles = []
tri_count = read_uint32(f)
for i in range(tri_count):
bjmp.triangles.append(BJMPTriangle.from_file(f, vertices))
print("Remaining data starts at {0:x}".format(f.tell()))
bjmp.collision_groups = CollisionGroups.from_file(f)
assert f.read() == b""
print("sizes")
print("x z size:", bjmp.collision_groups.grid_x, bjmp.collision_groups.grid_z)
print(bjmp.collision_groups.bbox.size())
print(bjmp.collision_groups.cell_dimensions)
return bjmp
def write(self, f):
write_uint32(f, 0x013304E6)
self.bbox_inner.write(f)
self.bbox_outer.write(f)
vertices = []
for triangle in self.triangles:
triangle.fill_vertices(vertices)
write_uint16(f, len(vertices))
for vertex in vertices:
write_vector3(f, vertex)
write_uint32(f, len(self.triangles))
for triangle in self.triangles:
triangle.write(f)
self.collision_groups.write(f)
if __name__ == "__main__":
import sys
in_name = sys.argv[1]
if in_name.endswith(".obj"):
out_name = in_name + ".bjmp"
with open(in_name, "r") as f:
bjmp = BJMP.from_obj(f)
with open(out_name, "wb") as f:
bjmp.write(f)
elif in_name.endswith(".bjmp"):
out_name = in_name+".obj"
with open(in_name, "rb") as f:
bjmp = BJMP.from_file(f)
with open(out_name, "w") as f:
f.write("# .OBJ generated from Pikmin 2 by Yoshi2's obj2grid.py\n\n")
f.write("# VERTICES BELOW\n\n")
vertex_counter = 0
faces = []
for btriangle in bjmp.triangles:
tri = btriangle.triangle
p1, p2, p3 = tri.origin, tri.p2, tri.p3
f.write("v {} {} {}\n".format(p1.x, p1.y, p1.z))
f.write("v {} {} {}\n".format(p2.x, p2.y, p2.z))
f.write("v {} {} {}\n".format(p3.x, p3.y, p3.z))
#f.write("vt {} {}\n".format(btriangle.p1.x, btriangle.p1.z))
#f.write("vt {} {}\n".format(btriangle.p2.x, btriangle.p2.z))
#f.write("vt {} {}\n".format(btriangle.p3.x, btriangle.p3.z))
faces.append((vertex_counter+1, vertex_counter+2, vertex_counter+3, btriangle.coll_data))
vertex_counter += 3
last_coll = None
for i1, i2, i3, coll in faces:
if coll != last_coll:
f.write("usemtl collision_type0x{:04X}\n".format(coll))
f.write("f {0} {2} {1}\n".format(i1, i2, i3))
print("done")
| 32.322335 | 117 | 0.517314 | 19,527 | 0.766667 | 0 | 0 | 13,763 | 0.540361 | 0 | 0 | 3,234 | 0.126973 |
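The grouping pass above buckets triangles into fixed-size X/Z cells so later collision queries only scan nearby geometry. A minimal self-contained sketch of the same uniform-grid idea -- plain coordinate tuples stand in for the BJMP Triangle/Group structures, and build_grid is an illustrative helper, not part of the file:

def build_grid(tris, cell=150.0):
    """Bucket triangle indices into every (ix, iz) cell their XZ extent touches."""
    grid = {}
    for i, tri in enumerate(tris):
        xs = [p[0] for p in tri]
        zs = [p[1] for p in tri]
        for ix in range(int(min(xs) // cell), int(max(xs) // cell) + 1):
            for iz in range(int(min(zs) // cell), int(max(zs) // cell) + 1):
                grid.setdefault((ix, iz), []).append(i)
    return grid

tris = [((0, 0), (10, 0), (0, 10)), ((140, 140), (160, 140), (140, 160))]
print(build_grid(tris))  # the second triangle straddles four cells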
5504298a3a2d8c197f31284679e78d49ef6eed72
| 585 |
py
|
Python
|
kattis/integerlists.py
|
div5252/competitive-programming
|
111902dff75e79e65213c95055ffb0bb15b76e94
|
[
"WTFPL"
] | 506 |
2018-08-22T10:30:38.000Z
|
2022-03-31T10:01:49.000Z
|
kattis/integerlists.py
|
diegordzr/competitive-programming
|
1443fb4bd1c92c2acff64ba2828abb21b067e6e0
|
[
"WTFPL"
] | 13 |
2019-08-07T18:31:18.000Z
|
2020-12-15T21:54:41.000Z
|
kattis/integerlists.py
|
diegordzr/competitive-programming
|
1443fb4bd1c92c2acff64ba2828abb21b067e6e0
|
[
"WTFPL"
] | 234 |
2018-08-06T17:11:41.000Z
|
2022-03-26T10:56:42.000Z
|
#!/usr/bin/env python3
# https://open.kattis.com/problems/integerlists

for _ in range(int(input())):
    p = input()
    n = int(input())
    i, j = 0, n
    xs = input()[1:-1].split(',')
    front = True
    for c in p:
        if c == 'R':
            front = not front
        elif i == j:
            i += 1
            break
        elif front:
            i += 1
        else:
            j -= 1
    if i > j:
        print('error')
    else:
        if front:
            print('[' + ','.join(xs[i:j]) + ']')
        else:
            print('[' + ','.join(xs[i:j][::-1]) + ']')
| 22.5 | 54 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.17094 |
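The solution avoids materialising any reversals by tracking two cursors and a direction flag; the slice is taken only once at the end. The same trick as a reusable function (apply_ops is an illustrative name, not from the submission):

def apply_ops(program, xs):
    """Apply 'R' (reverse) and 'D' (drop first) lazily with two cursors."""
    i, j, front = 0, len(xs), True
    for op in program:
        if op == 'R':
            front = not front
        elif i == j:        # dropping from an empty list
            return None
        elif front:
            i += 1
        else:
            j -= 1
    sliced = xs[i:j]
    return sliced if front else sliced[::-1]

print(apply_ops('RDD', [1, 2, 3, 4]))  # [2, 1] -- reversed, then two drops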
55046b3036b22157a72d92e77888dc355a149d40
| 3,177 |
py
|
Python
|
main/tests/test_middleware.py
|
uktrade/return-to-office
|
d4c53c734611413c9f8a7624e52dc35910c5ff57
|
[
"MIT"
] | 1 |
2020-10-25T18:16:47.000Z
|
2020-10-25T18:16:47.000Z
|
main/tests/test_middleware.py
|
uktrade/return-to-office
|
d4c53c734611413c9f8a7624e52dc35910c5ff57
|
[
"MIT"
] | 1 |
2020-10-27T07:11:26.000Z
|
2020-10-27T07:11:26.000Z
|
main/tests/test_middleware.py
|
uktrade/return-to-office
|
d4c53c734611413c9f8a7624e52dc35910c5ff57
|
[
"MIT"
] | null | null | null |
import pytest
from django.http import HttpResponse
from django.urls import reverse

from main.middleware import IpRestrictionMiddleware


def dummy_view(_):
    return HttpResponse(status=200)


class TestIpRestrictionMiddleware:
    def test_middleware_is_enabled(self, client, settings):
        settings.IP_RESTRICT = True
        settings.IP_RESTRICT_APPS = ["admin"]
        settings.IP_SAFELIST_XFF_INDEX = -2

        assert client.get(reverse("admin:index")).status_code == 401

    def test_applies_to_specified_apps_only(self, rf, settings):
        """Only apps listed in `settings.IP_WHITELIST_APPS` should be ip restricted"""
        settings.IP_RESTRICT = True
        settings.IP_RESTRICT_APPS = ["admin"]
        settings.IP_SAFELIST_XFF_INDEX = -2

        request = rf.get("/")

        assert IpRestrictionMiddleware(dummy_view)(request).status_code == 200

    def test_not_enabled_ifip_restrict_is_false(self, rf, settings):
        settings.IP_RESTRICT = False
        settings.IP_RESTRICT_APPS = ["admin"]
        settings.IP_SAFELIST_XFF_INDEX = -2

        request = rf.get(reverse("admin:index"), HTTP_X_FORWARDED_FOR="")

        assert IpRestrictionMiddleware(dummy_view)(request).status_code == 200

    @pytest.mark.parametrize(
        "xff_header,expected_status",
        (
            ["1.1.1.1, 2.2.2.2, 3.3.3.3", 200],
            ["1.1.1.1", 401],
            [
                "",
                401,
            ],
        ),
    )
    def test_x_forwarded_header(self, rf, settings, xff_header, expected_status):
        settings.IP_RESTRICT = True
        settings.IP_RESTRICT_APPS = ["admin"]
        settings.ALLOWED_IPS = ["2.2.2.2"]
        settings.IP_SAFELIST_XFF_INDEX = -2

        request = rf.get(reverse("admin:index"), HTTP_X_FORWARDED_FOR=xff_header)

        assert IpRestrictionMiddleware(dummy_view)(request).status_code == expected_status

    @pytest.mark.parametrize(
        "allowed_ips,expected_status", ([["2.2.2.2"], 200], [["1.1.1.1"], 401])
    )
    def test_ips(self, rf, settings, allowed_ips, expected_status):
        settings.IP_RESTRICT = True
        settings.IP_RESTRICT_APPS = ["admin"]
        settings.ALLOWED_IPS = allowed_ips
        settings.IP_SAFELIST_XFF_INDEX = -2

        request = rf.get(reverse("admin:index"), HTTP_X_FORWARDED_FOR="1.1.1.1, 2.2.2.2, 3.3.3.3")

        assert IpRestrictionMiddleware(dummy_view)(request).status_code == expected_status

        settings.ALLOWED_IPS = ["3.3.3.3"]

        assert IpRestrictionMiddleware(dummy_view)(request).status_code == 401

    @pytest.mark.parametrize(
        "allowed_ips,expected_status", ([["2.2.2.2"], 200], [["1.1.1.1"], 401])
    )
    def test_ip_restricted_path(self, rf, settings, allowed_ips, expected_status):
        settings.IP_RESTRICT = True
        settings.IP_RESTRICT_PATH_NAMES = ["main:show-bookings"]
        settings.ALLOWED_IPS = allowed_ips
        settings.IP_SAFELIST_XFF_INDEX = -2

        request = rf.get(
            reverse("main:show-bookings"), HTTP_X_FORWARDED_FOR="1.1.1.1, 2.2.2.2, 3.3.3.3"
        )

        assert IpRestrictionMiddleware(dummy_view)(request).status_code == expected_status
| 34.912088 | 98 | 0.657224 | 2,981 | 0.938307 | 0 | 0 | 1,931 | 0.607806 | 0 | 0 | 442 | 0.139125 |
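The tests pin IP_SAFELIST_XFF_INDEX = -2 because each proxy hop appends an address to X-Forwarded-For, so the second-to-last entry is the one added by the trusted load balancer rather than by the client. A hedged sketch of the check the middleware presumably performs (client_ip_allowed is illustrative, not the real implementation):

def client_ip_allowed(xff_header: str, allowed: set, xff_index: int = -2) -> bool:
    """Pick one hop from the X-Forwarded-For chain and check it against a safelist."""
    ips = [ip.strip() for ip in xff_header.split(',') if ip.strip()]
    try:
        return ips[xff_index] in allowed
    except IndexError:
        return False  # chain too short to contain a trusted hop

assert client_ip_allowed("1.1.1.1, 2.2.2.2, 3.3.3.3", {"2.2.2.2"})
assert not client_ip_allowed("1.1.1.1", {"2.2.2.2"})
assert not client_ip_allowed("", {"2.2.2.2"})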
5505c2fad9d4eaf68b407e24b865a1b9411e4836
| 2,418 |
py
|
Python
|
modelproj/topic.py
|
cesell/modelproj
|
313f89784a19842c866fa2563b326e5d044a2301
|
[
"MIT"
] | null | null | null |
modelproj/topic.py
|
cesell/modelproj
|
313f89784a19842c866fa2563b326e5d044a2301
|
[
"MIT"
] | null | null | null |
modelproj/topic.py
|
cesell/modelproj
|
313f89784a19842c866fa2563b326e5d044a2301
|
[
"MIT"
] | null | null | null |
import json
import re
from urllib.request import urlopen

'''
The use of objects has various benefits.

1. Better control of context
2. State that can be evaluated
3. Data can be created and then processing can be added
4. Clean interface
'''


class TopicSummarizer():
    """TopicSummarizer - Summarizes a wikipedia entry

    Returns:
        str: [Summary of entry]
    """

    TEXT_URL_TMP = 'https://en.wikipedia.org/w/api.php?action=query&prop=extracts&exsentences=2&titles={title}&format=json'
    THUMB_URL_TMP = 'https://en.wikipedia.org/w/api.php?action=query&prop=pageimages&titles={title}&format=json'

    def __init__(self, topic):
        self.topic = str(topic)

    def process(self):
        self._fetch_text()
        self._fetch_thumbnail()
        return self

    def get_results(self, as_text=False):
        if as_text:
            return self.topic + ' summary: ' + self._text
        return TopicSummary(self.topic, self._thumb_url, self._text)

    def _fetch_text(self):
        self._text_api_url = self.TEXT_URL_TMP.format(title=self.topic)
        self._text_resp = self._get_url_json(self._text_api_url)
        self._text = list(self._text_resp['query']['pages'].values())[0]['extract']

    def _fetch_thumbnail(self):
        self._thumb_api_url = self.THUMB_URL_TMP.format(title=self.topic)
        self._thumb_resp = self._get_url_json(self._thumb_api_url)
        self._thumb_url = list(self._thumb_resp['query']['pages'].values())[0]['thumbnail']['source']

    def _get_url_json(self, url):
        resp = urlopen(url)
        resp_body = resp.read()
        return json.loads(resp_body)


class TopicSummary():
    def __init__(self, topic, thumb_url, text):
        self.topic = topic
        self.thumb_url = thumb_url
        self.text = re.sub(r'</*.>', '', text)

    def __repr__(self):
        cn = self.__class__.__name__
        return '%s(%r, %r, %r)' % (cn, self.topic, self.thumb_url, self.text)


def main():
    from argparse import ArgumentParser
    prs = ArgumentParser(description='summarize topics from Wikipedia')
    prs.add_argument('-t', '--topic', help='the target topic', required='True')
    args = prs.parse_args()
    print(TopicSummarizer(args.topic).process().get_results(as_text=True))
    return


if __name__ == '__main__':
    main()
| 30.607595 | 124 | 0.63689 | 1,775 | 0.734078 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.276261 |
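The URL templates above interpolate the title directly into the query string, which breaks for titles containing spaces or ampersands. A hedged sketch of the same MediaWiki extracts request with parameters built by urllib.parse instead (wiki_extract is an illustrative helper, not from the module):

import json
from urllib.parse import urlencode
from urllib.request import urlopen

def wiki_extract(title: str, sentences: int = 2) -> dict:
    params = urlencode({
        'action': 'query', 'prop': 'extracts', 'exsentences': sentences,
        'titles': title, 'format': 'json',
    })
    with urlopen('https://en.wikipedia.org/w/api.php?' + params) as resp:
        return json.loads(resp.read())

# The response's 'pages' dict is keyed by numeric page id, hence the
# list(...)[0] dance in the class above.
# data = wiki_extract('Alan Turing')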
550824fc3e2f47ccef32bd1ac78448a3f415ba0f
| 4,154 |
py
|
Python
|
wanderingpole/classifyingtweets/train_wandering_old.py
|
ssdorsey/wandering-pole
|
606ad8f1979354e01dea1acf01107b88b3b9e91b
|
[
"MIT"
] | null | null | null |
wanderingpole/classifyingtweets/train_wandering_old.py
|
ssdorsey/wandering-pole
|
606ad8f1979354e01dea1acf01107b88b3b9e91b
|
[
"MIT"
] | null | null | null |
wanderingpole/classifyingtweets/train_wandering_old.py
|
ssdorsey/wandering-pole
|
606ad8f1979354e01dea1acf01107b88b3b9e91b
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import json
from simpletransformers.classification import ClassificationModel, ClassificationArgs
import sklearn
from sklearn.model_selection import train_test_split
import torch
import re
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import os
from tqdm import tqdm

np.random.seed(2)

# import the data
# tweets = pd.read_csv('data/postIR_final.csv')
# os.chdir('..')
tweets = pd.read_csv('D:/Dropbox/Twitter/training_data/training_final.csv', encoding='latin1')

# restrict to tweets with coding
tweets = tweets[tweets['uncivil_final'].isin([0, 1])]

# subset to just text and labels, fix columns names
tweets = tweets.loc[:, ['text', 'uncivil_final']]
tweets.columns = ['text', 'labels']

# import other batch
mike = pd.read_excel(r'D:\Dropbox\wandering-pole\wanderingpole\data\new_pull_Michael.xls')
mike = mike[['full_text', 'uncivil']]
mike = mike.rename(columns={'full_text': 'text', 'uncivil': 'labels'})

# extra
mike_extra = pd.read_csv(r'D:\Dropbox\wandering-pole\wanderingpole\data\michael_extra.csv')
mike_extra = mike_extra.rename(columns={'full_text': 'text', 'uncivil': 'labels'})

# pull a bunch of old 0's
old_model = pd.read_csv("D:/Dropbox/Twitter/allMCtweets.csv", encoding='latin1')
old_0 = old_model[old_model['polarizing'] == 0].sample(7432, random_state=619)
old_0 = old_0[['text']]
old_0['labels'] = 0

# combine the new data
tweets = pd.concat([tweets, mike, mike_extra, old_0])

# drop incomplete data
tweets = tweets[tweets['labels'].isin([0, 1])]

# drop duplicates
tweets = tweets.drop_duplicates(subset=['text'])

# delete links
# TODO: convert emoticons
re_url = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
tweets['text'] = tweets['text'].replace(re_url, '', regex=True)

# remove retweet header
re_retweet = r"RT\s@\w+:"
tweets['text'] = tweets['text'].replace(re_retweet, '', regex=True)

# double-check for weird excel handling of ampersand
re_amp = r'&amp;'
tweets['text'] = tweets['text'].replace(re_amp, '', regex=True)

# split train/test
# tweets.loc[:, 'split'] = np.random.choice(['train','validate','test'], len(tweets), p=[.85, .15])
# train = tweets.loc[tweets.split=='train']
# validate = tweets.loc[tweets.split=='validate']
# test = tweets.loc[tweets.split=='test']
tweets.loc[:, 'split'] = np.random.choice(['train', 'test'], len(tweets), p=[.85, .15])
train = tweets.loc[tweets.split == 'train']
test = tweets.loc[tweets.split == 'test']

# build / train
# weights
counts = train['labels'].value_counts().sort_index()
weights = [(1 - (ii / len(train))) * 10 for ii in counts]

model_args = ClassificationArgs()
# model_args.use_early_stopping = True
# model_args.early_stopping_delta = 0.01
# model_args.early_stopping_metric = "mcc"
# model_args.early_stopping_metric_minimize = False
# model_args.early_stopping_patience = 5
# model_args.evaluate_during_training_verbose = True
# model_args.evaluate_during_training_steps = 1000
model_args.output_dir = r'Model_berttweet/'
model_args.cache_dir = r'Model_berttweet/'
model_args.overwrite_output_dir = True
model_args.training_batch_size = 1024
model_args.eval_batch_size = 1024
model_args.num_train_epochs = 5

model = ClassificationModel(
    'bertweet'
    , 'vinai/bertweet-base'
    , num_labels=len(tweets['labels'].unique())
    # , weight=weights  # DO help
    , weight=[.8, 10]
    , use_cuda=True
    , args=model_args
)

model.train_model(train)

# Evaluate the model
# model = ClassificationModel('bertweet'
#                             , 'Model_berttweet/'
#                             , num_labels=2
#                             , args={'eval_batch_size': 512})
result, model_outputs, wrong_predictions = model.eval_model(test)

y_t = list(test.labels)
y_hat = [np.argmax(a) for a in model_outputs]
print(sklearn.metrics.classification_report(y_true=y_t, y_pred=y_hat))
sklearn.metrics.confusion_matrix(y_true=y_t, y_pred=y_hat)

# put out the results
test.loc[:, 'predicted'] = y_hat
| 32.708661 | 194 | 0.684401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,035 | 0.488713 |
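The hand-rolled weights above scale each class inversely to its frequency in the training split. A hedged sketch of that same computation next to the scikit-learn helper that produces balanced weights directly (the toy labels are illustrative):

import numpy as np
from sklearn.utils.class_weight import compute_class_weight

labels = np.array([0] * 900 + [1] * 100)  # imbalanced toy labels

# manual: (1 - class share) * 10 -- mirrors the script's formula
counts = np.bincount(labels)
manual = [(1 - c / len(labels)) * 10 for c in counts]

# library equivalent: n_samples / (n_classes * n_samples_per_class)
balanced = compute_class_weight('balanced', classes=np.array([0, 1]), y=labels)
print(manual, balanced)  # [1.0, 9.0] [0.5555... 5.0]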
5508e6dcf9a3120fc2d2a1fa35c2fb918cef92fb
| 3,339 |
py
|
Python
|
Source/common.py
|
joaohenggeler/twitch-chat-highlights
|
826cda239de2e5185266a04c12a8909ae5f98a3b
|
[
"MIT"
] | null | null | null |
Source/common.py
|
joaohenggeler/twitch-chat-highlights
|
826cda239de2e5185266a04c12a8909ae5f98a3b
|
[
"MIT"
] | null | null | null |
Source/common.py
|
joaohenggeler/twitch-chat-highlights
|
826cda239de2e5185266a04c12a8909ae5f98a3b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3

"""
A module that defines any general purpose functions used by all scripts, including loading configuration files,
connecting to the database, and handling Twitch's timestamp formats.
"""

import json
import sqlite3
from datetime import datetime
from typing import Tuple, Union

####################################################################################################

class CommonConfig():

    # From the config file.
    json_config: dict
    client_id: str
    access_token: str
    database_filename: str

    def __init__(self):
        with open('config.json') as file:
            self.json_config = json.load(file)
            self.__dict__.update(self.json_config['common'])

    def connect_to_database(self) -> sqlite3.Connection:

        db = sqlite3.connect(self.database_filename, isolation_level=None)
        db.row_factory = sqlite3.Row

        db.execute('''PRAGMA journal_mode = WAL;''')
        db.execute('''PRAGMA synchronous = NORMAL;''')
        db.execute('''PRAGMA temp_store = MEMORY;''')

        db.execute('''
                   CREATE TABLE IF NOT EXISTS 'Channel'
                   (
                       'Id' INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
                       'Name' VARCHAR(50) NOT NULL UNIQUE
                   );
                   ''')

        db.execute('''
                   CREATE TABLE IF NOT EXISTS 'Video'
                   (
                       'Id' INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
                       'ChannelId' INTEGER NOT NULL,
                       'TwitchId' VARCHAR(50) NOT NULL UNIQUE,
                       'Title' TEXT NOT NULL,
                       'CreationTime' TIMESTAMP NOT NULL,
                       'Duration' TIME NOT NULL,
                       'YouTubeId' VARCHAR(50) UNIQUE,
                       'Notes' TEXT,

                       FOREIGN KEY (ChannelId) REFERENCES Channel (Id)
                   );
                   ''')

        # VideoId can be NULL when we're storing messages from a live stream, meaning there's no VOD yet.
        db.execute('''
                   CREATE TABLE IF NOT EXISTS 'Chat'
                   (
                       'Id' INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
                       'ChannelId' INTEGER NOT NULL,
                       'VideoId' INTEGER,
                       'Timestamp' TIMESTAMP NOT NULL,
                       'Message' TEXT NOT NULL,

                       FOREIGN KEY (ChannelId) REFERENCES Channel (Id),
                       FOREIGN KEY (VideoId) REFERENCES Video (Id)
                   );
                   ''')

        return db

####################################################################################################

def split_twitch_duration(duration: str) -> Tuple[int, int, int, int]:

    # Duration format: 00h00m00s or 00m00s
    duration = duration.replace('h', ':').replace('m', ':').replace('s', '')
    tokens = duration.split(':', 2)

    hours = int(tokens[-3]) if len(tokens) >= 3 else 0
    minutes = int(tokens[-2]) if len(tokens) >= 2 else 0
    seconds = int(tokens[-1]) if len(tokens) >= 1 else 0
    total_seconds = hours * 3600 + minutes * 60 + seconds

    return hours, minutes, seconds, total_seconds

def convert_twitch_timestamp_to_datetime(timestamp: str) -> datetime:

    # Datetime format: YYYY-MM-DDThh:mm:ss.sssZ
    # Where the following precisions were observed:
    # - YYYY-MM-DDThh:mm:ss.sssssssssZ
    # - YYYY-MM-DDThh:mm:ss.ssZ
    # - YYYY-MM-DDThh:mm:ss.sZ
    # - YYYY-MM-DDThh:mm:ssZ

    # Truncate anything past the microsecond precision.
    if '.' in timestamp:
        microseconds: Union[str, int]
        beginning, microseconds = timestamp.rsplit('.', 1)
        microseconds, _ = microseconds.rsplit('Z', 1)
        timestamp = beginning + '.' + microseconds[:6].ljust(6, '0') + 'Z'

    timestamp = timestamp.replace('Z', '+00:00')
    return datetime.fromisoformat(timestamp)
| 30.081081 | 112 | 0.632824 | 1,688 | 0.505541 | 0 | 0 | 0 | 0 | 0 | 0 | 1,945 | 0.58251 |
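A quick standalone check of the truncation rule above, exercising the four observed precisions (normalize is an illustrative reimplementation, not the module's function):

from datetime import datetime

def normalize(ts: str) -> datetime:
    # Same idea as convert_twitch_timestamp_to_datetime above: clamp the
    # fractional part to 6 digits, then pad with zeros.
    if '.' in ts:
        head, frac = ts.rsplit('.', 1)
        frac = frac.rstrip('Z')
        ts = head + '.' + frac[:6].ljust(6, '0') + 'Z'
    return datetime.fromisoformat(ts.replace('Z', '+00:00'))

for ts in ('2021-01-02T03:04:05.123456789Z', '2021-01-02T03:04:05.12Z',
           '2021-01-02T03:04:05.1Z', '2021-01-02T03:04:05Z'):
    print(normalize(ts))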
550ab4d7e165fcec5a4c9ed00a1fc8a3d4f624ba
| 986 |
py
|
Python
|
unicodetest.py
|
conradstorz/SpeedTrivia
|
e222831223c704f5bb169d4c2d475c9e2a8c4c08
|
[
"Apache-2.0"
] | null | null | null |
unicodetest.py
|
conradstorz/SpeedTrivia
|
e222831223c704f5bb169d4c2d475c9e2a8c4c08
|
[
"Apache-2.0"
] | 1 |
2021-04-26T22:47:19.000Z
|
2021-04-26T22:47:19.000Z
|
unicodetest.py
|
conradstorz/SpeedTrivia
|
e222831223c704f5bb169d4c2d475c9e2a8c4c08
|
[
"Apache-2.0"
] | null | null | null |
from unidecode import unidecode
from unicodedata import name
import ftfy

for i in range(33, 65535):
    if i > 0xEFFFF:
        continue  # Characters in Private Use Area and above are ignored
    if 0xD800 <= i <= 0xDFFF:
        continue
    h = hex(i)
    u = chr(i)
    f = ftfy.fix_text(u, normalization="NFKC")
    a = unidecode(u)
    if a != "[?]" and len(u) != 0 and len(a) != 0 and len(f) != 0:
        new_char = ""
        if u != f:
            for c in list(f):
                new_char += "{}, ".format(ord(c))
            new_char = new_char[:-2]
        else:
            new_char = "Same"
        try:
            txt = name(u).lower()
            # print(txt)
            if 'mark' in txt:
                print(
                    f"dec={i} hex={h} unicode_chr={u} ftfy_chr(s)={f} ftfy_dec={new_char}\n",
                    f"ascii_chr={a} uni_len={len(u)} ascii_len={len(a)} unicode_name={name(u)}"
                )
        except ValueError:
            pass
| 30.8125 | 95 | 0.485801 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.247465 |
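unicodedata.name raises ValueError for unassigned code points, which is why the loop wraps it in try/except; passing a default avoids the exception entirely:

import unicodedata

for cp in (0x0301, 0x00E9, 0xD7FF):
    ch = chr(cp)
    label = unicodedata.name(ch, '<unnamed>')
    print(hex(cp), label)
# U+0301 is 'COMBINING ACUTE ACCENT' -- the kind of 'mark' the script filters for.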
550bcb1222329d8b28b7654dc8f797c3fc71d7e1
| 4,363 |
py
|
Python
|
codes/mosaic.py
|
KurmasanaWT/community
|
5fc9e7da5b3e8df2bc9f85580a070de8c868a656
|
[
"MIT"
] | null | null | null |
codes/mosaic.py
|
KurmasanaWT/community
|
5fc9e7da5b3e8df2bc9f85580a070de8c868a656
|
[
"MIT"
] | null | null | null |
codes/mosaic.py
|
KurmasanaWT/community
|
5fc9e7da5b3e8df2bc9f85580a070de8c868a656
|
[
"MIT"
] | null | null | null |
import dash_bootstrap_components as dbc
from dash import html

from app import app

layout = dbc.Container(
    children=[
        dbc.Card([
            #dbc.CardHeader("EURONEWS (European Union)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/sPgqEHsONK8?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("SKY NEWS (United Kingdom)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/9Auq9mYxFEE?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("FRANCE 24 (France)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/u9foWyMSATM?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("DEUTSCH WELLE (Germany)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/m01az_TdpQI?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("RT (Russia)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://odysee.com/$/embed/RTlivestream/8c06ebe369b6ecf6ad383e4a32bfca34c0168d79?r=RfLjh5uDhbZHt8SDkQFdZyKTmCbSCpWH&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("TRT WORLD (Turkey)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/CV5Fooi8YJA?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("AL JAZEERA (Qatar)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/F-POY4Q0QSI?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("NDTV (India)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/WB-y7_ymPJ4?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("CGTN EUROPE (China)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/FGabkYr-Sfs?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("ANN NEWS (Japan)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/coYw-eVU0Ks?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("NEWS 12 NEW YORK (United States)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/RmmRlztXETI?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        dbc.Card([
            #dbc.CardHeader("WEBCAM UKRAINE (Live)"),
            dbc.CardBody([html.Iframe(className="ytvid", width="420", height="315", src="https://www.youtube.com/embed/3hiyVq44pK8?&autoplay=1&mute=1", allow="fullscreen")]),
        ], className="cardSize-vid"),
        html.Div(dbc.Badge(children=[
            html.Span("* All videos are streamed on "),
            html.A(href='http://www.youtube.com', target='new', children='YouTube'),
            html.Span(" except the RT channel, which is streamed on "),
            html.A(href='http://www.odysee.com', target='new', children='Odysee'),
            html.Span(".")
        ], className="badge-link",
        ))
    ], fluid=True
)


def get():
    return html.Div(layout)


'''
RT RUSSIA
https://rumble.com/embed/vtp5hp/?pub=4&autoplay=2
https://odysee.com/$/embed/RTlivestream/8c06ebe369b6ecf6ad383e4a32bfca34c0168d79?r=RfLjh5uDhbZHt8SDkQFdZyKTmCbSCpWH
'''
| 49.579545 | 247 | 0.608755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,176 | 0.497827 |
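The card markup above is repeated verbatim for every channel; a hedged sketch of a small factory that would build the same cards from a list of embed URLs (make_card and feeds are illustrative, not part of the app):

import dash_bootstrap_components as dbc
from dash import html

def make_card(embed_url: str) -> dbc.Card:
    return dbc.Card([
        dbc.CardBody([html.Iframe(
            className="ytvid", width="420", height="315",
            src=embed_url, allow="fullscreen")]),
    ], className="cardSize-vid")

feeds = [
    "https://www.youtube.com/embed/sPgqEHsONK8?&autoplay=1&mute=1",
    "https://www.youtube.com/embed/9Auq9mYxFEE?&autoplay=1&mute=1",
]
cards = [make_card(url) for url in feeds]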
550c04ca9a44d927c37f244ee230dced2cf832ce
| 4,387 |
py
|
Python
|
app/weibo/views.py
|
guoweikuang/weibo_project
|
38cb2a6d72a16f2f8c1714e83564c833f8e4af0c
|
[
"Apache-2.0"
] | 4 |
2019-03-25T08:47:22.000Z
|
2021-03-16T02:39:29.000Z
|
app/weibo/views.py
|
guoweikuang/weibo_project
|
38cb2a6d72a16f2f8c1714e83564c833f8e4af0c
|
[
"Apache-2.0"
] | 1 |
2020-01-06T03:37:46.000Z
|
2020-01-06T03:37:46.000Z
|
app/weibo/views.py
|
guoweikuang/weibo_project
|
38cb2a6d72a16f2f8c1714e83564c833f8e4af0c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~~~
main module

#author guoweikuang
"""
from flask import render_template
from flask import redirect
from flask import url_for
from flask import request
from flask_login import login_required
from pyecharts import Bar
from pyecharts.utils import json_dumps
#from pyecharts import json_dumps
import json

from . import weibo
from .forms import CrawlForm
from .forms import AnalyzeForm
from app import run_async_crawl
from app import run_build_vsm
from ..utils import filter_data
from ..utils import run_k_means
from ..utils import classify_k_cluster
from ..utils import get_mysql_content
from ..utils import get_mysql_opinion
from ..utils import run_old_all_process
from ..utils import get_max_hot_keyword_chart
from ..utils import list_top_hot_topic
from ..utils import get_hot_text_from_category
from ..utils import bar_chart

REMOTE_HOST = "https://pyecharts.github.io/assets/js"


@weibo.route('/', methods=['GET', 'POST'])
def index():
    """Home page."""
    rows = list_top_hot_topic(db=1)
    category = request.values.get('topic')
    categorys = [cate[0] for cate in rows]
    results = []
    if category:
        categorys.remove(category)
        categorys.insert(0, category)
        results = get_hot_text_from_category(category, db=0)
    else:
        category = categorys[0]
        results = get_hot_text_from_category(category, db=0)
    print(results)
    return render_template('weibo/index.html', rows=rows, categorys=categorys, contents=results)


@weibo.route('/crawl', methods=['GET', 'POST'])
@login_required
def crawl():
    """Crawl module."""
    crawl_form = CrawlForm()
    result = get_mysql_content(days=1)
    if crawl_form.validate_on_submit():
        result = run_async_crawl(start_page=crawl_form.start_page.data,
                                 end_page=crawl_form.end_page.data)
        return redirect(url_for('weibo.crawl'))
    return render_template('weibo/crawl.html', form=crawl_form, results=result)


@weibo.route('/analyze', methods=['GET', 'POST'])
@login_required
def analyze():
    """Cluster analysis.

    :return:
    """
    analyze_form = AnalyzeForm()
    if analyze_form.validate_on_submit():
        k = analyze_form.k_cluster.data
        run_old_all_process(start_time=analyze_form.start_time.data,
                            end_time=analyze_form.end_time.data,
                            k=analyze_form.k_cluster.data)
        #datas = run_build_vsm(start_time=analyze_form.start_time.data,
        #                      end_time=analyze_form.end_time.data)
        #labels = run_k_means(k=k)
        #classify_k_cluster(labels=labels, datas=datas)
        return redirect(url_for("weibo.display"))
    return render_template('weibo/analyze.html', form=analyze_form)


@weibo.route('/display', methods=['GET', 'POST'])
@login_required
def display():
    """Chart display.

    :return:
    """
    result = {}
    keywords, img_name, rows, category = get_max_hot_keyword_chart(db=1)
    name = "images/%s/%s" % (category, img_name)
    results = sorted(keywords.items(), key=lambda d: d[1], reverse=True)[::-1]
    keywords = [key.decode('utf-8') for key, value in results]
    rows = [row.split('\t') for row in rows]
    return render_template('weibo/display.html',
                           img_name=name,
                           keywords=keywords,
                           rows=rows)


@weibo.route('/sensitive', methods=['GET', 'POST'])
@login_required
def sensitive():
    """Sensitive words.

    :return:
    """
    results = get_mysql_opinion()
    # category keys (from the database): mental health, public emergencies,
    # campus safety, subversive speech
    opinion = ['心理健康', '社会突发事件', '校园安全', '反动言论']
    sen_type = request.values.get("category")
    if sen_type:
        opinion.remove(sen_type)
        opinion.insert(0, sen_type)
        rows = results[sen_type]
    else:
        rows = results[opinion[0]]
    return render_template('weibo/sensitive.html', rows=rows, categorys=opinion)


@weibo.route('/pyecharts', methods=['GET', 'POST'])
@login_required
def show_chart():
    """Test chart.

    :return:
    """
    bar = bar_chart()
    return render_template('pyecharts.html',
                           chart_id=bar.chart_id,
                           host=REMOTE_HOST,
                           my_width='100%',
                           my_height=600,
                           my_option=json_dumps(bar.options),
                           script_list=bar.get_js_dependencies())
| 29.843537 | 96 | 0.641213 | 0 | 0 | 0 | 0 | 3,505 | 0.786403 | 0 | 0 | 911 | 0.204398 |
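Both index() and sensitive() repeat the same "move the selected category to the front" dance with remove/insert. A tiny standalone version of that reordering (promote is an illustrative name):

def promote(categories, selected):
    """Return categories with `selected` first; relative order otherwise preserved."""
    if selected and selected in categories:
        return [selected] + [c for c in categories if c != selected]
    return list(categories)

print(promote(['a', 'b', 'c'], 'b'))  # ['b', 'a', 'c']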
550c5f11985d3fc31fb9561d9cb991332777ee6d
| 2,598 |
py
|
Python
|
source/extras/Mesa/src/mesa/glapi/gl_offsets.py
|
binaryblob01/zfree86
|
e80ea992d87501b8e3e2d7c07a414591c2e11c70
|
[
"Xnet",
"X11"
] | 1 |
2021-09-08T21:13:25.000Z
|
2021-09-08T21:13:25.000Z
|
source/extras/Mesa/src/mesa/glapi/gl_offsets.py
|
binaryblob01/zfree86
|
e80ea992d87501b8e3e2d7c07a414591c2e11c70
|
[
"Xnet",
"X11"
] | null | null | null |
source/extras/Mesa/src/mesa/glapi/gl_offsets.py
|
binaryblob01/zfree86
|
e80ea992d87501b8e3e2d7c07a414591c2e11c70
|
[
"Xnet",
"X11"
] | 1 |
2021-01-22T00:19:47.000Z
|
2021-01-22T00:19:47.000Z
|
#!/usr/bin/env python

# (C) Copyright IBM Corporation 2004
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Ian Romanick <[email protected]>
#
# $XFree86: xc/extras/Mesa/src/mesa/glapi/gl_offsets.py,v 1.2 2006/05/16 15:34:46 tsi Exp $

from xml.sax import saxutils
from xml.sax import make_parser
from xml.sax.handler import feature_namespaces

import gl_XML
import license
import sys, getopt


class PrintGlOffsets(gl_XML.FilterGLAPISpecBase):
    name = "gl_offsets.py (from Mesa)"

    def __init__(self):
        gl_XML.FilterGLAPISpecBase.__init__(self)
        self.license = license.bsd_license_template % ( \
            """Copyright (C) 1999-2001  Brian Paul   All Rights Reserved.
(C) Copyright IBM Corporation 2004""", "BRIAN PAUL, IBM")

    def printFunction(self, f):
        if f.fn_offset < 0: return
        print '#define _gloffset_%s %d' % (f.name, f.fn_offset)

    def printRealHeader(self):
        print '#ifndef _GLAPI_OFFSETS_H_'
        print '#define _GLAPI_OFFSETS_H_'
        print ''
        return

    def printRealFooter(self):
        print ''
        print '#endif'
        return


def show_usage():
    print "Usage: %s [-f input_file_name]" % sys.argv[0]
    sys.exit(1)


if __name__ == '__main__':
    file_name = "gl_API.xml"

    try:
        (args, trail) = getopt.getopt(sys.argv[1:], "f:")
    except Exception,e:
        show_usage()

    for (arg, val) in args:
        if arg == "-f":
            file_name = val

    dh = PrintGlOffsets()

    parser = make_parser()
    parser.setFeature(feature_namespaces, 0)
    parser.setContentHandler(dh)

    f = open(file_name)
    dh.printHeader()
    parser.parse(f)
    dh.printFooter()
| 29.191011 | 91 | 0.736336 | 627 | 0.241339 | 0 | 0 | 0 | 0 | 0 | 0 | 1,581 | 0.608545 |
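gl_offsets.py is Python 2 throughout (print statements, `except Exception,e`) and leans on Mesa's SAX-based gl_XML framework. Purely as a hedged sketch -- assuming an API XML whose <function> elements carry name and offset attributes, which may not match gl_API.xml's real schema -- the same header emission in modern Python could look like:

import xml.etree.ElementTree as ET

def print_offsets(path):
    print('#ifndef _GLAPI_OFFSETS_H_')
    print('#define _GLAPI_OFFSETS_H_')
    print('')
    for fn in ET.parse(path).getroot().iter('function'):
        offset = fn.get('offset')
        if offset is not None:
            print('#define _gloffset_%s %s' % (fn.get('name'), offset))
    print('')
    print('#endif')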
550d6f4bd6f869c618dd2e2fd54a7578116878b5
| 1,728 |
py
|
Python
|
exercises/rational-numbers/rational_numbers.py
|
southpush/python
|
048191583ed2cf668c6180d851d100f277a74101
|
[
"MIT"
] | null | null | null |
exercises/rational-numbers/rational_numbers.py
|
southpush/python
|
048191583ed2cf668c6180d851d100f277a74101
|
[
"MIT"
] | null | null | null |
exercises/rational-numbers/rational_numbers.py
|
southpush/python
|
048191583ed2cf668c6180d851d100f277a74101
|
[
"MIT"
] | null | null | null |
from __future__ import division


class Rational(object):
    def __init__(self, numer, denom):
        if denom == 0:
            raise ValueError('denom should not be 0')
        i = 2
        while i <= abs(numer):
            if numer % i == 0 and denom % i == 0:
                numer = numer // i
                denom = denom // i
            else:
                i += 1
        if numer * denom < 0:
            self.numer = -abs(numer)
            self.denom = abs(denom)
        else:
            self.numer = abs(numer)
            self.denom = abs(denom)

    def __eq__(self, other):
        if self.numer == 0 and other.numer == 0:
            return True
        return self.numer == other.numer and self.denom == other.denom

    def __repr__(self):
        return '{}/{}'.format(self.numer, self.denom)

    def __add__(self, other):
        return Rational(self.numer * other.denom + other.numer * self.denom, self.denom * other.denom)

    def __sub__(self, other):
        return Rational(self.numer * other.denom - other.numer * self.denom, self.denom * other.denom)

    def __mul__(self, other):
        return Rational(self.numer * other.numer, self.denom * other.denom)

    def __truediv__(self, other):
        if self.denom * other.numer != 0:
            return Rational(self.numer * other.denom, self.denom * other.numer)
        else:
            raise ValueError()

    def __abs__(self):
        return Rational(abs(self.numer), abs(self.denom))

    def __pow__(self, power):
        return Rational(self.numer ** power, self.denom ** power) if power > 0 else Rational(self.denom ** -power, self.numer ** -power)

    def __rpow__(self, base):
        return base ** (self.numer / self.denom)
| 32.603774 | 136 | 0.567708 | 1,693 | 0.979745 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.017361 |
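The trial-division loop in __init__ strips common factors one at a time; math.gcd does the whole reduction in one step. A short sketch of the same normalisation, sign handling included (reduce_fraction is an illustrative helper):

from math import gcd

def reduce_fraction(numer, denom):
    if denom == 0:
        raise ValueError('denom should not be 0')
    g = gcd(numer, denom) or 1      # gcd is non-negative; `or 1` guards numer == 0
    sign = -1 if numer * denom < 0 else 1
    return sign * abs(numer) // g, abs(denom) // g

print(reduce_fraction(6, -9))   # (-2, 3)
print(reduce_fraction(-4, -8))  # (1, 2)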
550d81bb5bc1c79a79d6bdfe74dcdeb0caf47b8e
| 6,269 |
py
|
Python
|
webui/forms/dag.py
|
blawson/dataforj
|
2b666f303b628ceced425e2bdb7f93ae2ccc2a73
|
[
"Apache-2.0"
] | 1 |
2021-04-30T02:15:33.000Z
|
2021-04-30T02:15:33.000Z
|
webui/forms/dag.py
|
blawson/dataforj
|
2b666f303b628ceced425e2bdb7f93ae2ccc2a73
|
[
"Apache-2.0"
] | null | null | null |
webui/forms/dag.py
|
blawson/dataforj
|
2b666f303b628ceced425e2bdb7f93ae2ccc2a73
|
[
"Apache-2.0"
] | 1 |
2021-04-30T03:27:20.000Z
|
2021-04-30T03:27:20.000Z
|
from django import forms

from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions

from webui.dataforj import get_flow


class SelectedStepForm(forms.Form):
    step_name = forms.CharField(label='Step name', max_length=100)


class AddSourceForm(forms.Form):
    step_name = forms.CharField(label='Step name', max_length=100)
    step_description = forms.CharField(label='Step description')
    uri = forms.CharField(label='URI, i.e. location of the source data')
    format_type = forms.CharField(label='Format of the data (e.g. CSV, Parquet, etc)')
    options_text = forms.CharField(widget=forms.Textarea,
                                   required=False,
                                   label='Options separated by newlines in the form k=v, e.g. delimiter=True')

    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.layout = Layout(
        Field('step_name', css_class='input-xlarge'),
        Field('step_description', css_class='input-xlarge'),
        Field('uri', css_class='input-xlarge'),
        Field('format_type', css_class='input-xlarge'),
        Field('options_text', css_class='input-xlarge'),
        FormActions(
            Submit('new', 'Add Source', css_class="btn-primary")
        )
    )


class AddChildForm(forms.Form):
    CHOICES = [
        ('sql', 'SQL'),
        ('pyspark', 'PySpark'),
        ('union', 'Union'),
        ('sink', 'Sink')
    ]
    step_type = forms.ChoiceField(label='Choose the type of step you wish to create', choices=CHOICES, widget=forms.RadioSelect)

    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.layout = Layout(
        Field('step_type', css_class='input-xlarge')
    )


def create_dependson_tuple(flow):
    depends_on_list = [step.name for step in flow._steps.values() if step.dagre_type != 'Sink']
    return tuple((name, name) for name in depends_on_list)


class AddSinkForm(forms.Form):
    step_name = forms.CharField(label='Step name', max_length=100)
    step_description = forms.CharField(label='Step description')
    depends_ons = forms.ChoiceField(label='Which step should become input for this one', choices=[])
    uri = forms.CharField(label='URI, i.e. location of the source data')
    format_type = forms.CharField(label='Format of the data (e.g. CSV, Parquet, etc)')
    mode = forms.CharField(label='Mode (e.g. overwrite)')
    options_text = forms.CharField(widget=forms.Textarea,
                                   label='Options separated by newlines in the form k=v, e.g. delimiter=True',
                                   required=False)

    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.layout = Layout(
        Field('step_type', css_class='input-xlarge'),
        Field('step_name', css_class='input-xlarge'),
        Field('step_description', css_class='input-xlarge'),
        Field('depends_ons', rows="3", css_class='input-xlarge'),
        Field('uri', css_class='input-xlarge'),
        Field('format_type', css_class='input-xlarge'),
        Field('mode', css_class='input-xlarge'),
        Field('options_text', css_class='input-xlarge')
    )

    def __init__(self, *args, **kwargs):
        flow = kwargs.pop('flow')
        super(AddSinkForm, self).__init__(*args, **kwargs)
        self.fields['depends_ons'].choices = create_dependson_tuple(flow)


class AddUnionForm(forms.Form):
    step_name = forms.CharField(label='Step name', max_length=100)
    step_description = forms.CharField(label='Step description')
    depends_ons = forms.MultipleChoiceField(label='Which steps should become input for this one', choices=[])

    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.layout = Layout(
        Field('step_type', css_class='input-xlarge'),
        Field('step_name', css_class='input-xlarge'),
        Field('step_description', css_class='input-xlarge'),
        Field('depends_ons', rows="3", css_class='input-xlarge')
    )

    def __init__(self, *args, **kwargs):
        flow = kwargs.pop('flow')
        super(AddUnionForm, self).__init__(*args, **kwargs)
        self.fields['depends_ons'].choices = create_dependson_tuple(flow)


class AddSqlForm(forms.Form):
    step_name = forms.CharField(label='Step name', max_length=100)
    step_description = forms.CharField(label='Step description')
    depends_ons = forms.MultipleChoiceField(label='Which steps should become input for this one', choices=[])
    sql_file_path = forms.CharField(label='Location where the SQL code will be stored')

    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.layout = Layout(
        Field('step_type', css_class='input-xlarge'),
        Field('step_name', css_class='input-xlarge'),
        Field('step_description', css_class='input-xlarge'),
        Field('depends_ons', rows="3", css_class='input-xlarge'),
        Field('sql_file_path', css_class='input-xlarge')
    )

    def __init__(self, *args, **kwargs):
        flow = kwargs.pop('flow')
        super(AddSqlForm, self).__init__(*args, **kwargs)
        self.fields['depends_ons'].choices = create_dependson_tuple(flow)


class AddPySparkForm(forms.Form):
    step_name = forms.CharField(label='Step name', max_length=100)
    step_description = forms.CharField(label='Step description')
    depends_ons = forms.MultipleChoiceField(label='Which steps should become input for this one', choices=[])
    pyspark_file_path = forms.CharField(label='Location where the PySpark code will be stored')

    helper = FormHelper()
    helper.form_class = 'form-horizontal'
    helper.layout = Layout(
        Field('step_type', css_class='input-xlarge'),
        Field('step_name', css_class='input-xlarge'),
        Field('step_description', css_class='input-xlarge'),
        Field('depends_ons', rows="3", css_class='input-xlarge'),
        Field('pyspark_file_path', css_class='input-xlarge')
    )

    def __init__(self, *args, **kwargs):
        flow = kwargs.pop('flow')
        super(AddPySparkForm, self).__init__(*args, **kwargs)
        self.fields['depends_ons'].choices = create_dependson_tuple(flow)
| 40.445161 | 128 | 0.667092 | 5,803 | 0.925666 | 0 | 0 | 0 | 0 | 0 | 0 | 1,831 | 0.292072 |
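The four Add*Form constructors are identical: pop the flow off the kwargs, call super, then fill the depends_ons choices at request time (class-level choices would be stale). A hedged sketch of a mixin that centralises the pattern, reusing create_dependson_tuple from the module above (DependsOnMixin is illustrative, not in the repo):

from django import forms

class DependsOnMixin:
    """Populate depends_ons from the flow passed at construction time."""
    def __init__(self, *args, **kwargs):
        flow = kwargs.pop('flow')
        super().__init__(*args, **kwargs)
        # create_dependson_tuple is defined in the module above
        self.fields['depends_ons'].choices = create_dependson_tuple(flow)

class AddSqlFormDry(DependsOnMixin, forms.Form):
    step_name = forms.CharField(max_length=100)
    depends_ons = forms.MultipleChoiceField(choices=[])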
550fca2bdb3d0148522e4c33f7841bfbb8e59b80
| 3,109 |
py
|
Python
|
remote-access/remote_connect.py
|
sag-tgo/thin-edge.io_examples
|
7da43f330b640d48c2b0f3be2594ff85fe5c9dfe
|
[
"Apache-2.0"
] | 3 |
2021-06-07T19:11:23.000Z
|
2022-02-03T16:20:27.000Z
|
remote-access/remote_connect.py
|
sag-tgo/thin-edge.io_examples
|
7da43f330b640d48c2b0f3be2594ff85fe5c9dfe
|
[
"Apache-2.0"
] | 5 |
2021-11-04T09:44:36.000Z
|
2022-03-30T22:19:11.000Z
|
remote-access/remote_connect.py
|
sag-tgo/thin-edge.io_examples
|
7da43f330b640d48c2b0f3be2594ff85fe5c9dfe
|
[
"Apache-2.0"
] | 11 |
2021-06-16T14:04:01.000Z
|
2022-03-17T08:29:54.000Z
|
import logging
from c8ydp.device_proxy import DeviceProxy, WebSocketFailureException
from threading import Thread
import threading
from c8yMQTT import C8yMQTT
import concurrent.futures
import os

logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)s %(message)s')
logger = logging.getLogger(__name__)


def setCommandExecuting(command):
    logger.info('Setting command: ' + command + ' to executing')
    c8y.publish('c8y/s/us', '501,' + command)


def setCommandSuccessfull(command):
    logger.info('Setting command: ' + command + ' to successful')
    c8y.publish('c8y/s/us', '503,' + command)


def setCommandFailed(command, errorMessage):
    logger.info('Setting command: ' + command + ' to failed cause: ' + errorMessage)
    c8y.publish('c8y/s/us', '502,' + command + ',' + errorMessage)


def on_message(client, obj, msg):
    message = msg.payload.decode('utf-8')
    logger.debug("Message Received: " + msg.topic + " " + str(msg.qos) + " " + message)
    if message.startswith('71'):
        fields = message.split(",")
        c8y.token = fields[1]
        logger.info('New JWT Token received')
    if message.startswith('530'):
        fields = message.split(",")
        tcp_host = fields[2]
        tcp_port = int(fields[3])
        connection_key = fields[4]
        c8y.logger.info('Received Remote Connect.')
        setCommandExecuting('c8y_RemoteAccessConnect')
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future = executor.submit(remoteConnect, tcp_host, tcp_port, connection_key, 'https://' + url)
            return_value = future.result()
            c8y.logger.info('Remote Connect Result:' + return_value)
            if return_value.startswith('success'):
                setCommandSuccessfull('c8y_RemoteAccessConnect')
            else:
                setCommandFailed('c8y_RemoteAccessConnect', return_value)


def remoteConnect(tcp_host, tcp_port, connection_key, base_url):
    try:
        c8y.logger.info('Starting Remote to: ' + str(tcp_host) + ':' + str(tcp_port) + ' Key: ' + str(connection_key) + ' url: ' + str(base_url))
        devProx = DeviceProxy(tcp_host,
                              tcp_port,
                              None,
                              connection_key,
                              base_url,
                              None,
                              None,
                              c8y.token
                              )
        devProx.connect()
        logger.info('Remote Connection finished successfully')
        return 'success'
    except Exception as e:
        logger.error('Remote Connection error:' + str(e))
        return str(e)


stream = os.popen('sudo tedge config get c8y.url')
url = stream.read().strip()
logger.info('Got tenant URL: ' + url)

c8y = C8yMQTT('remote_connect', 'localhost', 1883, 'c8y/s/ds,c8y/s/e,c8y/s/dt,c8y/s/dat')
connected = c8y.connect(on_message)
logger.info('Connection Result:' + str(connected))
if connected != 0:
    logger.error('Connection not possible: ' + str(connected))
    exit()
c8y.publish("c8y/s/us", "114,c8y_RemoteAccessConnect")
| 38.382716 | 145 | 0.620135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 724 | 0.232872 |
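The 501/503/502 publishes above follow Cumulocity's SmartREST operation lifecycle: mark the operation EXECUTING, run it, then report SUCCESSFUL or FAILED. A hedged sketch wrapping any handler in that pattern (publish here is a stand-in for the MQTT client's method):

def run_operation(publish, fragment, handler):
    """Mark `fragment` executing, run the handler, then report the result."""
    publish('c8y/s/us', '501,' + fragment)                   # EXECUTING
    try:
        handler()
        publish('c8y/s/us', '503,' + fragment)               # SUCCESSFUL
    except Exception as exc:
        publish('c8y/s/us', '502,%s,%s' % (fragment, exc))   # FAILED

run_operation(lambda t, m: print(t, m), 'c8y_RemoteAccessConnect', lambda: None)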
550fdb4e80b863ed65bbb7d6dee920e010a04788
| 1,397 |
py
|
Python
|
Homework 1/question_solutions/question_3_convergence.py
|
rukmal/FE-621-Homework
|
9c7cef7931b58aed54867acd8e8cf1928bc6d2dd
|
[
"MIT"
] | 4 |
2020-04-29T04:34:50.000Z
|
2021-11-11T07:49:08.000Z
|
Homework 1/question_solutions/question_3_convergence.py
|
rukmal/FE-621-Homework
|
9c7cef7931b58aed54867acd8e8cf1928bc6d2dd
|
[
"MIT"
] | null | null | null |
Homework 1/question_solutions/question_3_convergence.py
|
rukmal/FE-621-Homework
|
9c7cef7931b58aed54867acd8e8cf1928bc6d2dd
|
[
"MIT"
] | 1 |
2020-04-23T07:32:44.000Z
|
2020-04-23T07:32:44.000Z
|
from context import fe621

import numpy as np
import pandas as pd


def convergenceSegmentLimit():
    """Function to compute the number of segments required for convergence of
    various quadrature methods.
    """

    # Objective function
    def f(x: float) -> float:
        return np.where(x == 0.0, 1.0, np.sin(x) / x)

    # Setting target tolerance level for termination
    epsilon = 1e-3

    # Using Trapezoidal rule
    trapezoidal_result = fe621.numerical_integration.convergenceApproximation(
        f=f,
        rule=fe621.numerical_integration.trapezoidalRule,
        epsilon=epsilon
    )

    # Using Simpson's rule
    simpsons_result = fe621.numerical_integration.convergenceApproximation(
        f=f,
        rule=fe621.numerical_integration.simpsonsRule,
        epsilon=epsilon
    )

    # Building DataFrame of results for output
    results = pd.DataFrame(np.abs(np.array([trapezoidal_result,
                                            simpsons_result])))

    # Setting row and column names
    results.columns = ['Estimated Area', 'Segments']
    results.index = ['Trapezoidal Rule', 'Simpson\'s Rule']

    # Saving to CSV
    results.to_csv('Homework 1/bin/numerical_integration/convergence.csv',
                   header=True, index=True, float_format='%.8e')


if __name__ == '__main__':
    # Part 3 - Convergence Analysis
    convergenceSegmentLimit()
| 28.510204 | 78 | 0.662133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 476 | 0.34073 |
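fe621.numerical_integration.convergenceApproximation is not shown in this file; a hedged sketch of the standard scheme it presumably implements -- double the segment count until two successive composite estimates agree within epsilon (trapezoid and converge are illustrative names):

import numpy as np

def trapezoid(f, a, b, n):
    """Composite trapezoidal rule with n equal segments."""
    x = np.linspace(a, b, n + 1)
    y = f(x)
    return (b - a) / n * (y[0] / 2 + y[1:-1].sum() + y[-1] / 2)

def converge(f, a, b, rule, epsilon=1e-3):
    """Refine until successive estimates agree within epsilon."""
    n, prev = 1, rule(f, a, b, 1)
    while True:
        n *= 2
        cur = rule(f, a, b, n)
        if abs(cur - prev) < epsilon:
            return cur, n
        prev = cur

# np.where evaluates both branches, so x == 0 emits a harmless divide warning.
f = lambda x: np.where(x == 0.0, 1.0, np.sin(x) / x)
print(converge(f, 0.0, 10.0, trapezoid))  # approaches Si(10) ~= 1.6583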